/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>

#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() is not yet supporting the new syntax,
         * that's why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}

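/*
 * Usage sketch for the iterator trio above (an illustration, not part of
 * the original file; the devargs string and the early-exit condition are
 * hypothetical).  rte_eth_iterator_next() cleans the iterator up itself
 * once it runs out of ports, so rte_eth_iterator_cleanup() is only needed
 * when stopping early:
 *
 *      struct rte_dev_iterator iter;
 *      uint16_t port_id;
 *
 *      if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") != 0)
 *              return;
 *      for (port_id = rte_eth_iterator_next(&iter);
 *           port_id != RTE_MAX_ETHPORTS;
 *           port_id = rte_eth_iterator_next(&iter)) {
 *              if (want_only_first_match) {
 *                      rte_eth_iterator_cleanup(&iter);
 *                      break;
 *              }
 *      }
 */
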
uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

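/*
 * PMD-side sketch (an illustration, not part of the original file): a
 * driver probe routine typically allocates its port here and fills in the
 * ops table before finishing initialization.  "net_example0" and
 * example_dev_ops are hypothetical names.
 *
 *      struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *      if (eth_dev == NULL)
 *              return -ENOMEM;
 *      eth_dev->dev_ops = &example_dev_ops;
 */
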
/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                _rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

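/*
 * Ownership usage sketch (illustrative, not from the original file; the
 * owner name is hypothetical).  An entity claims an owner id once, then
 * marks the ports it manages so that generic iteration such as
 * RTE_ETH_FOREACH_DEV skips them:
 *
 *      struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *      if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *          rte_eth_dev_owner_set(port_id, &owner) == 0)
 *              ;  /+ port is now hidden from unowned iteration +/
 */
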
int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data' because
         * it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

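/*
 * Round-trip lookup sketch (illustrative; "net_null0" is a hypothetical,
 * already-probed device name): resolve a port id from a name and the name
 * back from the id.
 *
 *      uint16_t pid;
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *
 *      if (rte_eth_dev_get_port_by_name("net_null0", &pid) == 0 &&
 *          rte_eth_dev_get_name_by_port(pid, name) == 0)
 *              RTE_ASSERT(strcmp(name, "net_null0") == 0);
 */
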
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}

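/*
 * Conversion sketch (an illustration, not part of the original file):
 * build a fixed-speed link_speeds mask for rte_eth_conf from a numeric
 * speed value.
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *              rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */
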
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if an offload was enabled although not requested. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}

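/*
 * Worked example of the XOR diff above (an illustration added here, not in
 * the original file): with req = RSS_HASH|TCP_CKSUM and set = TCP_CKSUM,
 * offloads_diff = RSS_HASH.  That bit is in req but not in set, so the
 * port failed to enable it and validate_offloads() returns -EINVAL.
 * Conversely, a bit present only in set is merely logged at DEBUG level.
 */
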
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If the driver does not provide any preferred values, fall back on
1260          * EAL defaults.
1261          */
1262         if (nb_rx_q == 0 && nb_tx_q == 0) {
1263                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1264                 if (nb_rx_q == 0)
1265                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1266                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1267                 if (nb_tx_q == 0)
1268                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1269         }
1270
1271         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1272                 RTE_ETHDEV_LOG(ERR,
1273                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1274                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1275                 ret = -EINVAL;
1276                 goto rollback;
1277         }
1278
1279         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1280                 RTE_ETHDEV_LOG(ERR,
1281                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1282                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1283                 ret = -EINVAL;
1284                 goto rollback;
1285         }
1286
1287         /*
1288          * Check that the numbers of RX and TX queues are not greater
1289          * than the maximum number of RX and TX queues supported by the
1290          * configured device.
1291          */
1292         if (nb_rx_q > dev_info.max_rx_queues) {
1293                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1294                         port_id, nb_rx_q, dev_info.max_rx_queues);
1295                 ret = -EINVAL;
1296                 goto rollback;
1297         }
1298
1299         if (nb_tx_q > dev_info.max_tx_queues) {
1300                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1301                         port_id, nb_tx_q, dev_info.max_tx_queues);
1302                 ret = -EINVAL;
1303                 goto rollback;
1304         }
1305
1306         /* Check that the device supports requested interrupts */
1307         if ((dev_conf->intr_conf.lsc == 1) &&
1308                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1309                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1310                         dev->device->driver->name);
1311                 ret = -EINVAL;
1312                 goto rollback;
1313         }
1314         if ((dev_conf->intr_conf.rmv == 1) &&
1315                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1316                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1317                         dev->device->driver->name);
1318                 ret = -EINVAL;
1319                 goto rollback;
1320         }
1321
1322         /*
1323          * If jumbo frames are enabled, check that the maximum RX packet
1324          * length is supported by the configured device.
1325          */
1326         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1327                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1328                         RTE_ETHDEV_LOG(ERR,
1329                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1330                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1331                                 dev_info.max_rx_pktlen);
1332                         ret = -EINVAL;
1333                         goto rollback;
1334                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1335                         RTE_ETHDEV_LOG(ERR,
1336                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1337                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1338                                 (unsigned int)RTE_ETHER_MIN_LEN);
1339                         ret = -EINVAL;
1340                         goto rollback;
1341                 }
1342         } else {
1343                 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1344                         dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1345                         /* Use default value */
1346                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1347                                                         RTE_ETHER_MAX_LEN;
1348         }
1349
1350         /*
1351          * If LRO is enabled, check that the maximum aggregated packet
1352          * size is supported by the configured device.
1353          */
1354         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1355                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1356                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1357                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1358                 ret = check_lro_pkt_size(port_id,
1359                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1360                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1361                                 dev_info.max_lro_pkt_size);
1362                 if (ret != 0)
1363                         goto rollback;
1364         }
1365
1366         /* Any requested offloading must be within its device capabilities */
1367         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1368              dev_conf->rxmode.offloads) {
1369                 RTE_ETHDEV_LOG(ERR,
1370                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1371                         "capabilities 0x%"PRIx64" in %s()\n",
1372                         port_id, dev_conf->rxmode.offloads,
1373                         dev_info.rx_offload_capa,
1374                         __func__);
1375                 ret = -EINVAL;
1376                 goto rollback;
1377         }
1378         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1379              dev_conf->txmode.offloads) {
1380                 RTE_ETHDEV_LOG(ERR,
1381                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1382                         "capabilities 0x%"PRIx64" in %s()\n",
1383                         port_id, dev_conf->txmode.offloads,
1384                         dev_info.tx_offload_capa,
1385                         __func__);
1386                 ret = -EINVAL;
1387                 goto rollback;
1388         }
1389
1390         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1391                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1392
1393         /* Check that device supports requested rss hash functions. */
1394         if ((dev_info.flow_type_rss_offloads |
1395              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1396             dev_info.flow_type_rss_offloads) {
1397                 RTE_ETHDEV_LOG(ERR,
1398                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1399                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1400                         dev_info.flow_type_rss_offloads);
1401                 ret = -EINVAL;
1402                 goto rollback;
1403         }
1404
1405         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1406         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1407             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1408                 RTE_ETHDEV_LOG(ERR,
1409                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1410                         port_id,
1411                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1412                 ret = -EINVAL;
1413                 goto rollback;
1414         }
1415
1416         /*
1417          * Setup new number of RX/TX queues and reconfigure device.
1418          */
1419         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1420         if (diag != 0) {
1421                 RTE_ETHDEV_LOG(ERR,
1422                         "Port%u rte_eth_dev_rx_queue_config = %d\n",
1423                         port_id, diag);
1424                 ret = diag;
1425                 goto rollback;
1426         }
1427
1428         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1429         if (diag != 0) {
1430                 RTE_ETHDEV_LOG(ERR,
1431                         "Port%u rte_eth_dev_tx_queue_config = %d\n",
1432                         port_id, diag);
1433                 rte_eth_dev_rx_queue_config(dev, 0);
1434                 ret = diag;
1435                 goto rollback;
1436         }
1437
1438         diag = (*dev->dev_ops->dev_configure)(dev);
1439         if (diag != 0) {
1440                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1441                         port_id, diag);
1442                 ret = eth_err(port_id, diag);
1443                 goto reset_queues;
1444         }
1445
1446         /* Initialize Rx profiling if enabled at compilation time. */
1447         diag = __rte_eth_dev_profile_init(port_id, dev);
1448         if (diag != 0) {
1449                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1450                         port_id, diag);
1451                 ret = eth_err(port_id, diag);
1452                 goto reset_queues;
1453         }
1454
1455         /* Validate Rx offloads. */
1456         diag = validate_offloads(port_id,
1457                         dev_conf->rxmode.offloads,
1458                         dev->data->dev_conf.rxmode.offloads, "Rx",
1459                         rte_eth_dev_rx_offload_name);
1460         if (diag != 0) {
1461                 ret = diag;
1462                 goto reset_queues;
1463         }
1464
1465         /* Validate Tx offloads. */
1466         diag = validate_offloads(port_id,
1467                         dev_conf->txmode.offloads,
1468                         dev->data->dev_conf.txmode.offloads, "Tx",
1469                         rte_eth_dev_tx_offload_name);
1470         if (diag != 0) {
1471                 ret = diag;
1472                 goto reset_queues;
1473         }
1474
1475         return 0;
1476 reset_queues:
1477         rte_eth_dev_rx_queue_config(dev, 0);
1478         rte_eth_dev_tx_queue_config(dev, 0);
1479 rollback:
1480         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1481
1482         return ret;
1483 }
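/*
 * Usage sketch for the RSS checks above: a configuration that passes both
 * validations. The requested hash functions must be a subset of
 * dev_info.flow_type_rss_offloads, and DEV_RX_OFFLOAD_RSS_HASH is only
 * accepted together with an RSS-enabled mq_mode. The hash selection below is
 * an illustrative assumption; dev_info is assumed to come from a prior
 * rte_eth_dev_info_get() call.
 *
 *      struct rte_eth_conf conf = {
 *              .rxmode = {
 *                      .mq_mode = ETH_MQ_RX_RSS,
 *                      .offloads = DEV_RX_OFFLOAD_RSS_HASH,
 *              },
 *      };
 *      conf.rx_adv_conf.rss_conf.rss_hf =
 *              ETH_RSS_IP & dev_info.flow_type_rss_offloads;
 *      ret = rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, &conf);
 */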
1484
1485 void
1486 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1487 {
1488         if (dev->data->dev_started) {
1489                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1490                         dev->data->port_id);
1491                 return;
1492         }
1493
1494         rte_eth_dev_rx_queue_config(dev, 0);
1495         rte_eth_dev_tx_queue_config(dev, 0);
1496
1497         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1498 }
1499
1500 static void
1501 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1502                         struct rte_eth_dev_info *dev_info)
1503 {
1504         struct rte_ether_addr *addr;
1505         uint16_t i;
1506         uint32_t pool = 0;
1507         uint64_t pool_mask;
1508
1509         /* replay MAC address configuration including default MAC */
1510         addr = &dev->data->mac_addrs[0];
1511         if (*dev->dev_ops->mac_addr_set != NULL)
1512                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1513         else if (*dev->dev_ops->mac_addr_add != NULL)
1514                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1515
1516         if (*dev->dev_ops->mac_addr_add != NULL) {
1517                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1518                         addr = &dev->data->mac_addrs[i];
1519
1520                         /* skip zero address */
1521                         if (rte_is_zero_ether_addr(addr))
1522                                 continue;
1523
1524                         pool = 0;
1525                         pool_mask = dev->data->mac_pool_sel[i];
1526
1527                         do {
1528                                 if (pool_mask & 1ULL)
1529                                         (*dev->dev_ops->mac_addr_add)(dev,
1530                                                 addr, i, pool);
1531                                 pool_mask >>= 1;
1532                                 pool++;
1533                         } while (pool_mask);
1534                 }
1535         }
1536 }
1537
1538 static int
1539 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1540                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1541 {
1542         int ret;
1543
1544         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1545                 rte_eth_dev_mac_restore(dev, dev_info);
1546
1547         /* replay promiscuous configuration */
1548         /*
1549          * Use the callbacks directly since we don't need the port_id
1550          * check and want to bypass the same-value early return.
1551          */
1552         if (rte_eth_promiscuous_get(port_id) == 1 &&
1553             *dev->dev_ops->promiscuous_enable != NULL) {
1554                 ret = eth_err(port_id,
1555                               (*dev->dev_ops->promiscuous_enable)(dev));
1556                 if (ret != 0 && ret != -ENOTSUP) {
1557                         RTE_ETHDEV_LOG(ERR,
1558                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1559                                 port_id, rte_strerror(-ret));
1560                         return ret;
1561                 }
1562         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1563                    *dev->dev_ops->promiscuous_disable != NULL) {
1564                 ret = eth_err(port_id,
1565                               (*dev->dev_ops->promiscuous_disable)(dev));
1566                 if (ret != 0 && ret != -ENOTSUP) {
1567                         RTE_ETHDEV_LOG(ERR,
1568                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1569                                 port_id, rte_strerror(-ret));
1570                         return ret;
1571                 }
1572         }
1573
1574         /* replay all multicast configuration */
1575         /*
1576          * use callbacks directly since we don't need port_id check and
1577          * Use the callbacks directly since we don't need the port_id
1578          * check and want to bypass the same-value early return.
1579         if (rte_eth_allmulticast_get(port_id) == 1 &&
1580             *dev->dev_ops->allmulticast_enable != NULL) {
1581                 ret = eth_err(port_id,
1582                               (*dev->dev_ops->allmulticast_enable)(dev));
1583                 if (ret != 0 && ret != -ENOTSUP) {
1584                         RTE_ETHDEV_LOG(ERR,
1585                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1586                                 port_id, rte_strerror(-ret));
1587                         return ret;
1588                 }
1589         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1590                    *dev->dev_ops->allmulticast_disable != NULL) {
1591                 ret = eth_err(port_id,
1592                               (*dev->dev_ops->allmulticast_disable)(dev));
1593                 if (ret != 0 && ret != -ENOTSUP) {
1594                         RTE_ETHDEV_LOG(ERR,
1595                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1596                                 port_id, rte_strerror(-ret));
1597                         return ret;
1598                 }
1599         }
1600
1601         return 0;
1602 }
1603
1604 int
1605 rte_eth_dev_start(uint16_t port_id)
1606 {
1607         struct rte_eth_dev *dev;
1608         struct rte_eth_dev_info dev_info;
1609         int diag;
1610         int ret;
1611
1612         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1613
1614         dev = &rte_eth_devices[port_id];
1615
1616         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1617
1618         if (dev->data->dev_started != 0) {
1619                 RTE_ETHDEV_LOG(INFO,
1620                         "Device with port_id=%"PRIu16" already started\n",
1621                         port_id);
1622                 return 0;
1623         }
1624
1625         ret = rte_eth_dev_info_get(port_id, &dev_info);
1626         if (ret != 0)
1627                 return ret;
1628
1629         /* Let's restore the MAC now if the device does not support live change */
1630         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1631                 rte_eth_dev_mac_restore(dev, &dev_info);
1632
1633         diag = (*dev->dev_ops->dev_start)(dev);
1634         if (diag == 0)
1635                 dev->data->dev_started = 1;
1636         else
1637                 return eth_err(port_id, diag);
1638
1639         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1640         if (ret != 0) {
1641                 RTE_ETHDEV_LOG(ERR,
1642                         "Error during restoring configuration for device (port %u): %s\n",
1643                         port_id, rte_strerror(-ret));
1644                 rte_eth_dev_stop(port_id);
1645                 return ret;
1646         }
1647
1648         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1649                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1650                 (*dev->dev_ops->link_update)(dev, 0);
1651         }
1652         return 0;
1653 }
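/*
 * Bring-up sketch: the canonical sequence that leads to
 * rte_eth_dev_start(). The single queue pair, the descriptor counts and the
 * mempool "mbuf_pool" are illustrative assumptions.
 *
 *      if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) != 0)
 *              rte_exit(EXIT_FAILURE, "Cannot configure port %u\n", port_id);
 *      if (rte_eth_rx_queue_setup(port_id, 0, 1024,
 *                      rte_eth_dev_socket_id(port_id), NULL, mbuf_pool) != 0)
 *              rte_exit(EXIT_FAILURE, "Cannot set up Rx queue\n");
 *      if (rte_eth_tx_queue_setup(port_id, 0, 1024,
 *                      rte_eth_dev_socket_id(port_id), NULL) != 0)
 *              rte_exit(EXIT_FAILURE, "Cannot set up Tx queue\n");
 *      if (rte_eth_dev_start(port_id) != 0)
 *              rte_exit(EXIT_FAILURE, "Cannot start port %u\n", port_id);
 */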
1654
1655 void
1656 rte_eth_dev_stop(uint16_t port_id)
1657 {
1658         struct rte_eth_dev *dev;
1659
1660         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1661         dev = &rte_eth_devices[port_id];
1662
1663         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1664
1665         if (dev->data->dev_started == 0) {
1666                 RTE_ETHDEV_LOG(INFO,
1667                         "Device with port_id=%"PRIu16" already stopped\n",
1668                         port_id);
1669                 return;
1670         }
1671
1672         dev->data->dev_started = 0;
1673         (*dev->dev_ops->dev_stop)(dev);
1674 }
1675
1676 int
1677 rte_eth_dev_set_link_up(uint16_t port_id)
1678 {
1679         struct rte_eth_dev *dev;
1680
1681         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1682
1683         dev = &rte_eth_devices[port_id];
1684
1685         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1686         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1687 }
1688
1689 int
1690 rte_eth_dev_set_link_down(uint16_t port_id)
1691 {
1692         struct rte_eth_dev *dev;
1693
1694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1695
1696         dev = &rte_eth_devices[port_id];
1697
1698         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1699         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1700 }
1701
1702 void
1703 rte_eth_dev_close(uint16_t port_id)
1704 {
1705         struct rte_eth_dev *dev;
1706
1707         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1708         dev = &rte_eth_devices[port_id];
1709
1710         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1711         dev->data->dev_started = 0;
1712         (*dev->dev_ops->dev_close)(dev);
1713
1714         /* check behaviour flag - temporary for PMD migration */
1715         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1716                 /* new behaviour: send event + reset state + free all data */
1717                 rte_eth_dev_release_port(dev);
1718                 return;
1719         }
1720                 RTE_ETHDEV_LOG(DEBUG, "Port closing is using the old behaviour.\n"
1721                         "The driver %s should migrate to the new behaviour.\n",
1722                         dev->device->driver->name);
1723         /* old behaviour: only free queue arrays */
1724         dev->data->nb_rx_queues = 0;
1725         rte_free(dev->data->rx_queues);
1726         dev->data->rx_queues = NULL;
1727         dev->data->nb_tx_queues = 0;
1728         rte_free(dev->data->tx_queues);
1729         dev->data->tx_queues = NULL;
1730 }
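/*
 * Teardown sketch: the counterpart of the bring-up sequence. The port must be
 * stopped before it is closed; when the driver sets RTE_ETH_DEV_CLOSE_REMOVE,
 * the close call also releases the port and frees all of its data.
 *
 *      rte_eth_dev_stop(port_id);
 *      rte_eth_dev_close(port_id);
 */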
1731
1732 int
1733 rte_eth_dev_reset(uint16_t port_id)
1734 {
1735         struct rte_eth_dev *dev;
1736         int ret;
1737
1738         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1739         dev = &rte_eth_devices[port_id];
1740
1741         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1742
1743         rte_eth_dev_stop(port_id);
1744         ret = dev->dev_ops->dev_reset(dev);
1745
1746         return eth_err(port_id, ret);
1747 }
1748
1749 int
1750 rte_eth_dev_is_removed(uint16_t port_id)
1751 {
1752         struct rte_eth_dev *dev;
1753         int ret;
1754
1755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1756
1757         dev = &rte_eth_devices[port_id];
1758
1759         if (dev->state == RTE_ETH_DEV_REMOVED)
1760                 return 1;
1761
1762         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1763
1764         ret = dev->dev_ops->is_removed(dev);
1765         if (ret != 0)
1766                 /* Device is physically removed. */
1767                 dev->state = RTE_ETH_DEV_REMOVED;
1768
1769         return ret;
1770 }
1771
1772 int
1773 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1774                        uint16_t nb_rx_desc, unsigned int socket_id,
1775                        const struct rte_eth_rxconf *rx_conf,
1776                        struct rte_mempool *mp)
1777 {
1778         int ret;
1779         uint32_t mbp_buf_size;
1780         struct rte_eth_dev *dev;
1781         struct rte_eth_dev_info dev_info;
1782         struct rte_eth_rxconf local_conf;
1783         void **rxq;
1784
1785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1786
1787         dev = &rte_eth_devices[port_id];
1788         if (rx_queue_id >= dev->data->nb_rx_queues) {
1789                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1790                 return -EINVAL;
1791         }
1792
1793         if (mp == NULL) {
1794                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1795                 return -EINVAL;
1796         }
1797
1798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1799
1800         /*
1801          * Check the size of the mbuf data buffer.
1802          * This value must be provided in the private data of the memory pool.
1803          * First check that the memory pool has a valid private data.
1804          */
1805         ret = rte_eth_dev_info_get(port_id, &dev_info);
1806         if (ret != 0)
1807                 return ret;
1808
1809         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1810                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1811                         mp->name, (int)mp->private_data_size,
1812                         (int)sizeof(struct rte_pktmbuf_pool_private));
1813                 return -ENOSPC;
1814         }
1815         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1816
1817         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1818                 RTE_ETHDEV_LOG(ERR,
1819                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1820                         mp->name, (int)mbp_buf_size,
1821                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1822                         (int)RTE_PKTMBUF_HEADROOM,
1823                         (int)dev_info.min_rx_bufsize);
1824                 return -EINVAL;
1825         }
1826
1827         /* Use default specified by driver, if nb_rx_desc is zero */
1828         if (nb_rx_desc == 0) {
1829                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1830                 /* If driver default is also zero, fall back on EAL default */
1831                 if (nb_rx_desc == 0)
1832                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1833         }
1834
1835         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1836                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1837                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1839                 RTE_ETHDEV_LOG(ERR,
1840                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1841                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1842                         dev_info.rx_desc_lim.nb_min,
1843                         dev_info.rx_desc_lim.nb_align);
1844                 return -EINVAL;
1845         }
1846
1847         if (dev->data->dev_started &&
1848                 !(dev_info.dev_capa &
1849                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1850                 return -EBUSY;
1851
1852         if (dev->data->dev_started &&
1853                 (dev->data->rx_queue_state[rx_queue_id] !=
1854                         RTE_ETH_QUEUE_STATE_STOPPED))
1855                 return -EBUSY;
1856
1857         rxq = dev->data->rx_queues;
1858         if (rxq[rx_queue_id]) {
1859                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1860                                         -ENOTSUP);
1861                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1862                 rxq[rx_queue_id] = NULL;
1863         }
1864
1865         if (rx_conf == NULL)
1866                 rx_conf = &dev_info.default_rxconf;
1867
1868         local_conf = *rx_conf;
1869
1870         /*
1871          * If an offload has already been enabled in
1872          * rte_eth_dev_configure(), it has been enabled on all queues,
1873          * so there is no need to enable it on this queue again.
1874          * The local_conf.offloads input to the underlying PMD only
1875          * carries those offloads that are enabled on this queue alone,
1876          * not those enabled on all queues.
1877          */
1878         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1879
1880         /*
1881          * Offloads newly added for this queue are those not enabled in
1882          * rte_eth_dev_configure(), and they must be of a per-queue type.
1883          * A pure per-port offload can't be enabled on one queue while
1884          * disabled on another, nor can it be newly enabled on a single
1885          * queue if it hasn't already been enabled in
1886          * rte_eth_dev_configure().
1887          */
1888         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1889              local_conf.offloads) {
1890                 RTE_ETHDEV_LOG(ERR,
1891                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1892                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1893                         port_id, rx_queue_id, local_conf.offloads,
1894                         dev_info.rx_queue_offload_capa,
1895                         __func__);
1896                 return -EINVAL;
1897         }
1898
1899         /*
1900          * If LRO is enabled, check that the maximum aggregated packet
1901          * size is supported by the configured device.
1902          */
1903         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1904                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1905                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1906                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1907                 int ret = check_lro_pkt_size(port_id,
1908                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1909                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1910                                 dev_info.max_lro_pkt_size);
1911                 if (ret != 0)
1912                         return ret;
1913         }
1914
1915         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1916                                               socket_id, &local_conf, mp);
1917         if (!ret) {
1918                 if (!dev->data->min_rx_buf_size ||
1919                     dev->data->min_rx_buf_size > mbp_buf_size)
1920                         dev->data->min_rx_buf_size = mbp_buf_size;
1921         }
1922
1923         return eth_err(port_id, ret);
1924 }
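/*
 * Mempool sketch for the buffer-size check above: the data room of each mbuf
 * must cover RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize, otherwise
 * the setup fails with -EINVAL. The pool name and element counts are
 * illustrative assumptions.
 *
 *      struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
 *              8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *      if (mp == NULL)
 *              rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 *      ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *              rte_eth_dev_socket_id(port_id), NULL, mp);
 */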
1925
1926 int
1927 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1928                                uint16_t nb_rx_desc,
1929                                const struct rte_eth_hairpin_conf *conf)
1930 {
1931         int ret;
1932         struct rte_eth_dev *dev;
1933         struct rte_eth_hairpin_cap cap;
1934         void **rxq;
1935         int i;
1936         int count;
1937
1938         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1939
1940         dev = &rte_eth_devices[port_id];
1941         if (rx_queue_id >= dev->data->nb_rx_queues) {
1942                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1943                 return -EINVAL;
1944         }
1945         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1946         if (ret != 0)
1947                 return ret;
1948         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1949                                 -ENOTSUP);
1950         /* If nb_rx_desc is zero, use the max number of descriptors from the driver. */
1951         if (nb_rx_desc == 0)
1952                 nb_rx_desc = cap.max_nb_desc;
1953         if (nb_rx_desc > cap.max_nb_desc) {
1954                 RTE_ETHDEV_LOG(ERR,
1955                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1956                         nb_rx_desc, cap.max_nb_desc);
1957                 return -EINVAL;
1958         }
1959         if (conf->peer_count > cap.max_rx_2_tx) {
1960                 RTE_ETHDEV_LOG(ERR,
1961                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1962                         conf->peer_count, cap.max_rx_2_tx);
1963                 return -EINVAL;
1964         }
1965         if (conf->peer_count == 0) {
1966                 RTE_ETHDEV_LOG(ERR,
1967                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
1968                         conf->peer_count);
1969                 return -EINVAL;
1970         }
1971         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1972              cap.max_nb_queues != UINT16_MAX; i++) {
1973                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1974                         count++;
1975         }
1976         if (count > cap.max_nb_queues) {
1977                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1978                         cap.max_nb_queues);
1979                 return -EINVAL;
1980         }
1981         if (dev->data->dev_started)
1982                 return -EBUSY;
1983         rxq = dev->data->rx_queues;
1984         if (rxq[rx_queue_id] != NULL) {
1985                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1986                                         -ENOTSUP);
1987                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1988                 rxq[rx_queue_id] = NULL;
1989         }
1990         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1991                                                       nb_rx_desc, conf);
1992         if (ret == 0)
1993                 dev->data->rx_queue_state[rx_queue_id] =
1994                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1995         return eth_err(port_id, ret);
1996 }
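/*
 * Hairpin sketch: binding an Rx queue of this port to a Tx queue of a peer
 * port so the device forwards the traffic in hardware. The queue ids and
 * peer_port_id are illustrative assumptions; hairpin queues must be included
 * in the queue counts given to rte_eth_dev_configure().
 *
 *      struct rte_eth_hairpin_conf hairpin_conf = {
 *              .peer_count = 1,
 *              .peers[0] = { .port = peer_port_id, .queue = 1 },
 *      };
 *      if (rte_eth_rx_hairpin_queue_setup(port_id, 1, 0,
 *                      &hairpin_conf) != 0)
 *              rte_exit(EXIT_FAILURE, "Cannot set up hairpin Rx queue\n");
 */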
1997
1998 int
1999 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2000                        uint16_t nb_tx_desc, unsigned int socket_id,
2001                        const struct rte_eth_txconf *tx_conf)
2002 {
2003         struct rte_eth_dev *dev;
2004         struct rte_eth_dev_info dev_info;
2005         struct rte_eth_txconf local_conf;
2006         void **txq;
2007         int ret;
2008
2009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2010
2011         dev = &rte_eth_devices[port_id];
2012         if (tx_queue_id >= dev->data->nb_tx_queues) {
2013                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2014                 return -EINVAL;
2015         }
2016
2017         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2018
2019         ret = rte_eth_dev_info_get(port_id, &dev_info);
2020         if (ret != 0)
2021                 return ret;
2022
2023         /* Use default specified by driver, if nb_tx_desc is zero */
2024         if (nb_tx_desc == 0) {
2025                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2026                 /* If driver default is zero, fall back on EAL default */
2027                 if (nb_tx_desc == 0)
2028                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2029         }
2030         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2031             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2032             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2033                 RTE_ETHDEV_LOG(ERR,
2034                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2035                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2036                         dev_info.tx_desc_lim.nb_min,
2037                         dev_info.tx_desc_lim.nb_align);
2038                 return -EINVAL;
2039         }
2040
2041         if (dev->data->dev_started &&
2042                 !(dev_info.dev_capa &
2043                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2044                 return -EBUSY;
2045
2046         if (dev->data->dev_started &&
2047                 (dev->data->tx_queue_state[tx_queue_id] !=
2048                         RTE_ETH_QUEUE_STATE_STOPPED))
2049                 return -EBUSY;
2050
2051         txq = dev->data->tx_queues;
2052         if (txq[tx_queue_id]) {
2053                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2054                                         -ENOTSUP);
2055                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2056                 txq[tx_queue_id] = NULL;
2057         }
2058
2059         if (tx_conf == NULL)
2060                 tx_conf = &dev_info.default_txconf;
2061
2062         local_conf = *tx_conf;
2063
2064         /*
2065          * If an offload has already been enabled in
2066          * rte_eth_dev_configure(), it has been enabled on all queues,
2067          * so there is no need to enable it on this queue again.
2068          * The local_conf.offloads input to the underlying PMD only
2069          * carries those offloads that are enabled on this queue alone,
2070          * not those enabled on all queues.
2071          */
2072         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2073
2074         /*
2075          * Offloads newly added for this queue are those not enabled in
2076          * rte_eth_dev_configure(), and they must be of a per-queue type.
2077          * A pure per-port offload can't be enabled on one queue while
2078          * disabled on another, nor can it be newly enabled on a single
2079          * queue if it hasn't already been enabled in
2080          * rte_eth_dev_configure().
2081          */
2082         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2083              local_conf.offloads) {
2084                 RTE_ETHDEV_LOG(ERR,
2085                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2086                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2087                         port_id, tx_queue_id, local_conf.offloads,
2088                         dev_info.tx_queue_offload_capa,
2089                         __func__);
2090                 return -EINVAL;
2091         }
2092
2093         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2094                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2095 }
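/*
 * Per-queue offload sketch: adding an offload on a single Tx queue beyond
 * what rte_eth_dev_configure() enabled port-wide. Only offloads advertised
 * in dev_info.tx_queue_offload_capa pass the check above; the fast-free flag
 * below is an illustrative assumption and dev_info is assumed to come from a
 * prior rte_eth_dev_info_get() call.
 *
 *      struct rte_eth_txconf txconf = dev_info.default_txconf;
 *      txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *      ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *              rte_eth_dev_socket_id(port_id), &txconf);
 */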
2096
2097 int
2098 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2099                                uint16_t nb_tx_desc,
2100                                const struct rte_eth_hairpin_conf *conf)
2101 {
2102         struct rte_eth_dev *dev;
2103         struct rte_eth_hairpin_cap cap;
2104         void **txq;
2105         int i;
2106         int count;
2107         int ret;
2108
2109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2110         dev = &rte_eth_devices[port_id];
2111         if (tx_queue_id >= dev->data->nb_tx_queues) {
2112                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2113                 return -EINVAL;
2114         }
2115         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2116         if (ret != 0)
2117                 return ret;
2118         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2119                                 -ENOTSUP);
2120         /* If nb_tx_desc is zero, use the max number of descriptors from the driver. */
2121         if (nb_tx_desc == 0)
2122                 nb_tx_desc = cap.max_nb_desc;
2123         if (nb_tx_desc > cap.max_nb_desc) {
2124                 RTE_ETHDEV_LOG(ERR,
2125                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2126                         nb_tx_desc, cap.max_nb_desc);
2127                 return -EINVAL;
2128         }
2129         if (conf->peer_count > cap.max_tx_2_rx) {
2130                 RTE_ETHDEV_LOG(ERR,
2131                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2132                         conf->peer_count, cap.max_tx_2_rx);
2133                 return -EINVAL;
2134         }
2135         if (conf->peer_count == 0) {
2136                 RTE_ETHDEV_LOG(ERR,
2137                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2138                         conf->peer_count);
2139                 return -EINVAL;
2140         }
2141         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2142              cap.max_nb_queues != UINT16_MAX; i++) {
2143                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2144                         count++;
2145         }
2146         if (count > cap.max_nb_queues) {
2147                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2148                         cap.max_nb_queues);
2149                 return -EINVAL;
2150         }
2151         if (dev->data->dev_started)
2152                 return -EBUSY;
2153         txq = dev->data->tx_queues;
2154         if (txq[tx_queue_id] != NULL) {
2155                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2156                                         -ENOTSUP);
2157                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2158                 txq[tx_queue_id] = NULL;
2159         }
2160         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2161                 (dev, tx_queue_id, nb_tx_desc, conf);
2162         if (ret == 0)
2163                 dev->data->tx_queue_state[tx_queue_id] =
2164                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2165         return eth_err(port_id, ret);
2166 }
2167
2168 void
2169 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2170                 void *userdata __rte_unused)
2171 {
2172         unsigned i;
2173
2174         for (i = 0; i < unsent; i++)
2175                 rte_pktmbuf_free(pkts[i]);
2176 }
2177
2178 void
2179 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2180                 void *userdata)
2181 {
2182         uint64_t *count = userdata;
2183         unsigned i;
2184
2185         for (i = 0; i < unsent; i++)
2186                 rte_pktmbuf_free(pkts[i]);
2187
2188         *count += unsent;
2189 }
2190
2191 int
2192 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2193                 buffer_tx_error_fn cbfn, void *userdata)
2194 {
2195         buffer->error_callback = cbfn;
2196         buffer->error_userdata = userdata;
2197         return 0;
2198 }
2199
2200 int
2201 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2202 {
2203         int ret = 0;
2204
2205         if (buffer == NULL)
2206                 return -EINVAL;
2207
2208         buffer->size = size;
2209         if (buffer->error_callback == NULL) {
2210                 ret = rte_eth_tx_buffer_set_err_callback(
2211                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2212         }
2213
2214         return ret;
2215 }
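/*
 * Tx buffering sketch: allocating a buffer for up to 32 packets and
 * installing the counting callback so that unsent packets are tallied in
 * "drops". The size, names and single-queue use are illustrative
 * assumptions.
 *
 *      static uint64_t drops;
 *      struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buf",
 *              RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
 *      rte_eth_tx_buffer_init(buf, 32);
 *      rte_eth_tx_buffer_set_err_callback(buf,
 *              rte_eth_tx_buffer_count_callback, &drops);
 *      rte_eth_tx_buffer(port_id, 0, buf, pkt);
 *      rte_eth_tx_buffer_flush(port_id, 0, buf);
 */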
2216
2217 int
2218 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2219 {
2220         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2221         int ret;
2222
2223         /* Validate Input Data. Bail if not valid or not supported. */
2224         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2225         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2226
2227         /* Call driver to free pending mbufs. */
2228         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2229                                                free_cnt);
2230         return eth_err(port_id, ret);
2231 }
2232
2233 int
2234 rte_eth_promiscuous_enable(uint16_t port_id)
2235 {
2236         struct rte_eth_dev *dev;
2237         int diag = 0;
2238
2239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2240         dev = &rte_eth_devices[port_id];
2241
2242         if (dev->data->promiscuous == 1)
2243                 return 0;
2244
2245         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2246
2247         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2248         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2249
2250         return eth_err(port_id, diag);
2251 }
2252
2253 int
2254 rte_eth_promiscuous_disable(uint16_t port_id)
2255 {
2256         struct rte_eth_dev *dev;
2257         int diag = 0;
2258
2259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260         dev = &rte_eth_devices[port_id];
2261
2262         if (dev->data->promiscuous == 0)
2263                 return 0;
2264
2265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2266
2267         dev->data->promiscuous = 0;
2268         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2269         if (diag != 0)
2270                 dev->data->promiscuous = 1;
2271
2272         return eth_err(port_id, diag);
2273 }
2274
2275 int
2276 rte_eth_promiscuous_get(uint16_t port_id)
2277 {
2278         struct rte_eth_dev *dev;
2279
2280         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2281
2282         dev = &rte_eth_devices[port_id];
2283         return dev->data->promiscuous;
2284 }
2285
2286 int
2287 rte_eth_allmulticast_enable(uint16_t port_id)
2288 {
2289         struct rte_eth_dev *dev;
2290         int diag;
2291
2292         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2293         dev = &rte_eth_devices[port_id];
2294
2295         if (dev->data->all_multicast == 1)
2296                 return 0;
2297
2298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2299         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2300         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2301
2302         return eth_err(port_id, diag);
2303 }
2304
2305 int
2306 rte_eth_allmulticast_disable(uint16_t port_id)
2307 {
2308         struct rte_eth_dev *dev;
2309         int diag;
2310
2311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2312         dev = &rte_eth_devices[port_id];
2313
2314         if (dev->data->all_multicast == 0)
2315                 return 0;
2316
2317         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2318         dev->data->all_multicast = 0;
2319         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2320         if (diag != 0)
2321                 dev->data->all_multicast = 1;
2322
2323         return eth_err(port_id, diag);
2324 }
2325
2326 int
2327 rte_eth_allmulticast_get(uint16_t port_id)
2328 {
2329         struct rte_eth_dev *dev;
2330
2331         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2332
2333         dev = &rte_eth_devices[port_id];
2334         return dev->data->all_multicast;
2335 }
2336
2337 int
2338 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2339 {
2340         struct rte_eth_dev *dev;
2341
2342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2343         dev = &rte_eth_devices[port_id];
2344
2345         if (dev->data->dev_conf.intr_conf.lsc &&
2346             dev->data->dev_started)
2347                 rte_eth_linkstatus_get(dev, eth_link);
2348         else {
2349                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2350                 (*dev->dev_ops->link_update)(dev, 1);
2351                 *eth_link = dev->data->dev_link;
2352         }
2353
2354         return 0;
2355 }
2356
2357 int
2358 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363         dev = &rte_eth_devices[port_id];
2364
2365         if (dev->data->dev_conf.intr_conf.lsc &&
2366             dev->data->dev_started)
2367                 rte_eth_linkstatus_get(dev, eth_link);
2368         else {
2369                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2370                 (*dev->dev_ops->link_update)(dev, 0);
2371                 *eth_link = dev->data->dev_link;
2372         }
2373
2374         return 0;
2375 }
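/*
 * Link status sketch: a non-blocking query, as a monitoring loop might
 * issue it.
 *
 *      struct rte_eth_link link;
 *      if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *          link.link_status == ETH_LINK_UP)
 *              printf("Port %u up, %u Mbps\n", port_id, link.link_speed);
 */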
2376
2377 int
2378 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2379 {
2380         struct rte_eth_dev *dev;
2381
2382         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2383
2384         dev = &rte_eth_devices[port_id];
2385         memset(stats, 0, sizeof(*stats));
2386
2387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2388         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2389         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2390 }
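/*
 * Basic stats sketch: reading the aggregate counters filled in above.
 *
 *      struct rte_eth_stats st;
 *      if (rte_eth_stats_get(port_id, &st) == 0)
 *              printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *                      st.ipackets, st.opackets, st.imissed);
 */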
2391
2392 int
2393 rte_eth_stats_reset(uint16_t port_id)
2394 {
2395         struct rte_eth_dev *dev;
2396         int ret;
2397
2398         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2399         dev = &rte_eth_devices[port_id];
2400
2401         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2402         ret = (*dev->dev_ops->stats_reset)(dev);
2403         if (ret != 0)
2404                 return eth_err(port_id, ret);
2405
2406         dev->data->rx_mbuf_alloc_failed = 0;
2407
2408         return 0;
2409 }
2410
2411 static inline int
2412 get_xstats_basic_count(struct rte_eth_dev *dev)
2413 {
2414         uint16_t nb_rxqs, nb_txqs;
2415         int count;
2416
2417         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2418         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2419
2420         count = RTE_NB_STATS;
2421         count += nb_rxqs * RTE_NB_RXQ_STATS;
2422         count += nb_txqs * RTE_NB_TXQ_STATS;
2423
2424         return count;
2425 }
2426
2427 static int
2428 get_xstats_count(uint16_t port_id)
2429 {
2430         struct rte_eth_dev *dev;
2431         int count;
2432
2433         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2434         dev = &rte_eth_devices[port_id];
2435         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2436                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2437                                 NULL, 0);
2438                 if (count < 0)
2439                         return eth_err(port_id, count);
2440         }
2441         if (dev->dev_ops->xstats_get_names != NULL) {
2442                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2443                 if (count < 0)
2444                         return eth_err(port_id, count);
2445         } else
2446                 count = 0;
2447
2449         count += get_xstats_basic_count(dev);
2450
2451         return count;
2452 }
2453
2454 int
2455 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2456                 uint64_t *id)
2457 {
2458         int cnt_xstats, idx_xstat;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2461
2462         if (!id) {
2463                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2464                 return -ENOMEM;
2465         }
2466
2467         if (!xstat_name) {
2468                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2469                 return -ENOMEM;
2470         }
2471
2472         /* Get count */
2473         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2474         if (cnt_xstats < 0) {
2475                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2476                 return -ENODEV;
2477         }
2478
2479         /* Get id-name lookup table */
2480         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2481
2482         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2483                         port_id, xstats_names, cnt_xstats, NULL)) {
2484                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2485                 return -1;
2486         }
2487
2488         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2489                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2490                         *id = idx_xstat;
2491                         return 0;
2492                 }
2493         }
2494
2495         return -EINVAL;
2496 }
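/*
 * Lookup sketch: resolving one counter by name and fetching only that value
 * via the by-id path. "rx_good_packets" is one of the generic stats names,
 * so it is available on any port.
 *
 *      uint64_t id, value;
 *      if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *                      &id) == 0 &&
 *          rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *              printf("rx_good_packets = %" PRIu64 "\n", value);
 */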
2497
2498 /* retrieve basic stats names */
2499 static int
2500 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2501         struct rte_eth_xstat_name *xstats_names)
2502 {
2503         int cnt_used_entries = 0;
2504         uint32_t idx, id_queue;
2505         uint16_t num_q;
2506
2507         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2508                 strlcpy(xstats_names[cnt_used_entries].name,
2509                         rte_stats_strings[idx].name,
2510                         sizeof(xstats_names[0].name));
2511                 cnt_used_entries++;
2512         }
2513         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2514         for (id_queue = 0; id_queue < num_q; id_queue++) {
2515                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2516                         snprintf(xstats_names[cnt_used_entries].name,
2517                                 sizeof(xstats_names[0].name),
2518                                 "rx_q%u%s",
2519                                 id_queue, rte_rxq_stats_strings[idx].name);
2520                         cnt_used_entries++;
2521                 }
2522
2523         }
2524         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2525         for (id_queue = 0; id_queue < num_q; id_queue++) {
2526                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2527                         snprintf(xstats_names[cnt_used_entries].name,
2528                                 sizeof(xstats_names[0].name),
2529                                 "tx_q%u%s",
2530                                 id_queue, rte_txq_stats_strings[idx].name);
2531                         cnt_used_entries++;
2532                 }
2533         }
2534         return cnt_used_entries;
2535 }
2536
2537 /* retrieve ethdev extended statistics names */
2538 int
2539 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2540         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2541         uint64_t *ids)
2542 {
2543         struct rte_eth_xstat_name *xstats_names_copy;
2544         unsigned int no_basic_stat_requested = 1;
2545         unsigned int no_ext_stat_requested = 1;
2546         unsigned int expected_entries;
2547         unsigned int basic_count;
2548         struct rte_eth_dev *dev;
2549         unsigned int i;
2550         int ret;
2551
2552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2553         dev = &rte_eth_devices[port_id];
2554
2555         basic_count = get_xstats_basic_count(dev);
2556         ret = get_xstats_count(port_id);
2557         if (ret < 0)
2558                 return ret;
2559         expected_entries = (unsigned int)ret;
2560
2561         /* Return max number of stats if no ids given */
2562         if (!ids) {
2563                 if (!xstats_names)
2564                         return expected_entries;
2565                 else if (xstats_names && size < expected_entries)
2566                         return expected_entries;
2567         }
2568
2569         if (ids && !xstats_names)
2570                 return -EINVAL;
2571
2572         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2573                 uint64_t ids_copy[size];
2574
2575                 for (i = 0; i < size; i++) {
2576                         if (ids[i] < basic_count) {
2577                                 no_basic_stat_requested = 0;
2578                                 break;
2579                         }
2580
2581                         /*
2582                          * Convert ids to xstats ids that PMD knows.
2583                          * ids known by user are basic + extended stats.
2584                          */
2585                         ids_copy[i] = ids[i] - basic_count;
2586                 }
2587
2588                 if (no_basic_stat_requested)
2589                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2590                                         xstats_names, ids_copy, size);
2591         }
2592
2593         /* Retrieve all stats */
2594         if (!ids) {
2595                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2596                                 expected_entries);
2597                 if (num_stats < 0 || num_stats > (int)expected_entries)
2598                         return num_stats;
2599                 else
2600                         return expected_entries;
2601         }
2602
2603         xstats_names_copy = calloc(expected_entries,
2604                 sizeof(struct rte_eth_xstat_name));
2605
2606         if (!xstats_names_copy) {
2607                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2608                 return -ENOMEM;
2609         }
2610
2611         if (ids) {
2612                 for (i = 0; i < size; i++) {
2613                         if (ids[i] >= basic_count) {
2614                                 no_ext_stat_requested = 0;
2615                                 break;
2616                         }
2617                 }
2618         }
2619
2620         /* Fill xstats_names_copy structure */
2621         if (ids && no_ext_stat_requested) {
2622                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2623         } else {
2624                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2625                         expected_entries);
2626                 if (ret < 0) {
2627                         free(xstats_names_copy);
2628                         return ret;
2629                 }
2630         }
2631
2632         /* Filter stats */
2633         for (i = 0; i < size; i++) {
2634                 if (ids[i] >= expected_entries) {
2635                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2636                         free(xstats_names_copy);
2637                         return -1;
2638                 }
2639                 xstats_names[i] = xstats_names_copy[ids[i]];
2640         }
2641
2642         free(xstats_names_copy);
2643         return size;
2644 }
2645
2646 int
2647 rte_eth_xstats_get_names(uint16_t port_id,
2648         struct rte_eth_xstat_name *xstats_names,
2649         unsigned int size)
2650 {
2651         struct rte_eth_dev *dev;
2652         int cnt_used_entries;
2653         int cnt_expected_entries;
2654         int cnt_driver_entries;
2655
2656         cnt_expected_entries = get_xstats_count(port_id);
2657         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2658                         (int)size < cnt_expected_entries)
2659                 return cnt_expected_entries;
2660
2661         /* port_id checked in get_xstats_count() */
2662         dev = &rte_eth_devices[port_id];
2663
2664         cnt_used_entries = rte_eth_basic_stats_get_names(
2665                 dev, xstats_names);
2666
2667         if (dev->dev_ops->xstats_get_names != NULL) {
2668                 /* If there are any driver-specific xstats, append them
2669                  * to end of list.
2670                  */
2671                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2672                         dev,
2673                         xstats_names + cnt_used_entries,
2674                         size - cnt_used_entries);
2675                 if (cnt_driver_entries < 0)
2676                         return eth_err(port_id, cnt_driver_entries);
2677                 cnt_used_entries += cnt_driver_entries;
2678         }
2679
2680         return cnt_used_entries;
2681 }
2682
2683
2684 static int
2685 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2686 {
2687         struct rte_eth_dev *dev;
2688         struct rte_eth_stats eth_stats;
2689         unsigned int count = 0, i, q;
2690         uint64_t val, *stats_ptr;
2691         uint16_t nb_rxqs, nb_txqs;
2692         int ret;
2693
2694         ret = rte_eth_stats_get(port_id, &eth_stats);
2695         if (ret < 0)
2696                 return ret;
2697
2698         dev = &rte_eth_devices[port_id];
2699
2700         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2701         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2702
2703         /* global stats */
2704         for (i = 0; i < RTE_NB_STATS; i++) {
2705                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2706                                         rte_stats_strings[i].offset);
2707                 val = *stats_ptr;
2708                 xstats[count++].value = val;
2709         }
2710
2711         /* per-rxq stats */
2712         for (q = 0; q < nb_rxqs; q++) {
2713                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2714                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2715                                         rte_rxq_stats_strings[i].offset +
2716                                         q * sizeof(uint64_t));
2717                         val = *stats_ptr;
2718                         xstats[count++].value = val;
2719                 }
2720         }
2721
2722         /* per-txq stats */
2723         for (q = 0; q < nb_txqs; q++) {
2724                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2725                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2726                                         rte_txq_stats_strings[i].offset +
2727                                         q * sizeof(uint64_t));
2728                         val = *stats_ptr;
2729                         xstats[count++].value = val;
2730                 }
2731         }
2732         return count;
2733 }
2734
2735 /* retrieve ethdev extended statistics */
2736 int
2737 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2738                          uint64_t *values, unsigned int size)
2739 {
2740         unsigned int no_basic_stat_requested = 1;
2741         unsigned int no_ext_stat_requested = 1;
2742         unsigned int num_xstats_filled;
2743         unsigned int basic_count;
2744         uint16_t expected_entries;
2745         struct rte_eth_dev *dev;
2746         unsigned int i;
2747         int ret;
2748
2749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2750         ret = get_xstats_count(port_id);
2751         if (ret < 0)
2752                 return ret;
2753         expected_entries = (uint16_t)ret;
2754         struct rte_eth_xstat xstats[expected_entries];
2755         dev = &rte_eth_devices[port_id];
2756         basic_count = get_xstats_basic_count(dev);
2757
2758         /* Return max number of stats if no ids given */
2759         if (!ids) {
2760                 if (!values)
2761                         return expected_entries;
2762                 else if (values && size < expected_entries)
2763                         return expected_entries;
2764         }
2765
2766         if (ids && !values)
2767                 return -EINVAL;
2768
2769         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2771                 uint64_t ids_copy[size];
2772
2773                 for (i = 0; i < size; i++) {
2774                         if (ids[i] < basic_count) {
2775                                 no_basic_stat_requested = 0;
2776                                 break;
2777                         }
2778
2779                         /*
2780                          * Convert ids to xstats ids that PMD knows.
2781                          * ids known by user are basic + extended stats.
2782                          */
2783                         ids_copy[i] = ids[i] - basic_count;
2784                 }
2785
2786                 if (no_basic_stat_requested)
2787                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2788                                         values, size);
2789         }
2790
2791         if (ids) {
2792                 for (i = 0; i < size; i++) {
2793                         if (ids[i] >= basic_count) {
2794                                 no_ext_stat_requested = 0;
2795                                 break;
2796                         }
2797                 }
2798         }
2799
2800         /* Fill the xstats structure */
2801         if (ids && no_ext_stat_requested)
2802                 ret = rte_eth_basic_stats_get(port_id, xstats);
2803         else
2804                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2805
2806         if (ret < 0)
2807                 return ret;
2808         num_xstats_filled = (unsigned int)ret;
2809
2810         /* Return all stats */
2811         if (!ids) {
2812                 for (i = 0; i < num_xstats_filled; i++)
2813                         values[i] = xstats[i].value;
2814                 return expected_entries;
2815         }
2816
2817         /* Filter stats */
2818         for (i = 0; i < size; i++) {
2819                 if (ids[i] >= expected_entries) {
2820                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2821                         return -1;
2822                 }
2823                 values[i] = xstats[ids[i]].value;
2824         }
2825         return size;
2826 }
2827
2828 int
2829 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2830         unsigned int n)
2831 {
2832         struct rte_eth_dev *dev;
2833         unsigned int count = 0, i;
2834         signed int xcount = 0;
2835         uint16_t nb_rxqs, nb_txqs;
2836         int ret;
2837
2838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2839
2840         dev = &rte_eth_devices[port_id];
2841
2842         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2843         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2844
2845         /* Return generic statistics */
2846         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2847                 (nb_txqs * RTE_NB_TXQ_STATS);
2848
2849         /* implemented by the driver */
2850         if (dev->dev_ops->xstats_get != NULL) {
2851                 /* Retrieve the xstats from the driver at the end of the
2852                  * xstats struct.
2853                  */
2854                 xcount = (*dev->dev_ops->xstats_get)(dev,
2855                                      xstats ? xstats + count : NULL,
2856                                      (n > count) ? n - count : 0);
2857
2858                 if (xcount < 0)
2859                         return eth_err(port_id, xcount);
2860         }
2861
2862         if (n < count + xcount || xstats == NULL)
2863                 return count + xcount;
2864
2865         /* now fill the xstats structure */
2866         ret = rte_eth_basic_stats_get(port_id, xstats);
2867         if (ret < 0)
2868                 return ret;
2869         count = ret;
2870
2871         for (i = 0; i < count; i++)
2872                 xstats[i].id = i;
2873         /* add an offset to driver-specific stats */
2874         for ( ; i < count + xcount; i++)
2875                 xstats[i].id += count;
2876
2877         return count + xcount;
2878 }
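/*
 * Xstats sketch: the usual two-call pattern with this API. A first call with
 * a NULL array returns the required count; the second call fills the array.
 * Error handling for a negative count is elided in this sketch.
 *
 *      int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *      struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *      if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n)
 *              for (i = 0; i < n; i++)
 *                      printf("[%" PRIu64 "] = %" PRIu64 "\n",
 *                              xs[i].id, xs[i].value);
 *      free(xs);
 */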
2879
2880 /* reset ethdev extended statistics */
2881 int
2882 rte_eth_xstats_reset(uint16_t port_id)
2883 {
2884         struct rte_eth_dev *dev;
2885
2886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2887         dev = &rte_eth_devices[port_id];
2888
2889         /* implemented by the driver */
2890         if (dev->dev_ops->xstats_reset != NULL)
2891                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2892
2893         /* fallback to default */
2894         return rte_eth_stats_reset(port_id);
2895 }
2896
2897 static int
2898 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2899                 uint8_t is_rx)
2900 {
2901         struct rte_eth_dev *dev;
2902
2903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2904
2905         dev = &rte_eth_devices[port_id];
2906
2907         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2908
2909         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2910                 return -EINVAL;
2911
2912         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2913                 return -EINVAL;
2914
2915         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2916                 return -EINVAL;
2917
2918         return (*dev->dev_ops->queue_stats_mapping_set)
2919                         (dev, queue_id, stat_idx, is_rx);
2920 }
2921
2922
2923 int
2924 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2925                 uint8_t stat_idx)
2926 {
2927         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2928                                                 stat_idx, STAT_QMAP_TX));
2929 }
2930
2931
2932 int
2933 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2934                 uint8_t stat_idx)
2935 {
2936         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2937                                                 stat_idx, STAT_QMAP_RX));
2938 }
2939
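/*
 * Usage sketch: on hardware with fewer per-queue counters than queues, map
 * each RX queue to a counter slot before reading per-queue stats.
 * Illustrative only; assumes nb_rx_queues <= RTE_ETHDEV_QUEUE_STAT_CNTRS.
 *
 *	uint16_t q;
 *
 *	for (q = 0; q < nb_rx_queues; q++)
 *		if (rte_eth_dev_set_rx_queue_stats_mapping(port_id, q, q) != 0)
 *			break;
 */
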
2940 int
2941 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2942 {
2943         struct rte_eth_dev *dev;
2944
2945         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2946         dev = &rte_eth_devices[port_id];
2947
2948         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2949         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2950                                                         fw_version, fw_size));
2951 }
2952
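/*
 * Usage sketch: retrieving the firmware version string. A return value
 * greater than zero is taken to mean the buffer was too small; this sketch
 * just reports truncation instead of retrying with a larger buffer.
 *
 *	char fw[64];
 *	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *	if (ret == 0)
 *		printf("port %u firmware: %s\n", port_id, fw);
 *	else if (ret > 0)
 *		printf("port %u firmware string truncated\n", port_id);
 */
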
2953 int
2954 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2955 {
2956         struct rte_eth_dev *dev;
2957         const struct rte_eth_desc_lim lim = {
2958                 .nb_max = UINT16_MAX,
2959                 .nb_min = 0,
2960                 .nb_align = 1,
2961                 .nb_seg_max = UINT16_MAX,
2962                 .nb_mtu_seg_max = UINT16_MAX,
2963         };
2964         int diag;
2965
2966         /*
2967          * Init dev_info before the port_id check so that a caller which
2968          * ignores the return status still sees zeroed contents.
2969          */
2970         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2971         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
2972
2973         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2974         dev = &rte_eth_devices[port_id];
2975
2976         dev_info->rx_desc_lim = lim;
2977         dev_info->tx_desc_lim = lim;
2978         dev_info->device = dev->device;
2979         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2980         dev_info->max_mtu = UINT16_MAX;
2981
2982         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2983         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2984         if (diag != 0) {
2985                 /* Cleanup already filled in device information */
2986                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2987                 return eth_err(port_id, diag);
2988         }
2989
2990         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
2991         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
2992                         RTE_MAX_QUEUES_PER_PORT);
2993         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
2994                         RTE_MAX_QUEUES_PER_PORT);
2995
2996         dev_info->driver_name = dev->device->driver->name;
2997         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2998         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2999
3000         dev_info->dev_flags = &dev->data->dev_flags;
3001
3002         return 0;
3003 }
3004
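/*
 * Usage sketch: bounding the queue counts passed to rte_eth_dev_configure()
 * with the limits reported here. The requested counts and the zeroed port
 * configuration are placeholders.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_conf conf;
 *	uint16_t nb_rx = 4, nb_tx = 4;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return -1;
 *	nb_rx = RTE_MIN(nb_rx, info.max_rx_queues);
 *	nb_tx = RTE_MIN(nb_tx, info.max_tx_queues);
 *	return rte_eth_dev_configure(port_id, nb_rx, nb_tx, &conf);
 */
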
3005 int
3006 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3007                                  uint32_t *ptypes, int num)
3008 {
3009         int i, j;
3010         struct rte_eth_dev *dev;
3011         const uint32_t *all_ptypes;
3012
3013         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3014         dev = &rte_eth_devices[port_id];
3015         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3016         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3017
3018         if (!all_ptypes)
3019                 return 0;
3020
3021         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3022                 if (all_ptypes[i] & ptype_mask) {
3023                         if (j < num)
3024                                 ptypes[j] = all_ptypes[i];
3025                         j++;
3026                 }
3027
3028         return j;
3029 }
3030
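/*
 * Usage sketch: listing which L4 packet types the port can recognize.
 * Illustrative only; a fixed-size local array is used and any excess entries
 * reported by the driver are ignored.
 *
 *	uint32_t ptypes[32];
 *	int i, n;
 *
 *	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *					     ptypes, RTE_DIM(ptypes));
 *	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *		printf("supported ptype 0x%08x\n", ptypes[i]);
 */
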
3031 int
3032 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3033                                  uint32_t *set_ptypes, unsigned int num)
3034 {
3035         const uint32_t valid_ptype_masks[] = {
3036                 RTE_PTYPE_L2_MASK,
3037                 RTE_PTYPE_L3_MASK,
3038                 RTE_PTYPE_L4_MASK,
3039                 RTE_PTYPE_TUNNEL_MASK,
3040                 RTE_PTYPE_INNER_L2_MASK,
3041                 RTE_PTYPE_INNER_L3_MASK,
3042                 RTE_PTYPE_INNER_L4_MASK,
3043         };
3044         const uint32_t *all_ptypes;
3045         struct rte_eth_dev *dev;
3046         uint32_t unused_mask;
3047         unsigned int i, j;
3048         int ret;
3049
3050         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3051         dev = &rte_eth_devices[port_id];
3052
3053         if (num > 0 && set_ptypes == NULL)
3054                 return -EINVAL;
3055
3056         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3057                         *dev->dev_ops->dev_ptypes_set == NULL) {
3058                 ret = 0;
3059                 goto ptype_unknown;
3060         }
3061
3062         if (ptype_mask == 0) {
3063                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3064                                 ptype_mask);
3065                 goto ptype_unknown;
3066         }
3067
3068         unused_mask = ptype_mask;
3069         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3070                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3071                 if (mask && mask != valid_ptype_masks[i]) {
3072                         ret = -EINVAL;
3073                         goto ptype_unknown;
3074                 }
3075                 unused_mask &= ~valid_ptype_masks[i];
3076         }
3077
3078         if (unused_mask) {
3079                 ret = -EINVAL;
3080                 goto ptype_unknown;
3081         }
3082
3083         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3084         if (all_ptypes == NULL) {
3085                 ret = 0;
3086                 goto ptype_unknown;
3087         }
3088
3089         /*
3090          * Accommodate as many set_ptypes as possible. If the supplied
3091          * set_ptypes array is too small, fill it partially.
3092          */
3093         for (i = 0, j = 0; set_ptypes != NULL &&
3094                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3095                 if (ptype_mask & all_ptypes[i]) {
3096                         if (j < num - 1) {
3097                                 set_ptypes[j] = all_ptypes[i];
3098                                 j++;
3099                                 continue;
3100                         }
3101                         break;
3102                 }
3103         }
3104
3105         if (set_ptypes != NULL && j < num)
3106                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3107
3108         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3109
3110 ptype_unknown:
3111         if (num > 0)
3112                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3113
3114         return ret;
3115 }
3116
3117 int
3118 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3119 {
3120         struct rte_eth_dev *dev;
3121
3122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3123         dev = &rte_eth_devices[port_id];
3124         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3125
3126         return 0;
3127 }
3128
3129 int
3130 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3131 {
3132         struct rte_eth_dev *dev;
3133
3134         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3135
3136         dev = &rte_eth_devices[port_id];
3137         *mtu = dev->data->mtu;
3138         return 0;
3139 }
3140
3141 int
3142 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3143 {
3144         int ret;
3145         struct rte_eth_dev_info dev_info;
3146         struct rte_eth_dev *dev;
3147
3148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3149         dev = &rte_eth_devices[port_id];
3150         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3151
3152         /*
3153          * Check if the device supports dev_infos_get; if it does not,
3154          * skip the min_mtu/max_mtu validation here, since it needs values
3155          * that are populated by rte_eth_dev_info_get(), which in turn
3156          * relies on dev->dev_ops->dev_infos_get.
3157          */
3158         if (*dev->dev_ops->dev_infos_get != NULL) {
3159                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3160                 if (ret != 0)
3161                         return ret;
3162
3163                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3164                         return -EINVAL;
3165         }
3166
3167         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3168         if (!ret)
3169                 dev->data->mtu = mtu;
3170
3171         return eth_err(port_id, ret);
3172 }
3173
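/*
 * Usage sketch: checking a requested MTU against the reported device limits
 * before applying it. rte_eth_dev_set_mtu() repeats this check internally
 * when dev_infos_get is available; doing it in the caller just gives a
 * clearer error path. The value 9000 is a placeholder.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t new_mtu = 9000;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return -1;
 *	if (new_mtu < info.min_mtu || new_mtu > info.max_mtu)
 *		return -EINVAL;
 *	return rte_eth_dev_set_mtu(port_id, new_mtu);
 */
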
3174 int
3175 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3176 {
3177         struct rte_eth_dev *dev;
3178         int ret;
3179
3180         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3181         dev = &rte_eth_devices[port_id];
3182         if (!(dev->data->dev_conf.rxmode.offloads &
3183               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3184                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3185                         port_id);
3186                 return -ENOSYS;
3187         }
3188
3189         if (vlan_id > 4095) {
3190                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3191                         port_id, vlan_id);
3192                 return -EINVAL;
3193         }
3194         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3195
3196         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3197         if (ret == 0) {
3198                 struct rte_vlan_filter_conf *vfc;
3199                 int vidx;
3200                 int vbit;
3201
3202                 vfc = &dev->data->vlan_filter_conf;
3203                 vidx = vlan_id / 64;
3204                 vbit = vlan_id % 64;
3205
3206                 if (on)
3207                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3208                 else
3209                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3210         }
3211
3212         return eth_err(port_id, ret);
3213 }
3214
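/*
 * Usage sketch: VLAN filtering must be enabled as an RX offload before
 * individual VLAN IDs can be accepted, otherwise the call above fails with
 * -ENOSYS. The single queue pair and VLAN ID 100 are placeholders.
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		return -1;
 *	return rte_eth_dev_vlan_filter(port_id, 100, 1);
 */
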
3215 int
3216 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3217                                     int on)
3218 {
3219         struct rte_eth_dev *dev;
3220
3221         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3222         dev = &rte_eth_devices[port_id];
3223         if (rx_queue_id >= dev->data->nb_rx_queues) {
3224                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3225                 return -EINVAL;
3226         }
3227
3228         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3229         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3230
3231         return 0;
3232 }
3233
3234 int
3235 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3236                                 enum rte_vlan_type vlan_type,
3237                                 uint16_t tpid)
3238 {
3239         struct rte_eth_dev *dev;
3240
3241         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3242         dev = &rte_eth_devices[port_id];
3243         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3244
3245         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3246                                                                tpid));
3247 }
3248
3249 int
3250 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3251 {
3252         struct rte_eth_dev *dev;
3253         int ret = 0;
3254         int mask = 0;
3255         int cur, org = 0;
3256         uint64_t orig_offloads;
3257         uint64_t *dev_offloads;
3258
3259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3260         dev = &rte_eth_devices[port_id];
3261
3262         /* save original values in case of failure */
3263         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3264         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3265
3266         /* check which options were changed by the application */
3267         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3268         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3269         if (cur != org) {
3270                 if (cur)
3271                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3272                 else
3273                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3274                 mask |= ETH_VLAN_STRIP_MASK;
3275         }
3276
3277         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3278         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3279         if (cur != org) {
3280                 if (cur)
3281                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3282                 else
3283                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3284                 mask |= ETH_VLAN_FILTER_MASK;
3285         }
3286
3287         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3288         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3289         if (cur != org) {
3290                 if (cur)
3291                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3292                 else
3293                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3294                 mask |= ETH_VLAN_EXTEND_MASK;
3295         }
3296
3297         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3298         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3299         if (cur != org) {
3300                 if (cur)
3301                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3302                 else
3303                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3304                 mask |= ETH_QINQ_STRIP_MASK;
3305         }
3306
3307         /* no change */
3308         if (mask == 0)
3309                 return ret;
3310
3311         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3312         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3313         if (ret) {
3314                 /* hit an error, restore the original values */
3315                 *dev_offloads = orig_offloads;
3316         }
3317
3318         return eth_err(port_id, ret);
3319 }
3320
3321 int
3322 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3323 {
3324         struct rte_eth_dev *dev;
3325         uint64_t *dev_offloads;
3326         int ret = 0;
3327
3328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3329         dev = &rte_eth_devices[port_id];
3330         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3331
3332         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3333                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3334
3335         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3336                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3337
3338         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3339                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3340
3341         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3342                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3343
3344         return ret;
3345 }
3346
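/*
 * Usage sketch: read-modify-write of the VLAN offload flags, enabling
 * stripping at runtime while leaving the other bits untouched.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask < 0)
 *		return mask;
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	return rte_eth_dev_set_vlan_offload(port_id, mask);
 */
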
3347 int
3348 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3349 {
3350         struct rte_eth_dev *dev;
3351
3352         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3353         dev = &rte_eth_devices[port_id];
3354         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3355
3356         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3357 }
3358
3359 int
3360 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3361 {
3362         struct rte_eth_dev *dev;
3363
3364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3365         dev = &rte_eth_devices[port_id];
3366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3367         memset(fc_conf, 0, sizeof(*fc_conf));
3368         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3369 }
3370
3371 int
3372 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3373 {
3374         struct rte_eth_dev *dev;
3375
3376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3377         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3378                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3379                 return -EINVAL;
3380         }
3381
3382         dev = &rte_eth_devices[port_id];
3383         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3384         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3385 }
3386
3387 int
3388 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3389                                    struct rte_eth_pfc_conf *pfc_conf)
3390 {
3391         struct rte_eth_dev *dev;
3392
3393         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3394         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3395                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3396                 return -EINVAL;
3397         }
3398
3399         dev = &rte_eth_devices[port_id];
3400         /* High water and low water validation are device-specific */
3401         if (*dev->dev_ops->priority_flow_ctrl_set)
3402                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3403                                         (dev, pfc_conf));
3404         return -ENOTSUP;
3405 }
3406
3407 static int
3408 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3409                         uint16_t reta_size)
3410 {
3411         uint16_t i, num;
3412
3413         if (!reta_conf)
3414                 return -EINVAL;
3415
3416         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3417         for (i = 0; i < num; i++) {
3418                 if (reta_conf[i].mask)
3419                         return 0;
3420         }
3421
3422         return -EINVAL;
3423 }
3424
3425 static int
3426 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3427                          uint16_t reta_size,
3428                          uint16_t max_rxq)
3429 {
3430         uint16_t i, idx, shift;
3431
3432         if (!reta_conf)
3433                 return -EINVAL;
3434
3435         if (max_rxq == 0) {
3436                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3437                 return -EINVAL;
3438         }
3439
3440         for (i = 0; i < reta_size; i++) {
3441                 idx = i / RTE_RETA_GROUP_SIZE;
3442                 shift = i % RTE_RETA_GROUP_SIZE;
3443                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3444                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3445                         RTE_ETHDEV_LOG(ERR,
3446                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3447                                 idx, shift,
3448                                 reta_conf[idx].reta[shift], max_rxq);
3449                         return -EINVAL;
3450                 }
3451         }
3452
3453         return 0;
3454 }
3455
3456 int
3457 rte_eth_dev_rss_reta_update(uint16_t port_id,
3458                             struct rte_eth_rss_reta_entry64 *reta_conf,
3459                             uint16_t reta_size)
3460 {
3461         struct rte_eth_dev *dev;
3462         int ret;
3463
3464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3465         /* Check mask bits */
3466         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3467         if (ret < 0)
3468                 return ret;
3469
3470         dev = &rte_eth_devices[port_id];
3471
3472         /* Check entry value */
3473         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3474                                 dev->data->nb_rx_queues);
3475         if (ret < 0)
3476                 return ret;
3477
3478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3479         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3480                                                              reta_size));
3481 }
3482
3483 int
3484 rte_eth_dev_rss_reta_query(uint16_t port_id,
3485                            struct rte_eth_rss_reta_entry64 *reta_conf,
3486                            uint16_t reta_size)
3487 {
3488         struct rte_eth_dev *dev;
3489         int ret;
3490
3491         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3492
3493         /* Check mask bits */
3494         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3495         if (ret < 0)
3496                 return ret;
3497
3498         dev = &rte_eth_devices[port_id];
3499         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3500         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3501                                                             reta_size));
3502 }
3503
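/*
 * Usage sketch: spreading the redirection table evenly over the configured
 * RX queues. Entries are grouped 64 per rte_eth_rss_reta_entry64 and
 * selected through the per-group mask, as validated above. Illustrative
 * only; assumes reta_size <= ETH_RSS_RETA_SIZE_512 and nb_queues > 0.
 *
 *	struct rte_eth_rss_reta_entry64
 *		conf[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
 *	struct rte_eth_dev_info info;
 *	uint16_t i;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return -1;
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < info.reta_size; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_queues;
 *	}
 *	return rte_eth_dev_rss_reta_update(port_id, conf, info.reta_size);
 */
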
3504 int
3505 rte_eth_dev_rss_hash_update(uint16_t port_id,
3506                             struct rte_eth_rss_conf *rss_conf)
3507 {
3508         struct rte_eth_dev *dev;
3509         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3510         int ret;
3511
3512         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3513
3514         ret = rte_eth_dev_info_get(port_id, &dev_info);
3515         if (ret != 0)
3516                 return ret;
3517
3518         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3519
3520         dev = &rte_eth_devices[port_id];
3521         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3522             dev_info.flow_type_rss_offloads) {
3523                 RTE_ETHDEV_LOG(ERR,
3524                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3525                         port_id, rss_conf->rss_hf,
3526                         dev_info.flow_type_rss_offloads);
3527                 return -EINVAL;
3528         }
3529         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3530         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3531                                                                  rss_conf));
3532 }
3533
3534 int
3535 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3536                               struct rte_eth_rss_conf *rss_conf)
3537 {
3538         struct rte_eth_dev *dev;
3539
3540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3541         dev = &rte_eth_devices[port_id];
3542         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3543         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3544                                                                    rss_conf));
3545 }
3546
3547 int
3548 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3549                                 struct rte_eth_udp_tunnel *udp_tunnel)
3550 {
3551         struct rte_eth_dev *dev;
3552
3553         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3554         if (udp_tunnel == NULL) {
3555                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3556                 return -EINVAL;
3557         }
3558
3559         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3560                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3561                 return -EINVAL;
3562         }
3563
3564         dev = &rte_eth_devices[port_id];
3565         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3566         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3567                                                                 udp_tunnel));
3568 }
3569
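/*
 * Usage sketch: registering the IANA-assigned VXLAN port so that matching
 * UDP traffic is classified as tunneled.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */
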
3570 int
3571 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3572                                    struct rte_eth_udp_tunnel *udp_tunnel)
3573 {
3574         struct rte_eth_dev *dev;
3575
3576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3577         dev = &rte_eth_devices[port_id];
3578
3579         if (udp_tunnel == NULL) {
3580                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3581                 return -EINVAL;
3582         }
3583
3584         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3585                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3586                 return -EINVAL;
3587         }
3588
3589         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3590         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3591                                                                 udp_tunnel));
3592 }
3593
3594 int
3595 rte_eth_led_on(uint16_t port_id)
3596 {
3597         struct rte_eth_dev *dev;
3598
3599         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3600         dev = &rte_eth_devices[port_id];
3601         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3602         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3603 }
3604
3605 int
3606 rte_eth_led_off(uint16_t port_id)
3607 {
3608         struct rte_eth_dev *dev;
3609
3610         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3611         dev = &rte_eth_devices[port_id];
3612         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3613         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3614 }
3615
3616 /*
3617  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3618  * an empty spot.
3619  */
3620 static int
3621 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3622 {
3623         struct rte_eth_dev_info dev_info;
3624         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3625         unsigned i;
3626         int ret;
3627
3628         ret = rte_eth_dev_info_get(port_id, &dev_info);
3629         if (ret != 0)
3630                 return -1;
3631
3632         for (i = 0; i < dev_info.max_mac_addrs; i++)
3633                 if (memcmp(addr, &dev->data->mac_addrs[i],
3634                                 RTE_ETHER_ADDR_LEN) == 0)
3635                         return i;
3636
3637         return -1;
3638 }
3639
3640 static const struct rte_ether_addr null_mac_addr;
3641
3642 int
3643 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3644                         uint32_t pool)
3645 {
3646         struct rte_eth_dev *dev;
3647         int index;
3648         uint64_t pool_mask;
3649         int ret;
3650
3651         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3652         dev = &rte_eth_devices[port_id];
3653         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3654
3655         if (rte_is_zero_ether_addr(addr)) {
3656                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3657                         port_id);
3658                 return -EINVAL;
3659         }
3660         if (pool >= ETH_64_POOLS) {
3661                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3662                 return -EINVAL;
3663         }
3664
3665         index = get_mac_addr_index(port_id, addr);
3666         if (index < 0) {
3667                 index = get_mac_addr_index(port_id, &null_mac_addr);
3668                 if (index < 0) {
3669                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3670                                 port_id);
3671                         return -ENOSPC;
3672                 }
3673         } else {
3674                 pool_mask = dev->data->mac_pool_sel[index];
3675
3676                 /* If both the MAC address and pool are already there, do nothing */
3677                 if (pool_mask & (1ULL << pool))
3678                         return 0;
3679         }
3680
3681         /* Update NIC */
3682         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3683
3684         if (ret == 0) {
3685                 /* Update address in NIC data structure */
3686                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3687
3688                 /* Update pool bitmap in NIC data structure */
3689                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3690         }
3691
3692         return eth_err(port_id, ret);
3693 }
3694
3695 int
3696 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3697 {
3698         struct rte_eth_dev *dev;
3699         int index;
3700
3701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3702         dev = &rte_eth_devices[port_id];
3703         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3704
3705         index = get_mac_addr_index(port_id, addr);
3706         if (index == 0) {
3707                 RTE_ETHDEV_LOG(ERR,
3708                         "Port %u: Cannot remove default MAC address\n",
3709                         port_id);
3710                 return -EADDRINUSE;
3711         } else if (index < 0)
3712                 return 0;  /* Do nothing if address wasn't found */
3713
3714         /* Update NIC */
3715         (*dev->dev_ops->mac_addr_remove)(dev, index);
3716
3717         /* Update address in NIC data structure */
3718         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3719
3720         /* reset pool bitmap */
3721         dev->data->mac_pool_sel[index] = 0;
3722
3723         return 0;
3724 }
3725
3726 int
3727 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3728 {
3729         struct rte_eth_dev *dev;
3730         int ret;
3731
3732         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3733
3734         if (!rte_is_valid_assigned_ether_addr(addr))
3735                 return -EINVAL;
3736
3737         dev = &rte_eth_devices[port_id];
3738         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3739
3740         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3741         if (ret < 0)
3742                 return ret;
3743
3744         /* Update default address in NIC data structure */
3745         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3746
3747         return 0;
3748 }
3749
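/*
 * Usage sketch: installing an extra unicast address in pool 0 and then
 * promoting it to the default address. The locally administered address
 * bytes are placeholders.
 *
 *	struct rte_ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) != 0)
 *		return -1;
 *	return rte_eth_dev_default_mac_addr_set(port_id, &extra);
 */
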
3750
3751 /*
3752  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3753  * an empty spot.
3754  */
3755 static int
3756 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3757 {
3758         struct rte_eth_dev_info dev_info;
3759         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3760         unsigned i;
3761         int ret;
3762
3763         ret = rte_eth_dev_info_get(port_id, &dev_info);
3764         if (ret != 0)
3765                 return -1;
3766
3767         if (!dev->data->hash_mac_addrs)
3768                 return -1;
3769
3770         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3771                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3772                         RTE_ETHER_ADDR_LEN) == 0)
3773                         return i;
3774
3775         return -1;
3776 }
3777
3778 int
3779 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3780                                 uint8_t on)
3781 {
3782         int index;
3783         int ret;
3784         struct rte_eth_dev *dev;
3785
3786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3787
3788         dev = &rte_eth_devices[port_id];
3789         if (rte_is_zero_ether_addr(addr)) {
3790                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3791                         port_id);
3792                 return -EINVAL;
3793         }
3794
3795         index = get_hash_mac_addr_index(port_id, addr);
3796         /* Check if it's already there, and do nothing */
3797         if ((index >= 0) && on)
3798                 return 0;
3799
3800         if (index < 0) {
3801                 if (!on) {
3802                         RTE_ETHDEV_LOG(ERR,
3803                                 "Port %u: the MAC address was not set in UTA\n",
3804                                 port_id);
3805                         return -EINVAL;
3806                 }
3807
3808                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3809                 if (index < 0) {
3810                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3811                                 port_id);
3812                         return -ENOSPC;
3813                 }
3814         }
3815
3816         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3817         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3818         if (ret == 0) {
3819                 /* Update address in NIC data structure */
3820                 if (on)
3821                         rte_ether_addr_copy(addr,
3822                                         &dev->data->hash_mac_addrs[index]);
3823                 else
3824                         rte_ether_addr_copy(&null_mac_addr,
3825                                         &dev->data->hash_mac_addrs[index]);
3826         }
3827
3828         return eth_err(port_id, ret);
3829 }
3830
3831 int
3832 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3833 {
3834         struct rte_eth_dev *dev;
3835
3836         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3837
3838         dev = &rte_eth_devices[port_id];
3839
3840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3841         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3842                                                                        on));
3843 }
3844
3845 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3846                                         uint16_t tx_rate)
3847 {
3848         struct rte_eth_dev *dev;
3849         struct rte_eth_dev_info dev_info;
3850         struct rte_eth_link link;
3851         int ret;
3852
3853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3854
3855         ret = rte_eth_dev_info_get(port_id, &dev_info);
3856         if (ret != 0)
3857                 return ret;
3858
3859         dev = &rte_eth_devices[port_id];
3860         link = dev->data->dev_link;
3861
3862         if (queue_idx >= dev_info.max_tx_queues) {
3863                 RTE_ETHDEV_LOG(ERR,
3864                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3865                         port_id, queue_idx);
3866                 return -EINVAL;
3867         }
3868
3869         if (tx_rate > link.link_speed) {
3870                 RTE_ETHDEV_LOG(ERR,
3871                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3872                         tx_rate, link.link_speed);
3873                 return -EINVAL;
3874         }
3875
3876         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3877         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3878                                                         queue_idx, tx_rate));
3879 }
3880
3881 int
3882 rte_eth_mirror_rule_set(uint16_t port_id,
3883                         struct rte_eth_mirror_conf *mirror_conf,
3884                         uint8_t rule_id, uint8_t on)
3885 {
3886         struct rte_eth_dev *dev;
3887
3888         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3889         if (mirror_conf->rule_type == 0) {
3890                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3891                 return -EINVAL;
3892         }
3893
3894         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3895                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3896                         ETH_64_POOLS - 1);
3897                 return -EINVAL;
3898         }
3899
3900         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3901              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3902             (mirror_conf->pool_mask == 0)) {
3903                 RTE_ETHDEV_LOG(ERR,
3904                         "Invalid mirror pool, pool mask cannot be 0\n");
3905                 return -EINVAL;
3906         }
3907
3908         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3909             mirror_conf->vlan.vlan_mask == 0) {
3910                 RTE_ETHDEV_LOG(ERR,
3911                         "Invalid vlan mask, vlan mask cannot be 0\n");
3912                 return -EINVAL;
3913         }
3914
3915         dev = &rte_eth_devices[port_id];
3916         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3917
3918         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3919                                                 mirror_conf, rule_id, on));
3920 }
3921
3922 int
3923 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3924 {
3925         struct rte_eth_dev *dev;
3926
3927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3928
3929         dev = &rte_eth_devices[port_id];
3930         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3931
3932         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3933                                                                    rule_id));
3934 }
3935
3936 RTE_INIT(eth_dev_init_cb_lists)
3937 {
3938         int i;
3939
3940         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3941                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3942 }
3943
3944 int
3945 rte_eth_dev_callback_register(uint16_t port_id,
3946                         enum rte_eth_event_type event,
3947                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3948 {
3949         struct rte_eth_dev *dev;
3950         struct rte_eth_dev_callback *user_cb;
3951         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3952         uint16_t last_port;
3953
3954         if (!cb_fn)
3955                 return -EINVAL;
3956
3957         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3958                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3959                 return -EINVAL;
3960         }
3961
3962         if (port_id == RTE_ETH_ALL) {
3963                 next_port = 0;
3964                 last_port = RTE_MAX_ETHPORTS - 1;
3965         } else {
3966                 next_port = last_port = port_id;
3967         }
3968
3969         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3970
3971         do {
3972                 dev = &rte_eth_devices[next_port];
3973
3974                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3975                         if (user_cb->cb_fn == cb_fn &&
3976                                 user_cb->cb_arg == cb_arg &&
3977                                 user_cb->event == event) {
3978                                 break;
3979                         }
3980                 }
3981
3982                 /* create a new callback. */
3983                 if (user_cb == NULL) {
3984                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3985                                 sizeof(struct rte_eth_dev_callback), 0);
3986                         if (user_cb != NULL) {
3987                                 user_cb->cb_fn = cb_fn;
3988                                 user_cb->cb_arg = cb_arg;
3989                                 user_cb->event = event;
3990                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3991                                                   user_cb, next);
3992                         } else {
3993                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3994                                 rte_eth_dev_callback_unregister(port_id, event,
3995                                                                 cb_fn, cb_arg);
3996                                 return -ENOMEM;
3997                         }
3998
3999                 }
4000         } while (++next_port <= last_port);
4001
4002         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4003         return 0;
4004 }
4005
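/*
 * Usage sketch: one handler for link-state-change events on every port,
 * following the rte_eth_dev_cb_fn signature. The handler body is
 * illustrative.
 *
 *	static int
 *	on_link_event(uint16_t port_id, enum rte_eth_event_type event,
 *		      void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      on_link_event, NULL);
 */
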
4006 int
4007 rte_eth_dev_callback_unregister(uint16_t port_id,
4008                         enum rte_eth_event_type event,
4009                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4010 {
4011         int ret;
4012         struct rte_eth_dev *dev;
4013         struct rte_eth_dev_callback *cb, *next;
4014         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4015         uint16_t last_port;
4016
4017         if (!cb_fn)
4018                 return -EINVAL;
4019
4020         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4021                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4022                 return -EINVAL;
4023         }
4024
4025         if (port_id == RTE_ETH_ALL) {
4026                 next_port = 0;
4027                 last_port = RTE_MAX_ETHPORTS - 1;
4028         } else {
4029                 next_port = last_port = port_id;
4030         }
4031
4032         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4033
4034         do {
4035                 dev = &rte_eth_devices[next_port];
4036                 ret = 0;
4037                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4038                      cb = next) {
4039
4040                         next = TAILQ_NEXT(cb, next);
4041
4042                         if (cb->cb_fn != cb_fn || cb->event != event ||
4043                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4044                                 continue;
4045
4046                         /*
4047                          * if this callback is not executing right now,
4048                          * then remove it.
4049                          */
4050                         if (cb->active == 0) {
4051                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4052                                 rte_free(cb);
4053                         } else {
4054                                 ret = -EAGAIN;
4055                         }
4056                 }
4057         } while (++next_port <= last_port);
4058
4059         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4060         return ret;
4061 }
4062
4063 int
4064 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4065         enum rte_eth_event_type event, void *ret_param)
4066 {
4067         struct rte_eth_dev_callback *cb_lst;
4068         struct rte_eth_dev_callback dev_cb;
4069         int rc = 0;
4070
4071         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4072         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4073                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4074                         continue;
4075                 dev_cb = *cb_lst;
4076                 cb_lst->active = 1;
4077                 if (ret_param != NULL)
4078                         dev_cb.ret_param = ret_param;
4079
4080                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4081                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4082                                 dev_cb.cb_arg, dev_cb.ret_param);
4083                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4084                 cb_lst->active = 0;
4085         }
4086         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4087         return rc;
4088 }
4089
4090 void
4091 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4092 {
4093         if (dev == NULL)
4094                 return;
4095
4096         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4097
4098         dev->state = RTE_ETH_DEV_ATTACHED;
4099 }
4100
4101 int
4102 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4103 {
4104         uint32_t vec;
4105         struct rte_eth_dev *dev;
4106         struct rte_intr_handle *intr_handle;
4107         uint16_t qid;
4108         int rc;
4109
4110         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4111
4112         dev = &rte_eth_devices[port_id];
4113
4114         if (!dev->intr_handle) {
4115                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4116                 return -ENOTSUP;
4117         }
4118
4119         intr_handle = dev->intr_handle;
4120         if (!intr_handle->intr_vec) {
4121                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4122                 return -EPERM;
4123         }
4124
4125         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4126                 vec = intr_handle->intr_vec[qid];
4127                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4128                 if (rc && rc != -EEXIST) {
4129                         RTE_ETHDEV_LOG(ERR,
4130                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4131                                 port_id, qid, op, epfd, vec);
4132                 }
4133         }
4134
4135         return 0;
4136 }
4137
4138 int
4139 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4140 {
4141         struct rte_intr_handle *intr_handle;
4142         struct rte_eth_dev *dev;
4143         unsigned int efd_idx;
4144         uint32_t vec;
4145         int fd;
4146
4147         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4148
4149         dev = &rte_eth_devices[port_id];
4150
4151         if (queue_id >= dev->data->nb_rx_queues) {
4152                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4153                 return -1;
4154         }
4155
4156         if (!dev->intr_handle) {
4157                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4158                 return -1;
4159         }
4160
4161         intr_handle = dev->intr_handle;
4162         if (!intr_handle->intr_vec) {
4163                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4164                 return -1;
4165         }
4166
4167         vec = intr_handle->intr_vec[queue_id];
4168         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4169                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4170         fd = intr_handle->efds[efd_idx];
4171
4172         return fd;
4173 }
4174
4175 const struct rte_memzone *
4176 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4177                          uint16_t queue_id, size_t size, unsigned align,
4178                          int socket_id)
4179 {
4180         char z_name[RTE_MEMZONE_NAMESIZE];
4181         const struct rte_memzone *mz;
4182         int rc;
4183
4184         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
4185                       dev->data->port_id, queue_id, ring_name);
4186         if (rc >= RTE_MEMZONE_NAMESIZE) {
4187                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4188                 rte_errno = ENAMETOOLONG;
4189                 return NULL;
4190         }
4191
4192         mz = rte_memzone_lookup(z_name);
4193         if (mz)
4194                 return mz;
4195
4196         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4197                         RTE_MEMZONE_IOVA_CONTIG, align);
4198 }
4199
4200 int
4201 rte_eth_dev_create(struct rte_device *device, const char *name,
4202         size_t priv_data_size,
4203         ethdev_bus_specific_init ethdev_bus_specific_init,
4204         void *bus_init_params,
4205         ethdev_init_t ethdev_init, void *init_params)
4206 {
4207         struct rte_eth_dev *ethdev;
4208         int retval;
4209
4210         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4211
4212         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4213                 ethdev = rte_eth_dev_allocate(name);
4214                 if (!ethdev)
4215                         return -ENODEV;
4216
4217                 if (priv_data_size) {
4218                         ethdev->data->dev_private = rte_zmalloc_socket(
4219                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4220                                 device->numa_node);
4221
4222                         if (!ethdev->data->dev_private) {
4223                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4224                                 retval = -ENOMEM;
4225                                 goto probe_failed;
4226                         }
4227                 }
4228         } else {
4229                 ethdev = rte_eth_dev_attach_secondary(name);
4230                 if (!ethdev) {
4231                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4232                                 "ethdev doesn't exist\n");
4233                         return -ENODEV;
4234                 }
4235         }
4236
4237         ethdev->device = device;
4238
4239         if (ethdev_bus_specific_init) {
4240                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4241                 if (retval) {
4242                         RTE_LOG(ERR, EAL,
4243                                 "ethdev bus specific initialisation failed\n");
4244                         goto probe_failed;
4245                 }
4246         }
4247
4248         retval = ethdev_init(ethdev, init_params);
4249         if (retval) {
4250                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
4251                 goto probe_failed;
4252         }
4253
4254         rte_eth_dev_probing_finish(ethdev);
4255
4256         return retval;
4257
4258 probe_failed:
4259         rte_eth_dev_release_port(ethdev);
4260         return retval;
4261 }
4262
4263 int
4264 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4265         ethdev_uninit_t ethdev_uninit)
4266 {
4267         int ret;
4268
4269         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4270         if (!ethdev)
4271                 return -ENODEV;
4272
4273         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4274
4275         ret = ethdev_uninit(ethdev);
4276         if (ret)
4277                 return ret;
4278
4279         return rte_eth_dev_release_port(ethdev);
4280 }
4281
4282 int
4283 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4284                           int epfd, int op, void *data)
4285 {
4286         uint32_t vec;
4287         struct rte_eth_dev *dev;
4288         struct rte_intr_handle *intr_handle;
4289         int rc;
4290
4291         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4292
4293         dev = &rte_eth_devices[port_id];
4294         if (queue_id >= dev->data->nb_rx_queues) {
4295                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4296                 return -EINVAL;
4297         }
4298
4299         if (!dev->intr_handle) {
4300                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4301                 return -ENOTSUP;
4302         }
4303
4304         intr_handle = dev->intr_handle;
4305         if (!intr_handle->intr_vec) {
4306                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4307                 return -EPERM;
4308         }
4309
4310         vec = intr_handle->intr_vec[queue_id];
4311         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4312         if (rc && rc != -EEXIST) {
4313                 RTE_ETHDEV_LOG(ERR,
4314                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4315                         port_id, queue_id, op, epfd, vec);
4316                 return rc;
4317         }
4318
4319         return 0;
4320 }
4321
4322 int
4323 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4324                            uint16_t queue_id)
4325 {
4326         struct rte_eth_dev *dev;
4327
4328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4329
4330         dev = &rte_eth_devices[port_id];
4331
4332         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4333         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4334                                                                 queue_id));
4335 }
4336
4337 int
4338 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4339                             uint16_t queue_id)
4340 {
4341         struct rte_eth_dev *dev;
4342
4343         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4344
4345         dev = &rte_eth_devices[port_id];
4346
4347         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4348         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4349                                                                 queue_id));
4350 }
4351
4352
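/*
 * Usage sketch: arming one RX queue for interrupt mode and waiting on it via
 * the EAL epoll wrapper. Illustrative only; it assumes the port was
 * configured with intr_conf.rxq = 1 and that the RTE_EPOLL_PER_THREAD /
 * rte_epoll_wait() interfaces are available on the target platform.
 *
 *	struct rte_epoll_event ev;
 *	int n;
 *
 *	if (rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL) != 0)
 *		return -1;
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	if (n > 0)
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */
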
4353 int
4354 rte_eth_dev_filter_supported(uint16_t port_id,
4355                              enum rte_filter_type filter_type)
4356 {
4357         struct rte_eth_dev *dev;
4358
4359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4360
4361         dev = &rte_eth_devices[port_id];
4362         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4363         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4364                                 RTE_ETH_FILTER_NOP, NULL);
4365 }
4366
4367 int
4368 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4369                         enum rte_filter_op filter_op, void *arg)
4370 {
4371         struct rte_eth_dev *dev;
4372
4373         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4374
4375         dev = &rte_eth_devices[port_id];
4376         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4377         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4378                                                              filter_op, arg));
4379 }
4380
4381 const struct rte_eth_rxtx_callback *
4382 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4383                 rte_rx_callback_fn fn, void *user_param)
4384 {
4385 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4386         rte_errno = ENOTSUP;
4387         return NULL;
4388 #endif
4389         struct rte_eth_dev *dev;
4390
4391         /* check input parameters */
4392         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4393                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4394                 rte_errno = EINVAL;
4395                 return NULL;
4396         }
4397         dev = &rte_eth_devices[port_id];
4398         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4399                 rte_errno = EINVAL;
4400                 return NULL;
4401         }
4402         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4403
4404         if (cb == NULL) {
4405                 rte_errno = ENOMEM;
4406                 return NULL;
4407         }
4408
4409         cb->fn.rx = fn;
4410         cb->param = user_param;
4411
4412         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4413         /* Add the callbacks in FIFO order. */
4414         struct rte_eth_rxtx_callback *tail =
4415                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4416
4417         if (!tail) {
4418                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4419
4420         } else {
4421                 while (tail->next)
4422                         tail = tail->next;
4423                 tail->next = cb;
4424         }
4425         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4426
4427         return cb;
4428 }
4429
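/*
 * Usage sketch: a post-RX callback that counts received packets, keeping the
 * rte_rx_callback_fn signature. The global counter and queue 0 are
 * illustrative, and the counter is not thread-safe across queues.
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_rx(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		RTE_SET_USED(user_param);
 *		rx_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_rx, NULL);
 */
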
4430 const struct rte_eth_rxtx_callback *
4431 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4432                 rte_rx_callback_fn fn, void *user_param)
4433 {
4434 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4435         rte_errno = ENOTSUP;
4436         return NULL;
4437 #endif
4438         /* check input parameters */
4439         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4440                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4441                 rte_errno = EINVAL;
4442                 return NULL;
4443         }
4444
4445         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4446
4447         if (cb == NULL) {
4448                 rte_errno = ENOMEM;
4449                 return NULL;
4450         }
4451
4452         cb->fn.rx = fn;
4453         cb->param = user_param;
4454
4455         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4456         /* Add the callback at the first position */
4457         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4458         rte_smp_wmb();
4459         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4460         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4461
4462         return cb;
4463 }
4464
4465 const struct rte_eth_rxtx_callback *
4466 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4467                 rte_tx_callback_fn fn, void *user_param)
4468 {
4469 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4470         rte_errno = ENOTSUP;
4471         return NULL;
4472 #endif
4473         struct rte_eth_dev *dev;
4474
4475         /* check input parameters */
4476         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4477                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4478                 rte_errno = EINVAL;
4479                 return NULL;
4480         }
4481
4482         dev = &rte_eth_devices[port_id];
4483         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4484                 rte_errno = EINVAL;
4485                 return NULL;
4486         }
4487
4488         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4489
4490         if (cb == NULL) {
4491                 rte_errno = ENOMEM;
4492                 return NULL;
4493         }
4494
4495         cb->fn.tx = fn;
4496         cb->param = user_param;
4497
4498         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4499         /* Add the callbacks in FIFO order. */
4500         struct rte_eth_rxtx_callback *tail =
4501                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4502
4503         if (!tail) {
4504                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4506         } else {
4507                 while (tail->next)
4508                         tail = tail->next;
4509                 tail->next = cb;
4510         }
4511         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4512
4513         return cb;
4514 }
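
/*
 * A matching Tx-side sketch (names hypothetical): a pre-transmit callback
 * that tallies outgoing bytes. Note that rte_tx_callback_fn, unlike the Rx
 * variant, has no max_pkts argument.
 *
 *	static uint16_t
 *	count_tx_bytes(uint16_t port_id, uint16_t queue_id,
 *		       struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *		       void *user_param)
 *	{
 *		uint64_t *bytes = user_param;
 *		uint16_t i;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		for (i = 0; i < nb_pkts; i++)
 *			*bytes += rte_pktmbuf_pkt_len(pkts[i]);
 *		return nb_pkts;
 *	}
 */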
4515
4516 int
4517 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4518                 const struct rte_eth_rxtx_callback *user_cb)
4519 {
4520 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4521         return -ENOTSUP;
4522 #endif
4523         /* Check input parameters. */
4524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4525         if (user_cb == NULL ||
4526                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4527                 return -EINVAL;
4528
4529         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4530         struct rte_eth_rxtx_callback *cb;
4531         struct rte_eth_rxtx_callback **prev_cb;
4532         int ret = -EINVAL;
4533
4534         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4535         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4536         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4537                 cb = *prev_cb;
4538                 if (cb == user_cb) {
4539                         /* Remove the user cb from the callback list. */
4540                         *prev_cb = cb->next;
4541                         ret = 0;
4542                         break;
4543                 }
4544         }
4545         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4546
4547         return ret;
4548 }
4549
4550 int
4551 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4552                 const struct rte_eth_rxtx_callback *user_cb)
4553 {
4554 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4555         return -ENOTSUP;
4556 #endif
4557         /* Check input parameters. */
4558         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4559         if (user_cb == NULL ||
4560                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4561                 return -EINVAL;
4562
4563         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4564         int ret = -EINVAL;
4565         struct rte_eth_rxtx_callback *cb;
4566         struct rte_eth_rxtx_callback **prev_cb;
4567
4568         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4569         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4570         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4571                 cb = *prev_cb;
4572                 if (cb == user_cb) {
4573                         /* Remove the user cb from the callback list. */
4574                         *prev_cb = cb->next;
4575                         ret = 0;
4576                         break;
4577                 }
4578         }
4579         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4580
4581         return ret;
4582 }
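
/*
 * Note that neither remove function frees the callback object: unlinking
 * happens under the add/remove spinlock, but a data-plane thread may still
 * be executing the callback. The application must therefore make sure all
 * rte_eth_rx_burst()/rte_eth_tx_burst() calls on the queue have quiesced
 * before releasing the memory, e.g. (sketch, with a hypothetical handle cb
 * and a hypothetical synchronization helper):
 *
 *	if (rte_eth_remove_rx_callback(0, 0, cb) == 0) {
 *		wait_for_datapath_quiescence();
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 *
 * where wait_for_datapath_quiescence() stands for an application-specific
 * synchronization step.
 */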
4583
4584 int
4585 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4586         struct rte_eth_rxq_info *qinfo)
4587 {
4588         struct rte_eth_dev *dev;
4589
4590         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4591
4592         if (qinfo == NULL)
4593                 return -EINVAL;
4594
4595         dev = &rte_eth_devices[port_id];
4596         if (queue_id >= dev->data->nb_rx_queues) {
4597                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4598                 return -EINVAL;
4599         }
4600
4601         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4602                 RTE_ETHDEV_LOG(INFO,
4603                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4604                         queue_id, port_id);
4605                 return -EINVAL;
4606         }
4607
4608         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4609
4610         memset(qinfo, 0, sizeof(*qinfo));
4611         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4612         return 0;
4613 }
4614
4615 int
4616 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4617         struct rte_eth_txq_info *qinfo)
4618 {
4619         struct rte_eth_dev *dev;
4620
4621         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4622
4623         if (qinfo == NULL)
4624                 return -EINVAL;
4625
4626         dev = &rte_eth_devices[port_id];
4627         if (queue_id >= dev->data->nb_tx_queues) {
4628                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4629                 return -EINVAL;
4630         }
4631
4632         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4633                 RTE_ETHDEV_LOG(INFO,
4634                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4635                         queue_id, port_id);
4636                 return -EINVAL;
4637         }
4638
4639         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4640
4641         memset(qinfo, 0, sizeof(*qinfo));
4642         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4643
4644         return 0;
4645 }
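
/*
 * Minimal sketch of reading back a queue's configuration (port 0 and
 * queue 0 are assumptions; nb_desc is a field of struct rte_eth_rxq_info):
 *
 *	struct rte_eth_rxq_info rxq;
 *
 *	if (rte_eth_rx_queue_info_get(0, 0, &rxq) == 0)
 *		printf("Rx ring size: %u\n", rxq.nb_desc);
 */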
4646
4647 int
4648 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4649                           struct rte_eth_burst_mode *mode)
4650 {
4651         struct rte_eth_dev *dev;
4652
4653         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4654
4655         if (mode == NULL)
4656                 return -EINVAL;
4657
4658         dev = &rte_eth_devices[port_id];
4659
4660         if (queue_id >= dev->data->nb_rx_queues) {
4661                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4662                 return -EINVAL;
4663         }
4664
4665         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4666         memset(mode, 0, sizeof(*mode));
4667         return eth_err(port_id,
4668                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4669 }
4670
4671 int
4672 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4673                           struct rte_eth_burst_mode *mode)
4674 {
4675         struct rte_eth_dev *dev;
4676
4677         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4678
4679         if (mode == NULL)
4680                 return -EINVAL;
4681
4682         dev = &rte_eth_devices[port_id];
4683
4684         if (queue_id >= dev->data->nb_tx_queues) {
4685                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4686                 return -EINVAL;
4687         }
4688
4689         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4690         memset(mode, 0, sizeof(*mode));
4691         return eth_err(port_id,
4692                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4693 }
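
/*
 * Burst-mode queries are optional for PMDs, so callers should treat
 * -ENOTSUP as "no information available" rather than as a hard failure
 * (sketch, port/queue values assumed):
 *
 *	struct rte_eth_burst_mode mode;
 *	int ret = rte_eth_rx_burst_mode_get(0, 0, &mode);
 *
 *	if (ret != 0 && ret != -ENOTSUP)
 *		printf("burst mode query failed: %d\n", ret);
 */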
4694
4695 int
4696 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4697                              struct rte_ether_addr *mc_addr_set,
4698                              uint32_t nb_mc_addr)
4699 {
4700         struct rte_eth_dev *dev;
4701
4702         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4703
4704         dev = &rte_eth_devices[port_id];
4705         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4706         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4707                                                 mc_addr_set, nb_mc_addr));
4708 }
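
/*
 * Sketch: installing a one-entry multicast filter list (the address below
 * is the IPv4 all-hosts group MAC, 01:00:5e:00:00:01; calling with
 * nb_mc_addr == 0 flushes the list):
 *
 *	struct rte_ether_addr mc[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(0, mc, 1);
 */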
4709
4710 int
4711 rte_eth_timesync_enable(uint16_t port_id)
4712 {
4713         struct rte_eth_dev *dev;
4714
4715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4716         dev = &rte_eth_devices[port_id];
4717
4718         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4719         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4720 }
4721
4722 int
4723 rte_eth_timesync_disable(uint16_t port_id)
4724 {
4725         struct rte_eth_dev *dev;
4726
4727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4728         dev = &rte_eth_devices[port_id];
4729
4730         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4731         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4732 }
4733
4734 int
4735 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4736                                    uint32_t flags)
4737 {
4738         struct rte_eth_dev *dev;
4739
4740         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4741         dev = &rte_eth_devices[port_id];
4742
4743         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4744         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4745                                 (dev, timestamp, flags));
4746 }
4747
4748 int
4749 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4750                                    struct timespec *timestamp)
4751 {
4752         struct rte_eth_dev *dev;
4753
4754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4755         dev = &rte_eth_devices[port_id];
4756
4757         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4758         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4759                                 (dev, timestamp));
4760 }
4761
4762 int
4763 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4764 {
4765         struct rte_eth_dev *dev;
4766
4767         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4768         dev = &rte_eth_devices[port_id];
4769
4770         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4771         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4772                                                                       delta));
4773 }
4774
4775 int
4776 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4777 {
4778         struct rte_eth_dev *dev;
4779
4780         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4781         dev = &rte_eth_devices[port_id];
4782
4783         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4784         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4785                                                                 timestamp));
4786 }
4787
4788 int
4789 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4790 {
4791         struct rte_eth_dev *dev;
4792
4793         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4794         dev = &rte_eth_devices[port_id];
4795
4796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4797         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4798                                                                 timestamp));
4799 }
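
/*
 * Typical control flow built from the timesync calls above (sketch; a real
 * deployment also enables the PTP timestamping offloads on the port):
 *
 *	struct timespec ts;
 *
 *	if (rte_eth_timesync_enable(0) == 0 &&
 *	    rte_eth_timesync_read_time(0, &ts) == 0)
 *		printf("PHC time: %jd.%09ld\n", (intmax_t)ts.tv_sec,
 *		       ts.tv_nsec);
 *
 *	rte_eth_timesync_adjust_time(0, 1000);	(advance the clock by 1 us)
 */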
4800
4801 int
4802 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4803 {
4804         struct rte_eth_dev *dev;
4805
4806         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4807         dev = &rte_eth_devices[port_id];
4808
4809         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4810         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4811 }
4812
4813 int
4814 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4815 {
4816         struct rte_eth_dev *dev;
4817
4818         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4819
4820         dev = &rte_eth_devices[port_id];
4821         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4822         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4823 }
4824
4825 int
4826 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4827 {
4828         struct rte_eth_dev *dev;
4829
4830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4831
4832         dev = &rte_eth_devices[port_id];
4833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4834         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4835 }
4836
4837 int
4838 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4839 {
4840         struct rte_eth_dev *dev;
4841
4842         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4843
4844         dev = &rte_eth_devices[port_id];
4845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4846         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4847 }
4848
4849 int
4850 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4851 {
4852         struct rte_eth_dev *dev;
4853
4854         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4855
4856         dev = &rte_eth_devices[port_id];
4857         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4858         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4859 }
4860
4861 int
4862 rte_eth_dev_get_module_info(uint16_t port_id,
4863                             struct rte_eth_dev_module_info *modinfo)
4864 {
4865         struct rte_eth_dev *dev;
4866
4867         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4868
4869         dev = &rte_eth_devices[port_id];
4870         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4871         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4872 }
4873
4874 int
4875 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4876                               struct rte_dev_eeprom_info *info)
4877 {
4878         struct rte_eth_dev *dev;
4879
4880         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4881
4882         dev = &rte_eth_devices[port_id];
4883         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4884         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4885 }
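
/*
 * The two module calls compose into a read sequence: query the EEPROM size
 * first, then fetch its contents (sketch; the buffer size is an arbitrary
 * assumption):
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info eeprom;
 *	uint8_t buf[1024];
 *
 *	if (rte_eth_dev_get_module_info(0, &minfo) == 0 &&
 *	    minfo.eeprom_len <= sizeof(buf)) {
 *		memset(&eeprom, 0, sizeof(eeprom));
 *		eeprom.data = buf;
 *		eeprom.offset = 0;
 *		eeprom.length = minfo.eeprom_len;
 *		rte_eth_dev_get_module_eeprom(0, &eeprom);
 *	}
 */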
4886
4887 int
4888 rte_eth_dev_get_dcb_info(uint16_t port_id,
4889                              struct rte_eth_dcb_info *dcb_info)
4890 {
4891         struct rte_eth_dev *dev;
4892
4893         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4894
4895         dev = &rte_eth_devices[port_id];
4896         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4897
4898         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4899         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4900 }
4901
4902 int
4903 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4904                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4905 {
4906         struct rte_eth_dev *dev;
4907
4908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4909         if (l2_tunnel == NULL) {
4910                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4911                 return -EINVAL;
4912         }
4913
4914         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4915                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4916                 return -EINVAL;
4917         }
4918
4919         dev = &rte_eth_devices[port_id];
4920         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4921                                 -ENOTSUP);
4922         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4923                                                                 l2_tunnel));
4924 }
4925
4926 int
4927 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4928                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4929                                   uint32_t mask,
4930                                   uint8_t en)
4931 {
4932         struct rte_eth_dev *dev;
4933
4934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4935
4936         if (l2_tunnel == NULL) {
4937                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4938                 return -EINVAL;
4939         }
4940
4941         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4942                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4943                 return -EINVAL;
4944         }
4945
4946         if (mask == 0) {
4947                 RTE_ETHDEV_LOG(ERR, "Mask must be non-zero\n");
4948                 return -EINVAL;
4949         }
4950
4951         dev = &rte_eth_devices[port_id];
4952         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4953                                 -ENOTSUP);
4954         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4955                                                         l2_tunnel, mask, en));
4956 }
4957
4958 static void
4959 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4960                            const struct rte_eth_desc_lim *desc_lim)
4961 {
4962         if (desc_lim->nb_align != 0)
4963                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4964
4965         if (desc_lim->nb_max != 0)
4966                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4967
4968         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4969 }
4970
4971 int
4972 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4973                                  uint16_t *nb_rx_desc,
4974                                  uint16_t *nb_tx_desc)
4975 {
4976         struct rte_eth_dev_info dev_info;
4977         int ret;
4978
4979         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4980
4981         ret = rte_eth_dev_info_get(port_id, &dev_info);
4982         if (ret != 0)
4983                 return ret;
4984
4985         if (nb_rx_desc != NULL)
4986                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4987
4988         if (nb_tx_desc != NULL)
4989                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4990
4991         return 0;
4992 }
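
/*
 * The adjust helper is intended to run after rte_eth_dev_configure() and
 * before the queue setup calls, clamping requested ring sizes to the PMD's
 * reported limits (sketch; 1024 is an arbitrary starting request):
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(0, &nb_rxd, &nb_txd) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot adjust descriptor counts\n");
 *
 * The adjusted nb_rxd/nb_txd are then passed to rte_eth_rx_queue_setup()
 * and rte_eth_tx_queue_setup().
 */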
4993
4994 int
4995 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4996                                    struct rte_eth_hairpin_cap *cap)
4997 {
4998         struct rte_eth_dev *dev;
4999
5000         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
5001
5002         dev = &rte_eth_devices[port_id];
5003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5004         memset(cap, 0, sizeof(*cap));
5005         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5006 }
5007
5008 int
5009 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5010 {
5011         if (dev->data->rx_queue_state[queue_id] ==
5012             RTE_ETH_QUEUE_STATE_HAIRPIN)
5013                 return 1;
5014         return 0;
5015 }
5016
5017 int
5018 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5019 {
5020         if (dev->data->tx_queue_state[queue_id] ==
5021             RTE_ETH_QUEUE_STATE_HAIRPIN)
5022                 return 1;
5023         return 0;
5024 }
5025
5026 int
5027 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5028 {
5029         struct rte_eth_dev *dev;
5030
5031         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5032
5033         if (pool == NULL)
5034                 return -EINVAL;
5035
5036         dev = &rte_eth_devices[port_id];
5037
5038         if (*dev->dev_ops->pool_ops_supported == NULL)
5039                 return 1; /* all pools are supported */
5040
5041         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5042 }
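
/*
 * Sketch: probing whether a port's PMD accepts a given mempool ops name
 * before creating the pool ("ring_mp_mc" is the default ring-based ops):
 *
 *	int ret = rte_eth_dev_pool_ops_supported(0, "ring_mp_mc");
 *
 *	if (ret < 0)
 *		printf("pool ops query failed: %d\n", ret);
 */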
5043
5044 /**
5045  * A set of values to describe the possible states of a switch domain.
5046  */
5047 enum rte_eth_switch_domain_state {
5048         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5049         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5050 };
5051
5052 /**
5053  * Array of switch domains available for allocation. Array is sized to
5054  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5055  * ethdev ports in a single process.
5056  */
5057 static struct rte_eth_dev_switch {
5058         enum rte_eth_switch_domain_state state;
5059 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5060
5061 int
5062 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5063 {
5064         unsigned int i;
5065
5066         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5067
5068         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5069                 if (rte_eth_switch_domains[i].state ==
5070                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5071                         rte_eth_switch_domains[i].state =
5072                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5073                         *domain_id = i;
5074                         return 0;
5075                 }
5076         }
5077
5078         return -ENOSPC;
5079 }
5080
5081 int
5082 rte_eth_switch_domain_free(uint16_t domain_id)
5083 {
5084         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5085                 domain_id >= RTE_MAX_ETHPORTS)
5086                 return -EINVAL;
5087
5088         if (rte_eth_switch_domains[domain_id].state !=
5089                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5090                 return -EINVAL;
5091
5092         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5093
5094         return 0;
5095 }
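
/*
 * A PMD creating a group of representor ports would typically pair these
 * calls as follows (sketch):
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	(the id is then stored in each related port's switch_info and
 *	 released with rte_eth_switch_domain_free(domain_id) on teardown)
 */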
5096
5097 static int
5098 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5099 {
5100         int state;
5101         struct rte_kvargs_pair *pair;
5102         char *letter;
5103
5104         arglist->str = strdup(str_in);
5105         if (arglist->str == NULL)
5106                 return -ENOMEM;
5107
5108         letter = arglist->str;
5109         state = 0;
5110         arglist->count = 0;
5111         pair = &arglist->pairs[0];
5112         while (1) {
5113                 switch (state) {
5114                 case 0: /* Initial */
5115                         if (*letter == '=')
5116                                 return -EINVAL;
5117                         else if (*letter == '\0')
5118                                 return 0;
5119
5120                         state = 1;
5121                         pair->key = letter;
5122                         /* fall-thru */
5123
5124                 case 1: /* Parsing key */
5125                         if (*letter == '=') {
5126                                 *letter = '\0';
5127                                 pair->value = letter + 1;
5128                                 state = 2;
5129                         } else if (*letter == ',' || *letter == '\0')
5130                                 return -EINVAL;
5131                         break;
5132
5134                 case 2: /* Parsing value */
5135                         if (*letter == '[')
5136                                 state = 3;
5137                         else if (*letter == ',') {
5138                                 *letter = '\0';
5139                                 arglist->count++;
5140                                 pair = &arglist->pairs[arglist->count];
5141                                 state = 0;
5142                         } else if (*letter == '\0') {
5143                                 letter--;
5144                                 arglist->count++;
5145                                 pair = &arglist->pairs[arglist->count];
5146                                 state = 0;
5147                         }
5148                         break;
5149
5150                 case 3: /* Parsing list */
5151                         if (*letter == ']')
5152                                 state = 2;
5153                         else if (*letter == '\0')
5154                                 return -EINVAL;
5155                         break;
5156                 }
5157                 letter++;
5158         }
5159 }
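
/*
 * Worked example of the scanner above (input invented for illustration):
 * square brackets protect commas inside a value, so
 *
 *	input:  "representor=[0,2,4],driver=x"
 *	pairs:  { "representor", "[0,2,4]" } and { "driver", "x" }
 *
 * A '=' before any key, or a key that ends without '=', is rejected with
 * -EINVAL.
 */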
5160
5161 int
5162 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5163 {
5164         struct rte_kvargs args;
5165         struct rte_kvargs_pair *pair;
5166         unsigned int i;
5167         int result = 0;
5168
5169         memset(eth_da, 0, sizeof(*eth_da));
5170
5171         result = rte_eth_devargs_tokenise(&args, dargs);
5172         if (result < 0)
5173                 goto parse_cleanup;
5174
5175         for (i = 0; i < args.count; i++) {
5176                 pair = &args.pairs[i];
5177                 if (strcmp("representor", pair->key) == 0) {
5178                         result = rte_eth_devargs_parse_list(pair->value,
5179                                 rte_eth_devargs_parse_representor_ports,
5180                                 eth_da);
5181                         if (result < 0)
5182                                 goto parse_cleanup;
5183                 }
5184         }
5185
5186 parse_cleanup:
5187         if (args.str)
5188                 free(args.str);
5189
5190         return result;
5191 }
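
/*
 * Driver-side usage sketch (the devargs string is an example; field names
 * are those of struct rte_eth_devargs in rte_ethdev_driver.h):
 *
 *	struct rte_eth_devargs eth_da;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) == 0)
 *		printf("%u representor ports requested\n",
 *		       eth_da.nb_representor_ports);
 */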
5192
5193 RTE_INIT(ethdev_init_log)
5194 {
5195         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
5196         if (rte_eth_dev_logtype >= 0)
5197                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
5198 }