ethdev: limit maximum number of queues
lib/librte_ethdev/rte_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>

#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure. */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /	\
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /	\
		sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs = {.args = NULL};
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	memset(iter, 0, sizeof(*iter));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 * A new syntax is in development (not yet supported):
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter (i.e. without any bus-level argument)
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;
	free(devargs.args); /* allocated by rte_devargs_parse() */
	devargs.args = NULL;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if (strcmp(iter->bus->name, "vdev") == 0) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
				iter->bus->name);
	free(devargs.args);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get the next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidates */
		}
		/* A device matches the bus part; check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev ports to iterate over. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
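
/*
 * Usage sketch for the iterator API above (illustrative only, not part of
 * this file; error handling trimmed). Any of the devargs syntaxes listed
 * in rte_eth_iterator_init() works as the filter string:
 *
 *     struct rte_dev_iterator iter;
 *     uint16_t port_id;
 *
 *     if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") != 0)
 *         return;
 *     for (port_id = rte_eth_iterator_next(&iter);
 *          port_id != RTE_MAX_ETHPORTS;
 *          port_id = rte_eth_iterator_next(&iter))
 *         printf("matched port %u\n", port_id);
 *     // rte_eth_iterator_next() cleans up internally once exhausted;
 *     // call rte_eth_iterator_cleanup() only when breaking out early.
 */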

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs because it skips owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))
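
/*
 * Illustrative sketch (not part of this file): the internal macro above
 * visits every allocated port, while the public RTE_ETH_FOREACH_DEV
 * additionally skips owned ports, so:
 *
 *     uint16_t pid, valid = 0, avail = 0;
 *
 *     RTE_ETH_FOREACH_VALID_DEV(pid)
 *         valid++;                  // every allocated port
 *     RTE_ETH_FOREACH_DEV(pid)
 *         avail++;                  // unowned ports only
 *     // valid >= avail always holds
 */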

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ethdev = _rte_eth_dev_allocated(name);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (_rte_eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = RTE_ETHER_MTU;

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, so that
 * the same device gets the same port ID in both the primary and
 * secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}
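
/*
 * Illustrative sketch of how a PMD typically uses the two allocation
 * entry points above during probe ("net_example0" is an invented name;
 * error handling trimmed; not code from this file):
 *
 *     struct rte_eth_dev *eth_dev;
 *
 *     if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *         eth_dev = rte_eth_dev_allocate("net_example0");
 *     else
 *         eth_dev = rte_eth_dev_attach_secondary("net_example0");
 *     if (eth_dev == NULL)
 *         return -ENODEV;
 *     // fill in eth_dev->dev_ops, rx_pkt_burst, tx_pkt_burst, ...
 */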

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		_rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner id=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
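
/*
 * Sketch of the ownership API above (assumed caller code, not part of
 * this file): claim a port so that other entities leave it alone.
 *
 *     struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *     if (rte_eth_dev_owner_new(&owner.id) != 0 ||
 *         rte_eth_dev_owner_set(port_id, &owner) != 0)
 *         return -1;
 *     // ... exclusive use of port_id; RTE_ETH_FOREACH_DEV in other
 *     // entities will now skip it ...
 *     rte_eth_dev_owner_unset(port_id, owner.id);
 */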

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}
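
/*
 * Sketch (not from this file): the two counters above differ exactly by
 * ownership, since count_avail() uses RTE_ETH_FOREACH_DEV:
 *
 *     printf("%u of %u ports available (unowned)\n",
 *            rte_eth_dev_count_avail(), rte_eth_dev_count_total());
 */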

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	/* Don't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a vdev PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}
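
/*
 * Sketch of the name <-> port_id lookups above (illustrative only):
 *
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *     uint16_t pid;
 *
 *     if (rte_eth_dev_get_name_by_port(3, name) == 0 &&
 *         rte_eth_dev_get_port_by_name(name, &pid) == 0)
 *         assert(pid == 3);   // names are unique, so the round trip is stable
 */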

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
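
/*
 * Illustrative sketch (not part of this file): queues can be started and
 * stopped individually while the port is running, provided the PMD
 * implements the corresponding dev_ops and the queue is not a hairpin one.
 *
 *     rte_eth_dev_start(port_id);             // queue start requires a started port
 *     rte_eth_dev_rx_queue_stop(port_id, 0);  // quiesce Rx queue 0
 *     // ... e.g. drain or rearm the queue ...
 *     rte_eth_dev_rx_queue_start(port_id, 0);
 */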

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
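
/*
 * Example of the mapping above (illustrative): numeric speeds plus a
 * duplex flag become ETH_LINK_SPEED_* capability bits, suitable for
 * OR-ing into rte_eth_conf.link_speeds.
 *
 *     uint32_t speeds =
 *         rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX) |
 *         rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, ETH_LINK_FULL_DUPLEX);
 *     // yields ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G
 */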

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}
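
/*
 * Sketch (not from this file): decoding an offload bitmask one bit at a
 * time with the name helpers above, using the same ctz trick as
 * validate_offloads() below.
 *
 *     uint64_t offloads = DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER;
 *
 *     while (offloads != 0) {
 *         uint64_t bit = 1ULL << __builtin_ctzll(offloads);
 *         printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *         offloads &= ~bit;
 *     }
 */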

static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if an offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}
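
/*
 * Worked example for validate_offloads() (values invented for
 * illustration): with req = CKSUM|LRO and set = CKSUM|RSS_HASH, the XOR
 * diff is LRO|RSS_HASH. LRO is in req but not in set, so an error is
 * logged and -EINVAL returned; RSS_HASH is in set but not in req, which
 * is only reported at DEBUG level since an extra offload is harmless.
 */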

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/* Store original config, as rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before
	 * getting the dev_info.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use driver preferred values. This cannot be done
	 * individually as it is valid for either Tx or Rx (but not both) to
	 * be zero. If the driver does not provide any preferred values,
	 * fall back on EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned int)RTE_ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							RTE_ETHER_MAX_LEN;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		ret = check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Set up the new number of RX/TX queues and reconfigure the device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	return 0;
reset_queues:
	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

	return ret;
}
1484
1485 void
1486 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1487 {
1488         if (dev->data->dev_started) {
1489                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1490                         dev->data->port_id);
1491                 return;
1492         }
1493
1494         rte_eth_dev_rx_queue_config(dev, 0);
1495         rte_eth_dev_tx_queue_config(dev, 0);
1496
1497         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1498 }
1499
1500 static void
1501 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1502                         struct rte_eth_dev_info *dev_info)
1503 {
1504         struct rte_ether_addr *addr;
1505         uint16_t i;
1506         uint32_t pool = 0;
1507         uint64_t pool_mask;
1508
1509         /* replay MAC address configuration including default MAC */
1510         addr = &dev->data->mac_addrs[0];
1511         if (*dev->dev_ops->mac_addr_set != NULL)
1512                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1513         else if (*dev->dev_ops->mac_addr_add != NULL)
1514                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1515
1516         if (*dev->dev_ops->mac_addr_add != NULL) {
1517                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1518                         addr = &dev->data->mac_addrs[i];
1519
1520                         /* skip zero address */
1521                         if (rte_is_zero_ether_addr(addr))
1522                                 continue;
1523
1524                         pool = 0;
1525                         pool_mask = dev->data->mac_pool_sel[i];
1526
1527                         do {
1528                                 if (pool_mask & 1ULL)
1529                                         (*dev->dev_ops->mac_addr_add)(dev,
1530                                                 addr, i, pool);
1531                                 pool_mask >>= 1;
1532                                 pool++;
1533                         } while (pool_mask);
1534                 }
1535         }
1536 }
1537
1538 static int
1539 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1540                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1541 {
1542         int ret;
1543
1544         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1545                 rte_eth_dev_mac_restore(dev, dev_info);
1546
1547         /* replay promiscuous configuration */
1548         /*
1549          * Use the driver callbacks directly: port_id is already validated
1550          * and we want to bypass the same-value check in the API wrapper.
1551          */
1552         if (rte_eth_promiscuous_get(port_id) == 1 &&
1553             *dev->dev_ops->promiscuous_enable != NULL) {
1554                 ret = eth_err(port_id,
1555                               (*dev->dev_ops->promiscuous_enable)(dev));
1556                 if (ret != 0 && ret != -ENOTSUP) {
1557                         RTE_ETHDEV_LOG(ERR,
1558                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1559                                 port_id, rte_strerror(-ret));
1560                         return ret;
1561                 }
1562         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1563                    *dev->dev_ops->promiscuous_disable != NULL) {
1564                 ret = eth_err(port_id,
1565                               (*dev->dev_ops->promiscuous_disable)(dev));
1566                 if (ret != 0 && ret != -ENOTSUP) {
1567                         RTE_ETHDEV_LOG(ERR,
1568                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1569                                 port_id, rte_strerror(-ret));
1570                         return ret;
1571                 }
1572         }
1573
1574         /* replay all multicast configuration */
1575         /*
1576          * Use the driver callbacks directly: port_id is already validated
1577          * and we want to bypass the same-value check in the API wrapper.
1578          */
1579         if (rte_eth_allmulticast_get(port_id) == 1 &&
1580             *dev->dev_ops->allmulticast_enable != NULL) {
1581                 ret = eth_err(port_id,
1582                               (*dev->dev_ops->allmulticast_enable)(dev));
1583                 if (ret != 0 && ret != -ENOTSUP) {
1584                         RTE_ETHDEV_LOG(ERR,
1585                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1586                                 port_id, rte_strerror(-ret));
1587                         return ret;
1588                 }
1589         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1590                    *dev->dev_ops->allmulticast_disable != NULL) {
1591                 ret = eth_err(port_id,
1592                               (*dev->dev_ops->allmulticast_disable)(dev));
1593                 if (ret != 0 && ret != -ENOTSUP) {
1594                         RTE_ETHDEV_LOG(ERR,
1595                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1596                                 port_id, rte_strerror(-ret));
1597                         return ret;
1598                 }
1599         }
1600
1601         return 0;
1602 }
1603
1604 int
1605 rte_eth_dev_start(uint16_t port_id)
1606 {
1607         struct rte_eth_dev *dev;
1608         struct rte_eth_dev_info dev_info;
1609         int diag;
1610         int ret;
1611
1612         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1613
1614         dev = &rte_eth_devices[port_id];
1615
1616         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1617
1618         if (dev->data->dev_started != 0) {
1619                 RTE_ETHDEV_LOG(INFO,
1620                         "Device with port_id=%"PRIu16" already started\n",
1621                         port_id);
1622                 return 0;
1623         }
1624
1625         ret = rte_eth_dev_info_get(port_id, &dev_info);
1626         if (ret != 0)
1627                 return ret;
1628
1629         /* Restore MAC address now if the device does not support live change */
1630         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1631                 rte_eth_dev_mac_restore(dev, &dev_info);
1632
1633         diag = (*dev->dev_ops->dev_start)(dev);
1634         if (diag == 0)
1635                 dev->data->dev_started = 1;
1636         else
1637                 return eth_err(port_id, diag);
1638
1639         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1640         if (ret != 0) {
1641                 RTE_ETHDEV_LOG(ERR,
1642                         "Error during restoring configuration for device (port %u): %s\n",
1643                         port_id, rte_strerror(-ret));
1644                 rte_eth_dev_stop(port_id);
1645                 return ret;
1646         }
1647
1648         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1649                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1650                 (*dev->dev_ops->link_update)(dev, 0);
1651         }
1652         return 0;
1653 }
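
/*
 * Editor's note: a minimal bring-up sketch (hypothetical, not compiled;
 * it assumes only this file's includes). Queue counts and the zeroed
 * rte_eth_conf are illustrative; a real application should size them from
 * rte_eth_dev_info_get().
 */
#if 0
static int
example_port_start(uint16_t port_id, struct rte_mempool *mp)
{
        struct rte_eth_conf conf = {0};
        int ret;

        /* One Rx and one Tx queue; nb_desc = 0 picks driver defaults. */
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 0,
                        rte_eth_dev_socket_id(port_id), NULL, mp);
        if (ret < 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 0,
                        rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
                return ret;
        /* rte_eth_dev_start() then replays MAC/promisc/allmulti state. */
        return rte_eth_dev_start(port_id);
}
#endif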
1654
1655 void
1656 rte_eth_dev_stop(uint16_t port_id)
1657 {
1658         struct rte_eth_dev *dev;
1659
1660         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1661         dev = &rte_eth_devices[port_id];
1662
1663         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1664
1665         if (dev->data->dev_started == 0) {
1666                 RTE_ETHDEV_LOG(INFO,
1667                         "Device with port_id=%"PRIu16" already stopped\n",
1668                         port_id);
1669                 return;
1670         }
1671
1672         dev->data->dev_started = 0;
1673         (*dev->dev_ops->dev_stop)(dev);
1674 }
1675
1676 int
1677 rte_eth_dev_set_link_up(uint16_t port_id)
1678 {
1679         struct rte_eth_dev *dev;
1680
1681         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1682
1683         dev = &rte_eth_devices[port_id];
1684
1685         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1686         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1687 }
1688
1689 int
1690 rte_eth_dev_set_link_down(uint16_t port_id)
1691 {
1692         struct rte_eth_dev *dev;
1693
1694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1695
1696         dev = &rte_eth_devices[port_id];
1697
1698         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1699         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1700 }
1701
1702 void
1703 rte_eth_dev_close(uint16_t port_id)
1704 {
1705         struct rte_eth_dev *dev;
1706
1707         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1708         dev = &rte_eth_devices[port_id];
1709
1710         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1711         dev->data->dev_started = 0;
1712         (*dev->dev_ops->dev_close)(dev);
1713
1714         /* check behaviour flag - temporary for PMD migration */
1715         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1716                 /* new behaviour: send event + reset state + free all data */
1717                 rte_eth_dev_release_port(dev);
1718                 return;
1719         }
1720         RTE_ETHDEV_LOG(DEBUG, "Port closing uses the old behaviour.\n"
1721                         "The driver %s should migrate to the new behaviour.\n",
1722                         dev->device->driver->name);
1723         /* old behaviour: only free queue arrays */
1724         dev->data->nb_rx_queues = 0;
1725         rte_free(dev->data->rx_queues);
1726         dev->data->rx_queues = NULL;
1727         dev->data->nb_tx_queues = 0;
1728         rte_free(dev->data->tx_queues);
1729         dev->data->tx_queues = NULL;
1730 }
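
/*
 * Editor's note: the matching teardown sketch (hypothetical). Under the
 * new RTE_ETH_DEV_CLOSE_REMOVE behaviour, close releases the whole port;
 * under the old behaviour it only frees the queue arrays.
 */
#if 0
static void
example_port_teardown(uint16_t port_id)
{
        rte_eth_dev_stop(port_id);      /* logs and returns if already stopped */
        rte_eth_dev_close(port_id);
}
#endif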
1731
1732 int
1733 rte_eth_dev_reset(uint16_t port_id)
1734 {
1735         struct rte_eth_dev *dev;
1736         int ret;
1737
1738         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1739         dev = &rte_eth_devices[port_id];
1740
1741         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1742
1743         rte_eth_dev_stop(port_id);
1744         ret = dev->dev_ops->dev_reset(dev);
1745
1746         return eth_err(port_id, ret);
1747 }
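
/*
 * Editor's note: rte_eth_dev_reset() only stops the port and runs the
 * driver's reset hook; the application must rebuild the configuration
 * afterwards. A recovery sketch reusing the hypothetical
 * example_port_start() above:
 */
#if 0
static int
example_handle_reset(uint16_t port_id, struct rte_mempool *mp)
{
        int ret = rte_eth_dev_reset(port_id);

        if (ret != 0)
                return ret;
        /* Reconfigure from scratch: configure, queue setup, start. */
        return example_port_start(port_id, mp);
}
#endif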
1748
1749 int
1750 rte_eth_dev_is_removed(uint16_t port_id)
1751 {
1752         struct rte_eth_dev *dev;
1753         int ret;
1754
1755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1756
1757         dev = &rte_eth_devices[port_id];
1758
1759         if (dev->state == RTE_ETH_DEV_REMOVED)
1760                 return 1;
1761
1762         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1763
1764         ret = dev->dev_ops->is_removed(dev);
1765         if (ret != 0)
1766                 /* Device is physically removed. */
1767                 dev->state = RTE_ETH_DEV_REMOVED;
1768
1769         return ret;
1770 }
1771
1772 int
1773 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1774                        uint16_t nb_rx_desc, unsigned int socket_id,
1775                        const struct rte_eth_rxconf *rx_conf,
1776                        struct rte_mempool *mp)
1777 {
1778         int ret;
1779         uint32_t mbp_buf_size;
1780         struct rte_eth_dev *dev;
1781         struct rte_eth_dev_info dev_info;
1782         struct rte_eth_rxconf local_conf;
1783         void **rxq;
1784
1785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1786
1787         dev = &rte_eth_devices[port_id];
1788         if (rx_queue_id >= dev->data->nb_rx_queues) {
1789                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1790                 return -EINVAL;
1791         }
1792
1793         if (mp == NULL) {
1794                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1795                 return -EINVAL;
1796         }
1797
1798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1799
1800         /*
1801          * Check the size of the mbuf data buffer.
1802          * This value must be provided in the private data of the memory pool.
1803          * First check that the memory pool has a valid private data.
1804          */
1805         ret = rte_eth_dev_info_get(port_id, &dev_info);
1806         if (ret != 0)
1807                 return ret;
1808
1809         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1810                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1811                         mp->name, (int)mp->private_data_size,
1812                         (int)sizeof(struct rte_pktmbuf_pool_private));
1813                 return -ENOSPC;
1814         }
1815         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1816
1817         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1818                 RTE_ETHDEV_LOG(ERR,
1819                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1820                         mp->name, (int)mbp_buf_size,
1821                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1822                         (int)RTE_PKTMBUF_HEADROOM,
1823                         (int)dev_info.min_rx_bufsize);
1824                 return -EINVAL;
1825         }
1826
1827         /* Use default specified by driver, if nb_rx_desc is zero */
1828         if (nb_rx_desc == 0) {
1829                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1830                 /* If driver default is also zero, fall back on EAL default */
1831                 if (nb_rx_desc == 0)
1832                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1833         }
1834
1835         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1836                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1837                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1838
1839                 RTE_ETHDEV_LOG(ERR,
1840                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1841                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1842                         dev_info.rx_desc_lim.nb_min,
1843                         dev_info.rx_desc_lim.nb_align);
1844                 return -EINVAL;
1845         }
1846
1847         if (dev->data->dev_started &&
1848                 !(dev_info.dev_capa &
1849                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1850                 return -EBUSY;
1851
1852         if (dev->data->dev_started &&
1853                 (dev->data->rx_queue_state[rx_queue_id] !=
1854                         RTE_ETH_QUEUE_STATE_STOPPED))
1855                 return -EBUSY;
1856
1857         rxq = dev->data->rx_queues;
1858         if (rxq[rx_queue_id]) {
1859                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1860                                         -ENOTSUP);
1861                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1862                 rxq[rx_queue_id] = NULL;
1863         }
1864
1865         if (rx_conf == NULL)
1866                 rx_conf = &dev_info.default_rxconf;
1867
1868         local_conf = *rx_conf;
1869
1870         /*
1871          * If an offloading has already been enabled in
1872          * rte_eth_dev_configure(), it has been enabled on all queues,
1873          * so there is no need to enable it in this queue again.
1874          * The local_conf.offloads input to underlying PMD only carries
1875          * those offloadings which are only enabled on this queue and
1876          * not enabled on all queues.
1877          */
1878         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1879
1880         /*
1881          * New added offloadings for this queue are those not enabled in
1882          * rte_eth_dev_configure() and they must be per-queue type.
1883          * A pure per-port offloading can't be enabled on a queue while
1884          * disabled on another queue. A pure per-port offloading can't
1885          * be enabled for any queue as new added one if it hasn't been
1886          * enabled in rte_eth_dev_configure().
1887          */
1888         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1889              local_conf.offloads) {
1890                 RTE_ETHDEV_LOG(ERR,
1891                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1892                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1893                         port_id, rx_queue_id, local_conf.offloads,
1894                         dev_info.rx_queue_offload_capa,
1895                         __func__);
1896                 return -EINVAL;
1897         }
1898
1899         /*
1900          * If LRO is enabled, check that the maximum aggregated packet
1901          * size is supported by the configured device.
1902          */
1903         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1904                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1905                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1906                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1907                 int ret = check_lro_pkt_size(port_id,
1908                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1909                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1910                                 dev_info.max_lro_pkt_size);
1911                 if (ret != 0)
1912                         return ret;
1913         }
1914
1915         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1916                                               socket_id, &local_conf, mp);
1917         if (!ret) {
1918                 if (!dev->data->min_rx_buf_size ||
1919                     dev->data->min_rx_buf_size > mbp_buf_size)
1920                         dev->data->min_rx_buf_size = mbp_buf_size;
1921         }
1922
1923         return eth_err(port_id, ret);
1924 }
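
/*
 * Editor's note: a sketch (hypothetical names) of how the mempool relates
 * to the buffer-size check above. RTE_MBUF_DEFAULT_BUF_SIZE already
 * includes RTE_PKTMBUF_HEADROOM, so it satisfies min_rx_bufsize on
 * typical devices.
 */
#if 0
static int
example_rx_queue(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mempool *mp;

        mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (mp == NULL)
                return -rte_errno;
        /* nb_rx_desc = 0 selects the driver (or EAL fallback) default. */
        return rte_eth_rx_queue_setup(port_id, queue_id, 0,
                        rte_eth_dev_socket_id(port_id), NULL, mp);
}
#endif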
1925
1926 int
1927 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1928                                uint16_t nb_rx_desc,
1929                                const struct rte_eth_hairpin_conf *conf)
1930 {
1931         int ret;
1932         struct rte_eth_dev *dev;
1933         struct rte_eth_hairpin_cap cap;
1934         void **rxq;
1935         int i;
1936         int count;
1937
1938         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1939
1940         dev = &rte_eth_devices[port_id];
1941         if (rx_queue_id >= dev->data->nb_rx_queues) {
1942                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1943                 return -EINVAL;
1944         }
1945         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1946         if (ret != 0)
1947                 return ret;
1948         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1949                                 -ENOTSUP);
1950         /* if nb_rx_desc is zero use max number of desc from the driver. */
1951         if (nb_rx_desc == 0)
1952                 nb_rx_desc = cap.max_nb_desc;
1953         if (nb_rx_desc > cap.max_nb_desc) {
1954                 RTE_ETHDEV_LOG(ERR,
1955                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1956                         nb_rx_desc, cap.max_nb_desc);
1957                 return -EINVAL;
1958         }
1959         if (conf->peer_count > cap.max_rx_2_tx) {
1960                 RTE_ETHDEV_LOG(ERR,
1961                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1962                         conf->peer_count, cap.max_rx_2_tx);
1963                 return -EINVAL;
1964         }
1965         if (conf->peer_count == 0) {
1966                 RTE_ETHDEV_LOG(ERR,
1967                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
1968                         conf->peer_count);
1969                 return -EINVAL;
1970         }
1971         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1972              cap.max_nb_queues != UINT16_MAX; i++) {
1973                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1974                         count++;
1975         }
1976         if (count > cap.max_nb_queues) {
1977                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1978                         cap.max_nb_queues);
1979                 return -EINVAL;
1980         }
1981         if (dev->data->dev_started)
1982                 return -EBUSY;
1983         rxq = dev->data->rx_queues;
1984         if (rxq[rx_queue_id] != NULL) {
1985                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1986                                         -ENOTSUP);
1987                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1988                 rxq[rx_queue_id] = NULL;
1989         }
1990         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1991                                                       nb_rx_desc, conf);
1992         if (ret == 0)
1993                 dev->data->rx_queue_state[rx_queue_id] =
1994                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1995         return eth_err(port_id, ret);
1996 }
1997
1998 int
1999 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2000                        uint16_t nb_tx_desc, unsigned int socket_id,
2001                        const struct rte_eth_txconf *tx_conf)
2002 {
2003         struct rte_eth_dev *dev;
2004         struct rte_eth_dev_info dev_info;
2005         struct rte_eth_txconf local_conf;
2006         void **txq;
2007         int ret;
2008
2009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2010
2011         dev = &rte_eth_devices[port_id];
2012         if (tx_queue_id >= dev->data->nb_tx_queues) {
2013                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2014                 return -EINVAL;
2015         }
2016
2017         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2018
2019         ret = rte_eth_dev_info_get(port_id, &dev_info);
2020         if (ret != 0)
2021                 return ret;
2022
2023         /* Use default specified by driver, if nb_tx_desc is zero */
2024         if (nb_tx_desc == 0) {
2025                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2026                 /* If driver default is zero, fall back on EAL default */
2027                 if (nb_tx_desc == 0)
2028                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2029         }
2030         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2031             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2032             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2033                 RTE_ETHDEV_LOG(ERR,
2034                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2035                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2036                         dev_info.tx_desc_lim.nb_min,
2037                         dev_info.tx_desc_lim.nb_align);
2038                 return -EINVAL;
2039         }
2040
2041         if (dev->data->dev_started &&
2042                 !(dev_info.dev_capa &
2043                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2044                 return -EBUSY;
2045
2046         if (dev->data->dev_started &&
2047                 (dev->data->tx_queue_state[tx_queue_id] !=
2048                         RTE_ETH_QUEUE_STATE_STOPPED))
2049                 return -EBUSY;
2050
2051         txq = dev->data->tx_queues;
2052         if (txq[tx_queue_id]) {
2053                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2054                                         -ENOTSUP);
2055                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2056                 txq[tx_queue_id] = NULL;
2057         }
2058
2059         if (tx_conf == NULL)
2060                 tx_conf = &dev_info.default_txconf;
2061
2062         local_conf = *tx_conf;
2063
2064         /*
2065          * If an offloading has already been enabled in
2066          * rte_eth_dev_configure(), it has been enabled on all queues,
2067          * so there is no need to enable it in this queue again.
2068          * The local_conf.offloads input to underlying PMD only carries
2069          * those offloadings which are only enabled on this queue and
2070          * not enabled on all queues.
2071          */
2072         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2073
2074         /*
2075          * New added offloadings for this queue are those not enabled in
2076          * rte_eth_dev_configure() and they must be per-queue type.
2077          * A pure per-port offloading can't be enabled on a queue while
2078          * disabled on another queue. A pure per-port offloading can't
2079          * be enabled for any queue as new added one if it hasn't been
2080          * enabled in rte_eth_dev_configure().
2081          */
2082         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2083              local_conf.offloads) {
2084                 RTE_ETHDEV_LOG(ERR,
2085                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2086                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2087                         port_id, tx_queue_id, local_conf.offloads,
2088                         dev_info.tx_queue_offload_capa,
2089                         __func__);
2090                 return -EINVAL;
2091         }
2092
2093         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2094                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2095 }
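
/*
 * Editor's note: a sketch of the per-queue offload rule explained above.
 * An offload enabled port-wide in rte_eth_dev_configure() is masked out
 * before reaching the PMD; only queue-local additions must fall within
 * tx_queue_offload_capa. Names here are illustrative.
 */
#if 0
static int
example_tx_queue_fast_free(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;
        int ret = rte_eth_dev_info_get(port_id, &dev_info);

        if (ret != 0)
                return ret;
        txconf = dev_info.default_txconf;
        /* Enable mbuf fast free on this queue only, if per-queue capable. */
        if (dev_info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        return rte_eth_tx_queue_setup(port_id, queue_id, 0,
                        rte_eth_dev_socket_id(port_id), &txconf);
}
#endif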
2096
2097 int
2098 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2099                                uint16_t nb_tx_desc,
2100                                const struct rte_eth_hairpin_conf *conf)
2101 {
2102         struct rte_eth_dev *dev;
2103         struct rte_eth_hairpin_cap cap;
2104         void **txq;
2105         int i;
2106         int count;
2107         int ret;
2108
2109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2110         dev = &rte_eth_devices[port_id];
2111         if (tx_queue_id >= dev->data->nb_tx_queues) {
2112                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2113                 return -EINVAL;
2114         }
2115         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2116         if (ret != 0)
2117                 return ret;
2118         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2119                                 -ENOTSUP);
2120         /* if nb_tx_desc is zero use max number of desc from the driver. */
2121         if (nb_tx_desc == 0)
2122                 nb_tx_desc = cap.max_nb_desc;
2123         if (nb_tx_desc > cap.max_nb_desc) {
2124                 RTE_ETHDEV_LOG(ERR,
2125                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2126                         nb_tx_desc, cap.max_nb_desc);
2127                 return -EINVAL;
2128         }
2129         if (conf->peer_count > cap.max_tx_2_rx) {
2130                 RTE_ETHDEV_LOG(ERR,
2131                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2132                         conf->peer_count, cap.max_tx_2_rx);
2133                 return -EINVAL;
2134         }
2135         if (conf->peer_count == 0) {
2136                 RTE_ETHDEV_LOG(ERR,
2137                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2138                         conf->peer_count);
2139                 return -EINVAL;
2140         }
2141         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2142              cap.max_nb_queues != UINT16_MAX; i++) {
2143                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2144                         count++;
2145         }
2146         if (count > cap.max_nb_queues) {
2147                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2148                         cap.max_nb_queues);
2149                 return -EINVAL;
2150         }
2151         if (dev->data->dev_started)
2152                 return -EBUSY;
2153         txq = dev->data->tx_queues;
2154         if (txq[tx_queue_id] != NULL) {
2155                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2156                                         -ENOTSUP);
2157                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2158                 txq[tx_queue_id] = NULL;
2159         }
2160         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2161                 (dev, tx_queue_id, nb_tx_desc, conf);
2162         if (ret == 0)
2163                 dev->data->tx_queue_state[tx_queue_id] =
2164                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2165         return eth_err(port_id, ret);
2166 }
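
/*
 * Editor's note: a hairpin sketch, assuming the rte_eth_hairpin_conf
 * layout of this release (peer_count plus a peers[] array of port/queue
 * pairs). Rx queue q is bound to Tx queue q of the same port; both setups
 * must happen before rte_eth_dev_start().
 */
#if 0
static int
example_hairpin_pair(uint16_t port_id, uint16_t q)
{
        struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
        int ret;

        conf.peers[0].port = port_id;
        conf.peers[0].queue = q;
        ret = rte_eth_rx_hairpin_queue_setup(port_id, q, 0, &conf);
        if (ret != 0)
                return ret;
        return rte_eth_tx_hairpin_queue_setup(port_id, q, 0, &conf);
}
#endif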
2167
2168 void
2169 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2170                 void *userdata __rte_unused)
2171 {
2172         unsigned i;
2173
2174         for (i = 0; i < unsent; i++)
2175                 rte_pktmbuf_free(pkts[i]);
2176 }
2177
2178 void
2179 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2180                 void *userdata)
2181 {
2182         uint64_t *count = userdata;
2183         unsigned i;
2184
2185         for (i = 0; i < unsent; i++)
2186                 rte_pktmbuf_free(pkts[i]);
2187
2188         *count += unsent;
2189 }
2190
2191 int
2192 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2193                 buffer_tx_error_fn cbfn, void *userdata)
2194 {
2195         buffer->error_callback = cbfn;
2196         buffer->error_userdata = userdata;
2197         return 0;
2198 }
2199
2200 int
2201 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2202 {
2203         int ret = 0;
2204
2205         if (buffer == NULL)
2206                 return -EINVAL;
2207
2208         buffer->size = size;
2209         if (buffer->error_callback == NULL) {
2210                 ret = rte_eth_tx_buffer_set_err_callback(
2211                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2212         }
2213
2214         return ret;
2215 }
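
/*
 * Editor's note: a buffered-Tx sketch (hypothetical helper). The
 * RTE_ETH_TX_BUFFER_SIZE() macro sizes the trailing mbuf-pointer array;
 * the count callback above tallies drops into a caller-owned counter.
 */
#if 0
static uint64_t example_tx_drops;

static struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(int socket)
{
        struct rte_eth_dev_tx_buffer *buf;

        buf = rte_zmalloc_socket("example_tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(32), 0, socket);
        if (buf == NULL)
                return NULL;
        rte_eth_tx_buffer_init(buf, 32);
        rte_eth_tx_buffer_set_err_callback(buf,
                        rte_eth_tx_buffer_count_callback, &example_tx_drops);
        return buf;
}
/*
 * Datapath: rte_eth_tx_buffer(port, queue, buf, mbuf) enqueues and sends
 * a burst when full; rte_eth_tx_buffer_flush() drains the remainder.
 */
#endif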
2216
2217 int
2218 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2219 {
2220         struct rte_eth_dev *dev;
2221         int ret;
2222         /* Validate input data; bail out if not valid or not supported. */
2223         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2224         dev = &rte_eth_devices[port_id];
2225         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2226
2227         /* Call driver to free pending mbufs. */
2228         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2229                                                free_cnt);
2230         return eth_err(port_id, ret);
2231 }
2232
2233 int
2234 rte_eth_promiscuous_enable(uint16_t port_id)
2235 {
2236         struct rte_eth_dev *dev;
2237         int diag = 0;
2238
2239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2240         dev = &rte_eth_devices[port_id];
2241
2242         if (dev->data->promiscuous == 1)
2243                 return 0;
2244
2245         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2246
2247         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2248         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2249
2250         return eth_err(port_id, diag);
2251 }
2252
2253 int
2254 rte_eth_promiscuous_disable(uint16_t port_id)
2255 {
2256         struct rte_eth_dev *dev;
2257         int diag = 0;
2258
2259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260         dev = &rte_eth_devices[port_id];
2261
2262         if (dev->data->promiscuous == 0)
2263                 return 0;
2264
2265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2266
2267         dev->data->promiscuous = 0;
2268         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2269         if (diag != 0)
2270                 dev->data->promiscuous = 1;
2271
2272         return eth_err(port_id, diag);
2273 }
2274
2275 int
2276 rte_eth_promiscuous_get(uint16_t port_id)
2277 {
2278         struct rte_eth_dev *dev;
2279
2280         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2281
2282         dev = &rte_eth_devices[port_id];
2283         return dev->data->promiscuous;
2284 }
2285
2286 int
2287 rte_eth_allmulticast_enable(uint16_t port_id)
2288 {
2289         struct rte_eth_dev *dev;
2290         int diag;
2291
2292         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2293         dev = &rte_eth_devices[port_id];
2294
2295         if (dev->data->all_multicast == 1)
2296                 return 0;
2297
2298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2299         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2300         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2301
2302         return eth_err(port_id, diag);
2303 }
2304
2305 int
2306 rte_eth_allmulticast_disable(uint16_t port_id)
2307 {
2308         struct rte_eth_dev *dev;
2309         int diag;
2310
2311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2312         dev = &rte_eth_devices[port_id];
2313
2314         if (dev->data->all_multicast == 0)
2315                 return 0;
2316
2317         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2318         dev->data->all_multicast = 0;
2319         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2320         if (diag != 0)
2321                 dev->data->all_multicast = 1;
2322
2323         return eth_err(port_id, diag);
2324 }
2325
2326 int
2327 rte_eth_allmulticast_get(uint16_t port_id)
2328 {
2329         struct rte_eth_dev *dev;
2330
2331         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2332
2333         dev = &rte_eth_devices[port_id];
2334         return dev->data->all_multicast;
2335 }
2336
2337 int
2338 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2339 {
2340         struct rte_eth_dev *dev;
2341
2342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2343         dev = &rte_eth_devices[port_id];
2344
2345         if (dev->data->dev_conf.intr_conf.lsc &&
2346             dev->data->dev_started)
2347                 rte_eth_linkstatus_get(dev, eth_link);
2348         else {
2349                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2350                 (*dev->dev_ops->link_update)(dev, 1);
2351                 *eth_link = dev->data->dev_link;
2352         }
2353
2354         return 0;
2355 }
2356
2357 int
2358 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363         dev = &rte_eth_devices[port_id];
2364
2365         if (dev->data->dev_conf.intr_conf.lsc &&
2366             dev->data->dev_started)
2367                 rte_eth_linkstatus_get(dev, eth_link);
2368         else {
2369                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2370                 (*dev->dev_ops->link_update)(dev, 0);
2371                 *eth_link = dev->data->dev_link;
2372         }
2373
2374         return 0;
2375 }
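
/*
 * Editor's note: a polling sketch. The nowait variant asks the driver not
 * to block while the link trains, which suits periodic status checks.
 */
#if 0
static void
example_log_link(uint16_t port_id)
{
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) == 0)
                printf("port %u: link %s, %"PRIu32" Mbps\n", port_id,
                        link.link_status == ETH_LINK_UP ? "up" : "down",
                        link.link_speed);
}
#endif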
2376
2377 int
2378 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2379 {
2380         struct rte_eth_dev *dev;
2381
2382         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2383
2384         dev = &rte_eth_devices[port_id];
2385         memset(stats, 0, sizeof(*stats));
2386
2387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2388         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2389         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2390 }
2391
2392 int
2393 rte_eth_stats_reset(uint16_t port_id)
2394 {
2395         struct rte_eth_dev *dev;
2396         int ret;
2397
2398         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2399         dev = &rte_eth_devices[port_id];
2400
2401         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2402         ret = (*dev->dev_ops->stats_reset)(dev);
2403         if (ret != 0)
2404                 return eth_err(port_id, ret);
2405
2406         dev->data->rx_mbuf_alloc_failed = 0;
2407
2408         return 0;
2409 }
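
/*
 * Editor's note: a basic-stats sketch; the fields mirror the
 * rte_stats_strings[] table at the top of this file.
 */
#if 0
static void
example_log_stats(uint16_t port_id)
{
        struct rte_eth_stats st;

        if (rte_eth_stats_get(port_id, &st) != 0)
                return;
        printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64" nombuf=%"PRIu64"\n",
                st.ipackets, st.opackets, st.imissed, st.rx_nombuf);
        rte_eth_stats_reset(port_id);   /* counters restart from zero */
}
#endif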
2410
2411 static inline int
2412 get_xstats_basic_count(struct rte_eth_dev *dev)
2413 {
2414         uint16_t nb_rxqs, nb_txqs;
2415         int count;
2416
2417         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2418         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2419
2420         count = RTE_NB_STATS;
2421         count += nb_rxqs * RTE_NB_RXQ_STATS;
2422         count += nb_txqs * RTE_NB_TXQ_STATS;
2423
2424         return count;
2425 }
2426
2427 static int
2428 get_xstats_count(uint16_t port_id)
2429 {
2430         struct rte_eth_dev *dev;
2431         int count;
2432
2433         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2434         dev = &rte_eth_devices[port_id];
2435         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2436                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2437                                 NULL, 0);
2438                 if (count < 0)
2439                         return eth_err(port_id, count);
2440         }
2441         if (dev->dev_ops->xstats_get_names != NULL) {
2442                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2443                 if (count < 0)
2444                         return eth_err(port_id, count);
2445         } else
2446                 count = 0;
2447
2449         count += get_xstats_basic_count(dev);
2450
2451         return count;
2452 }
2453
2454 int
2455 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2456                 uint64_t *id)
2457 {
2458         int cnt_xstats, idx_xstat;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2461
2462         if (!id) {
2463                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2464                 return -ENOMEM;
2465         }
2466
2467         if (!xstat_name) {
2468                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2469                 return -ENOMEM;
2470         }
2471
2472         /* Get count */
2473         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2474         if (cnt_xstats < 0) {
2475                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2476                 return -ENODEV;
2477         }
2478
2479         /* Get id-name lookup table */
2480         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2481
2482         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2483                         port_id, xstats_names, cnt_xstats, NULL)) {
2484                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2485                 return -1;
2486         }
2487
2488         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2489                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2490                         *id = idx_xstat;
2491                         return 0;
2492                 }
2493         }
2494
2495         return -EINVAL;
2496 }
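
/*
 * Editor's note: the intended pairing of the lookup above with
 * rte_eth_xstats_get_by_id(). "rx_good_packets" comes from
 * rte_stats_strings[] and is therefore always present.
 */
#if 0
static int
example_read_one_xstat(uint16_t port_id, uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id);
        if (ret != 0)
                return ret;
        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        return ret == 1 ? 0 : ret;
}
#endif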
2497
2498 /* retrieve basic stats names */
2499 static int
2500 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2501         struct rte_eth_xstat_name *xstats_names)
2502 {
2503         int cnt_used_entries = 0;
2504         uint32_t idx, id_queue;
2505         uint16_t num_q;
2506
2507         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2508                 strlcpy(xstats_names[cnt_used_entries].name,
2509                         rte_stats_strings[idx].name,
2510                         sizeof(xstats_names[0].name));
2511                 cnt_used_entries++;
2512         }
2513         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2514         for (id_queue = 0; id_queue < num_q; id_queue++) {
2515                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2516                         snprintf(xstats_names[cnt_used_entries].name,
2517                                 sizeof(xstats_names[0].name),
2518                                 "rx_q%u%s",
2519                                 id_queue, rte_rxq_stats_strings[idx].name);
2520                         cnt_used_entries++;
2521                 }
2522
2523         }
2524         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2525         for (id_queue = 0; id_queue < num_q; id_queue++) {
2526                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2527                         snprintf(xstats_names[cnt_used_entries].name,
2528                                 sizeof(xstats_names[0].name),
2529                                 "tx_q%u%s",
2530                                 id_queue, rte_txq_stats_strings[idx].name);
2531                         cnt_used_entries++;
2532                 }
2533         }
2534         return cnt_used_entries;
2535 }
2536
2537 /* retrieve ethdev extended statistics names */
2538 int
2539 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2540         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2541         uint64_t *ids)
2542 {
2543         struct rte_eth_xstat_name *xstats_names_copy;
2544         unsigned int no_basic_stat_requested = 1;
2545         unsigned int no_ext_stat_requested = 1;
2546         unsigned int expected_entries;
2547         unsigned int basic_count;
2548         struct rte_eth_dev *dev;
2549         unsigned int i;
2550         int ret;
2551
2552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2553         dev = &rte_eth_devices[port_id];
2554
2555         basic_count = get_xstats_basic_count(dev);
2556         ret = get_xstats_count(port_id);
2557         if (ret < 0)
2558                 return ret;
2559         expected_entries = (unsigned int)ret;
2560
2561         /* Return max number of stats if no ids given */
2562         if (!ids) {
2563                 if (!xstats_names)
2564                         return expected_entries;
2565                 else if (xstats_names && size < expected_entries)
2566                         return expected_entries;
2567         }
2568
2569         if (ids && !xstats_names)
2570                 return -EINVAL;
2571
2572         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2573                 uint64_t ids_copy[size];
2574
2575                 for (i = 0; i < size; i++) {
2576                         if (ids[i] < basic_count) {
2577                                 no_basic_stat_requested = 0;
2578                                 break;
2579                         }
2580
2581                         /*
2582                          * Convert ids to xstats ids that PMD knows.
2583                          * ids known by user are basic + extended stats.
2584                          */
2585                         ids_copy[i] = ids[i] - basic_count;
2586                 }
2587
2588                 if (no_basic_stat_requested)
2589                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2590                                         xstats_names, ids_copy, size);
2591         }
2592
2593         /* Retrieve all stats */
2594         if (!ids) {
2595                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2596                                 expected_entries);
2597                 if (num_stats < 0 || num_stats > (int)expected_entries)
2598                         return num_stats;
2599                 else
2600                         return expected_entries;
2601         }
2602
2603         xstats_names_copy = calloc(expected_entries,
2604                 sizeof(struct rte_eth_xstat_name));
2605
2606         if (!xstats_names_copy) {
2607                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2608                 return -ENOMEM;
2609         }
2610
2611         if (ids) {
2612                 for (i = 0; i < size; i++) {
2613                         if (ids[i] >= basic_count) {
2614                                 no_ext_stat_requested = 0;
2615                                 break;
2616                         }
2617                 }
2618         }
2619
2620         /* Fill xstats_names_copy structure */
2621         if (ids && no_ext_stat_requested) {
2622                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2623         } else {
2624                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2625                         expected_entries);
2626                 if (ret < 0) {
2627                         free(xstats_names_copy);
2628                         return ret;
2629                 }
2630         }
2631
2632         /* Filter stats */
2633         for (i = 0; i < size; i++) {
2634                 if (ids[i] >= expected_entries) {
2635                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2636                         free(xstats_names_copy);
2637                         return -1;
2638                 }
2639                 xstats_names[i] = xstats_names_copy[ids[i]];
2640         }
2641
2642         free(xstats_names_copy);
2643         return size;
2644 }
2645
2646 int
2647 rte_eth_xstats_get_names(uint16_t port_id,
2648         struct rte_eth_xstat_name *xstats_names,
2649         unsigned int size)
2650 {
2651         struct rte_eth_dev *dev;
2652         int cnt_used_entries;
2653         int cnt_expected_entries;
2654         int cnt_driver_entries;
2655
2656         cnt_expected_entries = get_xstats_count(port_id);
2657         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2658                         (int)size < cnt_expected_entries)
2659                 return cnt_expected_entries;
2660
2661         /* port_id checked in get_xstats_count() */
2662         dev = &rte_eth_devices[port_id];
2663
2664         cnt_used_entries = rte_eth_basic_stats_get_names(
2665                 dev, xstats_names);
2666
2667         if (dev->dev_ops->xstats_get_names != NULL) {
2668                 /* If there are any driver-specific xstats, append them
2669                  * to end of list.
2670                  */
2671                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2672                         dev,
2673                         xstats_names + cnt_used_entries,
2674                         size - cnt_used_entries);
2675                 if (cnt_driver_entries < 0)
2676                         return eth_err(port_id, cnt_driver_entries);
2677                 cnt_used_entries += cnt_driver_entries;
2678         }
2679
2680         return cnt_used_entries;
2681 }
2682
2683
2684 static int
2685 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2686 {
2687         struct rte_eth_dev *dev;
2688         struct rte_eth_stats eth_stats;
2689         unsigned int count = 0, i, q;
2690         uint64_t val, *stats_ptr;
2691         uint16_t nb_rxqs, nb_txqs;
2692         int ret;
2693
2694         ret = rte_eth_stats_get(port_id, &eth_stats);
2695         if (ret < 0)
2696                 return ret;
2697
2698         dev = &rte_eth_devices[port_id];
2699
2700         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2701         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2702
2703         /* global stats */
2704         for (i = 0; i < RTE_NB_STATS; i++) {
2705                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2706                                         rte_stats_strings[i].offset);
2707                 val = *stats_ptr;
2708                 xstats[count++].value = val;
2709         }
2710
2711         /* per-rxq stats */
2712         for (q = 0; q < nb_rxqs; q++) {
2713                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2714                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2715                                         rte_rxq_stats_strings[i].offset +
2716                                         q * sizeof(uint64_t));
2717                         val = *stats_ptr;
2718                         xstats[count++].value = val;
2719                 }
2720         }
2721
2722         /* per-txq stats */
2723         for (q = 0; q < nb_txqs; q++) {
2724                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2725                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2726                                         rte_txq_stats_strings[i].offset +
2727                                         q * sizeof(uint64_t));
2728                         val = *stats_ptr;
2729                         xstats[count++].value = val;
2730                 }
2731         }
2732         return count;
2733 }
2734
2735 /* retrieve ethdev extended statistics */
2736 int
2737 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2738                          uint64_t *values, unsigned int size)
2739 {
2740         unsigned int no_basic_stat_requested = 1;
2741         unsigned int no_ext_stat_requested = 1;
2742         unsigned int num_xstats_filled;
2743         unsigned int basic_count;
2744         uint16_t expected_entries;
2745         struct rte_eth_dev *dev;
2746         unsigned int i;
2747         int ret;
2748
2749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2750         ret = get_xstats_count(port_id);
2751         if (ret < 0)
2752                 return ret;
2753         expected_entries = (uint16_t)ret;
2754         struct rte_eth_xstat xstats[expected_entries];
2755         dev = &rte_eth_devices[port_id];
2756         basic_count = get_xstats_basic_count(dev);
2757
2758         /* Return max number of stats if no ids given */
2759         if (!ids) {
2760                 if (!values)
2761                         return expected_entries;
2762                 else if (values && size < expected_entries)
2763                         return expected_entries;
2764         }
2765
2766         if (ids && !values)
2767                 return -EINVAL;
2768
2769         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2770                 /* reuse basic_count computed above */
2771                 uint64_t ids_copy[size];
2772
2773                 for (i = 0; i < size; i++) {
2774                         if (ids[i] < basic_count) {
2775                                 no_basic_stat_requested = 0;
2776                                 break;
2777                         }
2778
2779                         /*
2780                          * Convert ids to xstats ids that PMD knows.
2781                          * ids known by user are basic + extended stats.
2782                          */
2783                         ids_copy[i] = ids[i] - basic_count;
2784                 }
2785
2786                 if (no_basic_stat_requested)
2787                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2788                                         values, size);
2789         }
2790
2791         if (ids) {
2792                 for (i = 0; i < size; i++) {
2793                         if (ids[i] >= basic_count) {
2794                                 no_ext_stat_requested = 0;
2795                                 break;
2796                         }
2797                 }
2798         }
2799
2800         /* Fill the xstats structure */
2801         if (ids && no_ext_stat_requested)
2802                 ret = rte_eth_basic_stats_get(port_id, xstats);
2803         else
2804                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2805
2806         if (ret < 0)
2807                 return ret;
2808         num_xstats_filled = (unsigned int)ret;
2809
2810         /* Return all stats */
2811         if (!ids) {
2812                 for (i = 0; i < num_xstats_filled; i++)
2813                         values[i] = xstats[i].value;
2814                 return expected_entries;
2815         }
2816
2817         /* Filter stats */
2818         for (i = 0; i < size; i++) {
2819                 if (ids[i] >= expected_entries) {
2820                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2821                         return -1;
2822                 }
2823                 values[i] = xstats[ids[i]].value;
2824         }
2825         return size;
2826 }
2827
2828 int
2829 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2830         unsigned int n)
2831 {
2832         struct rte_eth_dev *dev;
2833         unsigned int count = 0, i;
2834         signed int xcount = 0;
2835         uint16_t nb_rxqs, nb_txqs;
2836         int ret;
2837
2838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2839
2840         dev = &rte_eth_devices[port_id];
2841
2842         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2843         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2844
2845         /* Return generic statistics */
2846         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2847                 (nb_txqs * RTE_NB_TXQ_STATS);
2848
2849         /* implemented by the driver */
2850         if (dev->dev_ops->xstats_get != NULL) {
2851                 /* Retrieve the xstats from the driver at the end of the
2852                  * xstats struct.
2853                  */
2854                 xcount = (*dev->dev_ops->xstats_get)(dev,
2855                                      xstats ? xstats + count : NULL,
2856                                      (n > count) ? n - count : 0);
2857
2858                 if (xcount < 0)
2859                         return eth_err(port_id, xcount);
2860         }
2861
2862         if (n < count + xcount || xstats == NULL)
2863                 return count + xcount;
2864
2865         /* now fill the xstats structure */
2866         ret = rte_eth_basic_stats_get(port_id, xstats);
2867         if (ret < 0)
2868                 return ret;
2869         count = ret;
2870
2871         for (i = 0; i < count; i++)
2872                 xstats[i].id = i;
2873         /* add an offset to driver-specific stats */
2874         for ( ; i < count + xcount; i++)
2875                 xstats[i].id += count;
2876
2877         return count + xcount;
2878 }
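
/*
 * Editor's note: the canonical two-pass pattern for the full xstats set:
 * probe with NULL to learn the count, then fetch names and values with
 * equally sized arrays.
 */
#if 0
static void
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *vals;
        int i, n;

        n = rte_eth_xstats_get(port_id, NULL, 0);
        if (n <= 0)
                return;
        names = calloc(n, sizeof(*names));
        vals = calloc(n, sizeof(*vals));
        if (names != NULL && vals != NULL &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, vals, n) == n)
                for (i = 0; i < n; i++)
                        printf("%s: %"PRIu64"\n",
                                names[i].name, vals[i].value);
        free(names);
        free(vals);
}
#endif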
2879
2880 /* reset ethdev extended statistics */
2881 int
2882 rte_eth_xstats_reset(uint16_t port_id)
2883 {
2884         struct rte_eth_dev *dev;
2885
2886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2887         dev = &rte_eth_devices[port_id];
2888
2889         /* implemented by the driver */
2890         if (dev->dev_ops->xstats_reset != NULL)
2891                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2892
2893         /* fallback to default */
2894         return rte_eth_stats_reset(port_id);
2895 }
2896
2897 static int
2898 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2899                 uint8_t is_rx)
2900 {
2901         struct rte_eth_dev *dev;
2902
2903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2904
2905         dev = &rte_eth_devices[port_id];
2906
2907         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2908
2909         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2910                 return -EINVAL;
2911
2912         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2913                 return -EINVAL;
2914
2915         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2916                 return -EINVAL;
2917
2918         return (*dev->dev_ops->queue_stats_mapping_set)
2919                         (dev, queue_id, stat_idx, is_rx);
2920 }
2921
2922
2923 int
2924 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2925                 uint8_t stat_idx)
2926 {
2927         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2928                                                 stat_idx, STAT_QMAP_TX));
2929 }
2930
2931
2932 int
2933 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2934                 uint8_t stat_idx)
2935 {
2936         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2937                                                 stat_idx, STAT_QMAP_RX));
2938 }
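
/*
 * Usage sketch (illustrative only): on NICs that support the mapping op,
 * bind the first RX and TX queues to per-queue stat counter 0 so that
 * q_ipackets[0]/q_opackets[0] in struct rte_eth_stats reflect them.
 * Drivers without queue_stats_mapping_set return -ENOTSUP.
 *
 *	if (rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0) != 0 ||
 *	    rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 0) != 0)
 *		... mapping unsupported, or queue/index out of range ...
 */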
2939
2940 int
2941 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2942 {
2943         struct rte_eth_dev *dev;
2944
2945         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2946         dev = &rte_eth_devices[port_id];
2947
2948         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2949         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2950                                                         fw_version, fw_size));
2951 }
2952
2953 int
2954 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2955 {
2956         struct rte_eth_dev *dev;
2957         const struct rte_eth_desc_lim lim = {
2958                 .nb_max = UINT16_MAX,
2959                 .nb_min = 0,
2960                 .nb_align = 1,
2961                 .nb_seg_max = UINT16_MAX,
2962                 .nb_mtu_seg_max = UINT16_MAX,
2963         };
2964         int diag;
2965
2966         /*
2967          * Init dev_info before the port_id check: a caller that ignores
2968          * the return status would otherwise not know the get failed.
2969          */
2970         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2971
2972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2973         dev = &rte_eth_devices[port_id];
2974
2975         dev_info->rx_desc_lim = lim;
2976         dev_info->tx_desc_lim = lim;
2977         dev_info->device = dev->device;
2978         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2979         dev_info->max_mtu = UINT16_MAX;
2980
2981         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2982         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2983         if (diag != 0) {
2984                 /* Cleanup already filled in device information */
2985                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2986                 return eth_err(port_id, diag);
2987         }
2988
2989         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
2990         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
2991                         RTE_MAX_QUEUES_PER_PORT);
2992         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
2993                         RTE_MAX_QUEUES_PER_PORT);
2994
2995         dev_info->driver_name = dev->device->driver->name;
2996         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2997         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2998
2999         dev_info->dev_flags = &dev->data->dev_flags;
3000
3001         return 0;
3002 }
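
/*
 * Usage sketch (illustrative only): query device capabilities before
 * configuring, e.g. to clamp the requested queue counts; nb_rxq/nb_txq
 * are assumed caller-provided.
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
 *		return -1;
 *	nb_rxq = RTE_MIN(nb_rxq, dev_info.max_rx_queues);
 *	nb_txq = RTE_MIN(nb_txq, dev_info.max_tx_queues);
 *	... then rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) ...
 */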
3003
3004 int
3005 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3006                                  uint32_t *ptypes, int num)
3007 {
3008         int i, j;
3009         struct rte_eth_dev *dev;
3010         const uint32_t *all_ptypes;
3011
3012         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3013         dev = &rte_eth_devices[port_id];
3014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3015         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3016
3017         if (!all_ptypes)
3018                 return 0;
3019
3020         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3021                 if (all_ptypes[i] & ptype_mask) {
3022                         if (j < num)
3023                                 ptypes[j] = all_ptypes[i];
3024                         j++;
3025                 }
3026
3027         return j;
3028 }
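
/*
 * Usage sketch (illustrative only): the return value is the number of
 * matching ptypes, which may exceed the array size passed in, so the
 * caller caps its iteration accordingly.
 *
 *	uint32_t ptypes[16];
 *	int i, nb;
 *
 *	nb = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *			ptypes, RTE_DIM(ptypes));
 *	for (i = 0; i < nb && i < (int)RTE_DIM(ptypes); i++)
 *		... ptypes[i] is e.g. RTE_PTYPE_L4_TCP ...
 */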
3029
3030 int
3031 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3032                                  uint32_t *set_ptypes, unsigned int num)
3033 {
3034         const uint32_t valid_ptype_masks[] = {
3035                 RTE_PTYPE_L2_MASK,
3036                 RTE_PTYPE_L3_MASK,
3037                 RTE_PTYPE_L4_MASK,
3038                 RTE_PTYPE_TUNNEL_MASK,
3039                 RTE_PTYPE_INNER_L2_MASK,
3040                 RTE_PTYPE_INNER_L3_MASK,
3041                 RTE_PTYPE_INNER_L4_MASK,
3042         };
3043         const uint32_t *all_ptypes;
3044         struct rte_eth_dev *dev;
3045         uint32_t unused_mask;
3046         unsigned int i, j;
3047         int ret;
3048
3049         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3050         dev = &rte_eth_devices[port_id];
3051
3052         if (num > 0 && set_ptypes == NULL)
3053                 return -EINVAL;
3054
3055         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3056                         *dev->dev_ops->dev_ptypes_set == NULL) {
3057                 ret = 0;
3058                 goto ptype_unknown;
3059         }
3060
3061         if (ptype_mask == 0) {
3062                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3063                                 ptype_mask);
3064                 goto ptype_unknown;
3065         }
3066
3067         unused_mask = ptype_mask;
3068         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3069                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3070                 if (mask && mask != valid_ptype_masks[i]) {
3071                         ret = -EINVAL;
3072                         goto ptype_unknown;
3073                 }
3074                 unused_mask &= ~valid_ptype_masks[i];
3075         }
3076
3077         if (unused_mask) {
3078                 ret = -EINVAL;
3079                 goto ptype_unknown;
3080         }
3081
3082         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3083         if (all_ptypes == NULL) {
3084                 ret = 0;
3085                 goto ptype_unknown;
3086         }
3087
3088         /*
3089          * Accommodate as many set_ptypes as possible. If the supplied
3090          * set_ptypes array is too small, fill it partially.
3091          */
3092         for (i = 0, j = 0; set_ptypes != NULL &&
3093                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3094                 if (ptype_mask & all_ptypes[i]) {
3095                         if (j < num - 1) {
3096                                 set_ptypes[j] = all_ptypes[i];
3097                                 j++;
3098                                 continue;
3099                         }
3100                         break;
3101                 }
3102         }
3103
3104         if (set_ptypes != NULL && j < num)
3105                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3106
3107         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3108
3109 ptype_unknown:
3110         if (num > 0)
3111                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3112
3113         return ret;
3114 }
3115
3116 int
3117 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3118 {
3119         struct rte_eth_dev *dev;
3120
3121         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3122         dev = &rte_eth_devices[port_id];
3123         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3124
3125         return 0;
3126 }
3127
3128 int
3129 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3130 {
3131         struct rte_eth_dev *dev;
3132
3133         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3134
3135         dev = &rte_eth_devices[port_id];
3136         *mtu = dev->data->mtu;
3137         return 0;
3138 }
3139
3140 int
3141 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3142 {
3143         int ret;
3144         struct rte_eth_dev_info dev_info;
3145         struct rte_eth_dev *dev;
3146
3147         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3148         dev = &rte_eth_devices[port_id];
3149         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3150
3151         /*
3152          * Check if the device supports dev_infos_get; if it does not,
3153          * skip the min_mtu/max_mtu validation here, since it needs the
3154          * values populated by rte_eth_dev_info_get(), which in turn
3155          * relies on dev->dev_ops->dev_infos_get.
3156          */
3157         if (*dev->dev_ops->dev_infos_get != NULL) {
3158                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3159                 if (ret != 0)
3160                         return ret;
3161
3162                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3163                         return -EINVAL;
3164         }
3165
3166         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3167         if (!ret)
3168                 dev->data->mtu = mtu;
3169
3170         return eth_err(port_id, ret);
3171 }
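
/*
 * Usage sketch (illustrative only): a portable caller clamps the
 * requested MTU to the [min_mtu, max_mtu] range advertised by the
 * device, mirroring the validation done above; 9000 is just an
 * arbitrary jumbo-frame example.
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t mtu = 9000;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0) {
 *		if (mtu > dev_info.max_mtu)
 *			mtu = dev_info.max_mtu;
 *		if (mtu < dev_info.min_mtu)
 *			mtu = dev_info.min_mtu;
 *	}
 *	ret = rte_eth_dev_set_mtu(port_id, mtu);
 */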
3172
3173 int
3174 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3175 {
3176         struct rte_eth_dev *dev;
3177         int ret;
3178
3179         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3180         dev = &rte_eth_devices[port_id];
3181         if (!(dev->data->dev_conf.rxmode.offloads &
3182               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3183                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3184                         port_id);
3185                 return -ENOSYS;
3186         }
3187
3188         if (vlan_id > 4095) {
3189                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3190                         port_id, vlan_id);
3191                 return -EINVAL;
3192         }
3193         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3194
3195         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3196         if (ret == 0) {
3197                 struct rte_vlan_filter_conf *vfc;
3198                 int vidx;
3199                 int vbit;
3200
3201                 vfc = &dev->data->vlan_filter_conf;
3202                 vidx = vlan_id / 64;
3203                 vbit = vlan_id % 64;
3204
3205                 if (on)
3206                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3207                 else
3208                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3209         }
3210
3211         return eth_err(port_id, ret);
3212 }
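
/*
 * Usage sketch (illustrative only): VLAN filtering must be enabled in
 * rxmode.offloads at configure time, otherwise the call above fails
 * with -ENOSYS; nb_rxq/nb_txq and the queue setup are assumed done by
 * the caller.
 *
 *	struct rte_eth_conf conf = { .rxmode = { .offloads =
 *			DEV_RX_OFFLOAD_VLAN_FILTER } };
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	... queue setup, rte_eth_dev_start() ...
 *	rte_eth_dev_vlan_filter(port_id, 100, 1); /* accept VLAN 100 */
 */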
3213
3214 int
3215 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3216                                     int on)
3217 {
3218         struct rte_eth_dev *dev;
3219
3220         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3221         dev = &rte_eth_devices[port_id];
3222         if (rx_queue_id >= dev->data->nb_rx_queues) {
3223                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3224                 return -EINVAL;
3225         }
3226
3227         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3228         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3229
3230         return 0;
3231 }
3232
3233 int
3234 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3235                                 enum rte_vlan_type vlan_type,
3236                                 uint16_t tpid)
3237 {
3238         struct rte_eth_dev *dev;
3239
3240         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3241         dev = &rte_eth_devices[port_id];
3242         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3243
3244         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3245                                                                tpid));
3246 }
3247
3248 int
3249 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3250 {
3251         struct rte_eth_dev *dev;
3252         int ret = 0;
3253         int mask = 0;
3254         int cur, org = 0;
3255         uint64_t orig_offloads;
3256         uint64_t *dev_offloads;
3257
3258         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3259         dev = &rte_eth_devices[port_id];
3260
3261         /* save original values in case of failure */
3262         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3263         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3264
3265         /* check which options were changed by the application */
3266         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3267         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3268         if (cur != org) {
3269                 if (cur)
3270                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3271                 else
3272                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3273                 mask |= ETH_VLAN_STRIP_MASK;
3274         }
3275
3276         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3277         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3278         if (cur != org) {
3279                 if (cur)
3280                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3281                 else
3282                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3283                 mask |= ETH_VLAN_FILTER_MASK;
3284         }
3285
3286         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3287         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3288         if (cur != org) {
3289                 if (cur)
3290                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3291                 else
3292                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3293                 mask |= ETH_VLAN_EXTEND_MASK;
3294         }
3295
3296         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3297         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3298         if (cur != org) {
3299                 if (cur)
3300                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3301                 else
3302                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3303                 mask |= ETH_QINQ_STRIP_MASK;
3304         }
3305
3306         /* no change */
3307         if (mask == 0)
3308                 return ret;
3309
3310         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3311         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3312         if (ret) {
3313                 /* hit an error, restore the original values */
3314                 *dev_offloads = orig_offloads;
3315         }
3316
3317         return eth_err(port_id, ret);
3318 }
3319
3320 int
3321 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3322 {
3323         struct rte_eth_dev *dev;
3324         uint64_t *dev_offloads;
3325         int ret = 0;
3326
3327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3328         dev = &rte_eth_devices[port_id];
3329         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3330
3331         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3332                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3333
3334         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3335                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3336
3337         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3338                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3339
3340         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3341                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3342
3343         return ret;
3344 }
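
/*
 * Usage sketch (illustrative only): the offload mask is meant to be
 * read-modify-written, e.g. to turn VLAN stripping on at runtime while
 * leaving the other VLAN offloads untouched.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0) {
 *		mask |= ETH_VLAN_STRIP_OFFLOAD;
 *		ret = rte_eth_dev_set_vlan_offload(port_id, mask);
 *	}
 */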
3345
3346 int
3347 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3348 {
3349         struct rte_eth_dev *dev;
3350
3351         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3352         dev = &rte_eth_devices[port_id];
3353         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3354
3355         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3356 }
3357
3358 int
3359 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3360 {
3361         struct rte_eth_dev *dev;
3362
3363         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3364         dev = &rte_eth_devices[port_id];
3365         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3366         memset(fc_conf, 0, sizeof(*fc_conf));
3367         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3368 }
3369
3370 int
3371 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3372 {
3373         struct rte_eth_dev *dev;
3374
3375         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3376         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3377                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3378                 return -EINVAL;
3379         }
3380
3381         dev = &rte_eth_devices[port_id];
3382         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3383         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3384 }
3385
3386 int
3387 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3388                                    struct rte_eth_pfc_conf *pfc_conf)
3389 {
3390         struct rte_eth_dev *dev;
3391
3392         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3393         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3394                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3395                 return -EINVAL;
3396         }
3397
3398         dev = &rte_eth_devices[port_id];
3399         /* High water / low water validation is device-specific */
3400         if (*dev->dev_ops->priority_flow_ctrl_set)
3401                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3402                                         (dev, pfc_conf));
3403         return -ENOTSUP;
3404 }
3405
3406 static int
3407 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3408                         uint16_t reta_size)
3409 {
3410         uint16_t i, num;
3411
3412         if (!reta_conf)
3413                 return -EINVAL;
3414
3415         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3416         for (i = 0; i < num; i++) {
3417                 if (reta_conf[i].mask)
3418                         return 0;
3419         }
3420
3421         return -EINVAL;
3422 }
3423
3424 static int
3425 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3426                          uint16_t reta_size,
3427                          uint16_t max_rxq)
3428 {
3429         uint16_t i, idx, shift;
3430
3431         if (!reta_conf)
3432                 return -EINVAL;
3433
3434         if (max_rxq == 0) {
3435                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3436                 return -EINVAL;
3437         }
3438
3439         for (i = 0; i < reta_size; i++) {
3440                 idx = i / RTE_RETA_GROUP_SIZE;
3441                 shift = i % RTE_RETA_GROUP_SIZE;
3442                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3443                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3444                         RTE_ETHDEV_LOG(ERR,
3445                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3446                                 idx, shift,
3447                                 reta_conf[idx].reta[shift], max_rxq);
3448                         return -EINVAL;
3449                 }
3450         }
3451
3452         return 0;
3453 }
3454
3455 int
3456 rte_eth_dev_rss_reta_update(uint16_t port_id,
3457                             struct rte_eth_rss_reta_entry64 *reta_conf,
3458                             uint16_t reta_size)
3459 {
3460         struct rte_eth_dev *dev;
3461         int ret;
3462
3463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3464         /* Check mask bits */
3465         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3466         if (ret < 0)
3467                 return ret;
3468
3469         dev = &rte_eth_devices[port_id];
3470
3471         /* Check entry value */
3472         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3473                                 dev->data->nb_rx_queues);
3474         if (ret < 0)
3475                 return ret;
3476
3477         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3478         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3479                                                              reta_size));
3480 }
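
/*
 * Usage sketch (illustrative only): each rte_eth_rss_reta_entry64
 * covers RTE_RETA_GROUP_SIZE (64) table entries; entry i lives in group
 * i / RTE_RETA_GROUP_SIZE at slot i % RTE_RETA_GROUP_SIZE and is only
 * written if the matching mask bit is set, mirroring the checks above.
 * This spreads the whole table across nb_rxq queues; reta_size (from
 * dev_info.reta_size) and nb_rxq are assumed caller-provided.
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size /
 *						RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *				1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i % nb_rxq;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */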
3481
3482 int
3483 rte_eth_dev_rss_reta_query(uint16_t port_id,
3484                            struct rte_eth_rss_reta_entry64 *reta_conf,
3485                            uint16_t reta_size)
3486 {
3487         struct rte_eth_dev *dev;
3488         int ret;
3489
3490         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3491
3492         /* Check mask bits */
3493         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3494         if (ret < 0)
3495                 return ret;
3496
3497         dev = &rte_eth_devices[port_id];
3498         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3499         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3500                                                             reta_size));
3501 }
3502
3503 int
3504 rte_eth_dev_rss_hash_update(uint16_t port_id,
3505                             struct rte_eth_rss_conf *rss_conf)
3506 {
3507         struct rte_eth_dev *dev;
3508         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3509         int ret;
3510
3511         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3512
3513         ret = rte_eth_dev_info_get(port_id, &dev_info);
3514         if (ret != 0)
3515                 return ret;
3516
3517         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3518
3519         dev = &rte_eth_devices[port_id];
3520         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3521             dev_info.flow_type_rss_offloads) {
3522                 RTE_ETHDEV_LOG(ERR,
3523                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3524                         port_id, rss_conf->rss_hf,
3525                         dev_info.flow_type_rss_offloads);
3526                 return -EINVAL;
3527         }
3528         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3529         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3530                                                                  rss_conf));
3531 }
3532
3533 int
3534 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3535                               struct rte_eth_rss_conf *rss_conf)
3536 {
3537         struct rte_eth_dev *dev;
3538
3539         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3540         dev = &rte_eth_devices[port_id];
3541         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3542         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3543                                                                    rss_conf));
3544 }
3545
3546 int
3547 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3548                                 struct rte_eth_udp_tunnel *udp_tunnel)
3549 {
3550         struct rte_eth_dev *dev;
3551
3552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3553         if (udp_tunnel == NULL) {
3554                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3555                 return -EINVAL;
3556         }
3557
3558         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3559                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3560                 return -EINVAL;
3561         }
3562
3563         dev = &rte_eth_devices[port_id];
3564         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3565         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3566                                                                 udp_tunnel));
3567 }
3568
3569 int
3570 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3571                                    struct rte_eth_udp_tunnel *udp_tunnel)
3572 {
3573         struct rte_eth_dev *dev;
3574
3575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3576         dev = &rte_eth_devices[port_id];
3577
3578         if (udp_tunnel == NULL) {
3579                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3580                 return -EINVAL;
3581         }
3582
3583         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3584                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3585                 return -EINVAL;
3586         }
3587
3588         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3589         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3590                                                                 udp_tunnel));
3591 }
3592
3593 int
3594 rte_eth_led_on(uint16_t port_id)
3595 {
3596         struct rte_eth_dev *dev;
3597
3598         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3599         dev = &rte_eth_devices[port_id];
3600         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3601         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3602 }
3603
3604 int
3605 rte_eth_led_off(uint16_t port_id)
3606 {
3607         struct rte_eth_dev *dev;
3608
3609         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3610         dev = &rte_eth_devices[port_id];
3611         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3612         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3613 }
3614
3615 /*
3616  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3617  * an empty spot.
3618  */
3619 static int
3620 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3621 {
3622         struct rte_eth_dev_info dev_info;
3623         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3624         unsigned i;
3625         int ret;
3626
3627         ret = rte_eth_dev_info_get(port_id, &dev_info);
3628         if (ret != 0)
3629                 return -1;
3630
3631         for (i = 0; i < dev_info.max_mac_addrs; i++)
3632                 if (memcmp(addr, &dev->data->mac_addrs[i],
3633                                 RTE_ETHER_ADDR_LEN) == 0)
3634                         return i;
3635
3636         return -1;
3637 }
3638
3639 static const struct rte_ether_addr null_mac_addr;
3640
3641 int
3642 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3643                         uint32_t pool)
3644 {
3645         struct rte_eth_dev *dev;
3646         int index;
3647         uint64_t pool_mask;
3648         int ret;
3649
3650         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3651         dev = &rte_eth_devices[port_id];
3652         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3653
3654         if (rte_is_zero_ether_addr(addr)) {
3655                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3656                         port_id);
3657                 return -EINVAL;
3658         }
3659         if (pool >= ETH_64_POOLS) {
3660                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3661                 return -EINVAL;
3662         }
3663
3664         index = get_mac_addr_index(port_id, addr);
3665         if (index < 0) {
3666                 index = get_mac_addr_index(port_id, &null_mac_addr);
3667                 if (index < 0) {
3668                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3669                                 port_id);
3670                         return -ENOSPC;
3671                 }
3672         } else {
3673                 pool_mask = dev->data->mac_pool_sel[index];
3674
3675                 /* If both the MAC address and pool are already there, do nothing */
3676                 if (pool_mask & (1ULL << pool))
3677                         return 0;
3678         }
3679
3680         /* Update NIC */
3681         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3682
3683         if (ret == 0) {
3684                 /* Update address in NIC data structure */
3685                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3686
3687                 /* Update pool bitmap in NIC data structure */
3688                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3689         }
3690
3691         return eth_err(port_id, ret);
3692 }
3693
3694 int
3695 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3696 {
3697         struct rte_eth_dev *dev;
3698         int index;
3699
3700         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3701         dev = &rte_eth_devices[port_id];
3702         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3703
3704         index = get_mac_addr_index(port_id, addr);
3705         if (index == 0) {
3706                 RTE_ETHDEV_LOG(ERR,
3707                         "Port %u: Cannot remove default MAC address\n",
3708                         port_id);
3709                 return -EADDRINUSE;
3710         } else if (index < 0)
3711                 return 0;  /* Do nothing if address wasn't found */
3712
3713         /* Update NIC */
3714         (*dev->dev_ops->mac_addr_remove)(dev, index);
3715
3716         /* Update address in NIC data structure */
3717         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3718
3719         /* reset pool bitmap */
3720         dev->data->mac_pool_sel[index] = 0;
3721
3722         return 0;
3723 }
3724
3725 int
3726 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3727 {
3728         struct rte_eth_dev *dev;
3729         int ret;
3730
3731         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3732
3733         if (!rte_is_valid_assigned_ether_addr(addr))
3734                 return -EINVAL;
3735
3736         dev = &rte_eth_devices[port_id];
3737         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3738
3739         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3740         if (ret < 0)
3741                 return ret;
3742
3743         /* Update default address in NIC data structure */
3744         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3745
3746         return 0;
3747 }
3748
3749
3750 /*
3751  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3752  * an empty spot.
3753  */
3754 static int
3755 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3756 {
3757         struct rte_eth_dev_info dev_info;
3758         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3759         unsigned i;
3760         int ret;
3761
3762         ret = rte_eth_dev_info_get(port_id, &dev_info);
3763         if (ret != 0)
3764                 return -1;
3765
3766         if (!dev->data->hash_mac_addrs)
3767                 return -1;
3768
3769         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3770                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3771                         RTE_ETHER_ADDR_LEN) == 0)
3772                         return i;
3773
3774         return -1;
3775 }
3776
3777 int
3778 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3779                                 uint8_t on)
3780 {
3781         int index;
3782         int ret;
3783         struct rte_eth_dev *dev;
3784
3785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3786
3787         dev = &rte_eth_devices[port_id];
3788         if (rte_is_zero_ether_addr(addr)) {
3789                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3790                         port_id);
3791                 return -EINVAL;
3792         }
3793
3794         index = get_hash_mac_addr_index(port_id, addr);
3795         /* If it's already there, do nothing */
3796         if ((index >= 0) && on)
3797                 return 0;
3798
3799         if (index < 0) {
3800                 if (!on) {
3801                         RTE_ETHDEV_LOG(ERR,
3802                                 "Port %u: the MAC address was not set in UTA\n",
3803                                 port_id);
3804                         return -EINVAL;
3805                 }
3806
3807                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3808                 if (index < 0) {
3809                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3810                                 port_id);
3811                         return -ENOSPC;
3812                 }
3813         }
3814
3815         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3816         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3817         if (ret == 0) {
3818                 /* Update address in NIC data structure */
3819                 if (on)
3820                         rte_ether_addr_copy(addr,
3821                                         &dev->data->hash_mac_addrs[index]);
3822                 else
3823                         rte_ether_addr_copy(&null_mac_addr,
3824                                         &dev->data->hash_mac_addrs[index]);
3825         }
3826
3827         return eth_err(port_id, ret);
3828 }
3829
3830 int
3831 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3832 {
3833         struct rte_eth_dev *dev;
3834
3835         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3836
3837         dev = &rte_eth_devices[port_id];
3838
3839         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3840         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3841                                                                        on));
3842 }
3843
3844 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3845                                         uint16_t tx_rate)
3846 {
3847         struct rte_eth_dev *dev;
3848         struct rte_eth_dev_info dev_info;
3849         struct rte_eth_link link;
3850         int ret;
3851
3852         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3853
3854         ret = rte_eth_dev_info_get(port_id, &dev_info);
3855         if (ret != 0)
3856                 return ret;
3857
3858         dev = &rte_eth_devices[port_id];
3859         link = dev->data->dev_link;
3860
3861         if (queue_idx > dev_info.max_tx_queues) {
3862                 RTE_ETHDEV_LOG(ERR,
3863                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3864                         port_id, queue_idx);
3865                 return -EINVAL;
3866         }
3867
3868         if (tx_rate > link.link_speed) {
3869                 RTE_ETHDEV_LOG(ERR,
3870                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3871                         tx_rate, link.link_speed);
3872                 return -EINVAL;
3873         }
3874
3875         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3876         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3877                                                         queue_idx, tx_rate));
3878 }
3879
3880 int
3881 rte_eth_mirror_rule_set(uint16_t port_id,
3882                         struct rte_eth_mirror_conf *mirror_conf,
3883                         uint8_t rule_id, uint8_t on)
3884 {
3885         struct rte_eth_dev *dev;
3886
3887         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3888         if (mirror_conf->rule_type == 0) {
3889                 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
3890                 return -EINVAL;
3891         }
3892
3893         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3894                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3895                         ETH_64_POOLS - 1);
3896                 return -EINVAL;
3897         }
3898
3899         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3900              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3901             (mirror_conf->pool_mask == 0)) {
3902                 RTE_ETHDEV_LOG(ERR,
3903                         "Invalid mirror pool, pool mask can not be 0\n");
3904                 return -EINVAL;
3905         }
3906
3907         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3908             mirror_conf->vlan.vlan_mask == 0) {
3909                 RTE_ETHDEV_LOG(ERR,
3910                         "Invalid vlan mask, vlan mask can not be 0\n");
3911                 return -EINVAL;
3912         }
3913
3914         dev = &rte_eth_devices[port_id];
3915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3916
3917         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3918                                                 mirror_conf, rule_id, on));
3919 }
3920
3921 int
3922 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3923 {
3924         struct rte_eth_dev *dev;
3925
3926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3927
3928         dev = &rte_eth_devices[port_id];
3929         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3930
3931         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3932                                                                    rule_id));
3933 }
3934
3935 RTE_INIT(eth_dev_init_cb_lists)
3936 {
3937         int i;
3938
3939         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3940                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3941 }
3942
3943 int
3944 rte_eth_dev_callback_register(uint16_t port_id,
3945                         enum rte_eth_event_type event,
3946                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3947 {
3948         struct rte_eth_dev *dev;
3949         struct rte_eth_dev_callback *user_cb;
3950         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3951         uint16_t last_port;
3952
3953         if (!cb_fn)
3954                 return -EINVAL;
3955
3956         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3957                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3958                 return -EINVAL;
3959         }
3960
3961         if (port_id == RTE_ETH_ALL) {
3962                 next_port = 0;
3963                 last_port = RTE_MAX_ETHPORTS - 1;
3964         } else {
3965                 next_port = last_port = port_id;
3966         }
3967
3968         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3969
3970         do {
3971                 dev = &rte_eth_devices[next_port];
3972
3973                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3974                         if (user_cb->cb_fn == cb_fn &&
3975                                 user_cb->cb_arg == cb_arg &&
3976                                 user_cb->event == event) {
3977                                 break;
3978                         }
3979                 }
3980
3981                 /* create a new callback. */
3982                 if (user_cb == NULL) {
3983                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3984                                 sizeof(struct rte_eth_dev_callback), 0);
3985                         if (user_cb != NULL) {
3986                                 user_cb->cb_fn = cb_fn;
3987                                 user_cb->cb_arg = cb_arg;
3988                                 user_cb->event = event;
3989                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3990                                                   user_cb, next);
3991                         } else {
3992                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3993                                 rte_eth_dev_callback_unregister(port_id, event,
3994                                                                 cb_fn, cb_arg);
3995                                 return -ENOMEM;
3996                         }
3997
3998                 }
3999         } while (++next_port <= last_port);
4000
4001         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4002         return 0;
4003 }
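
/*
 * Usage sketch (illustrative only): register a link-status-change
 * callback on every port. The callback runs from the interrupt thread,
 * so it should stay short; unregistering a callback from within itself
 * yields -EAGAIN (see the unregister path below).
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */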
4004
4005 int
4006 rte_eth_dev_callback_unregister(uint16_t port_id,
4007                         enum rte_eth_event_type event,
4008                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4009 {
4010         int ret;
4011         struct rte_eth_dev *dev;
4012         struct rte_eth_dev_callback *cb, *next;
4013         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4014         uint16_t last_port;
4015
4016         if (!cb_fn)
4017                 return -EINVAL;
4018
4019         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4020                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4021                 return -EINVAL;
4022         }
4023
4024         if (port_id == RTE_ETH_ALL) {
4025                 next_port = 0;
4026                 last_port = RTE_MAX_ETHPORTS - 1;
4027         } else {
4028                 next_port = last_port = port_id;
4029         }
4030
4031         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4032
4033         do {
4034                 dev = &rte_eth_devices[next_port];
4035                 ret = 0;
4036                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4037                      cb = next) {
4038
4039                         next = TAILQ_NEXT(cb, next);
4040
4041                         if (cb->cb_fn != cb_fn || cb->event != event ||
4042                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4043                                 continue;
4044
4045                         /*
4046                          * if this callback is not executing right now,
4047                          * then remove it.
4048                          */
4049                         if (cb->active == 0) {
4050                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4051                                 rte_free(cb);
4052                         } else {
4053                                 ret = -EAGAIN;
4054                         }
4055                 }
4056         } while (++next_port <= last_port);
4057
4058         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4059         return ret;
4060 }
4061
4062 int
4063 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4064         enum rte_eth_event_type event, void *ret_param)
4065 {
4066         struct rte_eth_dev_callback *cb_lst;
4067         struct rte_eth_dev_callback dev_cb;
4068         int rc = 0;
4069
4070         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4071         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4072                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4073                         continue;
4074                 dev_cb = *cb_lst;
4075                 cb_lst->active = 1;
4076                 if (ret_param != NULL)
4077                         dev_cb.ret_param = ret_param;
4078
4079                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4080                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4081                                 dev_cb.cb_arg, dev_cb.ret_param);
4082                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4083                 cb_lst->active = 0;
4084         }
4085         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4086         return rc;
4087 }
4088
4089 void
4090 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4091 {
4092         if (dev == NULL)
4093                 return;
4094
4095         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4096
4097         dev->state = RTE_ETH_DEV_ATTACHED;
4098 }
4099
4100 int
4101 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4102 {
4103         uint32_t vec;
4104         struct rte_eth_dev *dev;
4105         struct rte_intr_handle *intr_handle;
4106         uint16_t qid;
4107         int rc;
4108
4109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4110
4111         dev = &rte_eth_devices[port_id];
4112
4113         if (!dev->intr_handle) {
4114                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4115                 return -ENOTSUP;
4116         }
4117
4118         intr_handle = dev->intr_handle;
4119         if (!intr_handle->intr_vec) {
4120                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4121                 return -EPERM;
4122         }
4123
4124         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4125                 vec = intr_handle->intr_vec[qid];
4126                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4127                 if (rc && rc != -EEXIST) {
4128                         RTE_ETHDEV_LOG(ERR,
4129                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4130                                 port_id, qid, op, epfd, vec);
4131                 }
4132         }
4133
4134         return 0;
4135 }
4136
4137 int
4138 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4139 {
4140         struct rte_intr_handle *intr_handle;
4141         struct rte_eth_dev *dev;
4142         unsigned int efd_idx;
4143         uint32_t vec;
4144         int fd;
4145
4146         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4147
4148         dev = &rte_eth_devices[port_id];
4149
4150         if (queue_id >= dev->data->nb_rx_queues) {
4151                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4152                 return -1;
4153         }
4154
4155         if (!dev->intr_handle) {
4156                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4157                 return -1;
4158         }
4159
4160         intr_handle = dev->intr_handle;
4161         if (!intr_handle->intr_vec) {
4162                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4163                 return -1;
4164         }
4165
4166         vec = intr_handle->intr_vec[queue_id];
4167         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4168                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4169         fd = intr_handle->efds[efd_idx];
4170
4171         return fd;
4172 }
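
/*
 * Usage sketch (illustrative only): the per-queue event fd is what the
 * RX interrupt control calls plug into an epoll set; timeout_ms is
 * assumed caller-provided. A typical power-saving wait loop uses the
 * DPDK epoll wrapper:
 *
 *	struct rte_epoll_event ev[1];
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, 1, timeout_ms) > 0) {
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		... drain the queue with rte_eth_rx_burst() ...
 *	}
 */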
4173
4174 const struct rte_memzone *
4175 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4176                          uint16_t queue_id, size_t size, unsigned align,
4177                          int socket_id)
4178 {
4179         char z_name[RTE_MEMZONE_NAMESIZE];
4180         const struct rte_memzone *mz;
4181         int rc;
4182
4183         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
4184                       dev->data->port_id, queue_id, ring_name);
4185         if (rc >= RTE_MEMZONE_NAMESIZE) {
4186                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4187                 rte_errno = ENAMETOOLONG;
4188                 return NULL;
4189         }
4190
4191         mz = rte_memzone_lookup(z_name);
4192         if (mz)
4193                 return mz;
4194
4195         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4196                         RTE_MEMZONE_IOVA_CONTIG, align);
4197 }
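
/*
 * Usage sketch (illustrative only, driver side): a PMD reserves its
 * descriptor ring through this helper so the zone name stays unique per
 * port/queue and an existing zone is reused across restarts; dev,
 * queue_id, ring_size and socket_id come from the queue setup path.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM; /* rte_errno holds the cause */
 *	... program mz->iova into the NIC, access the ring via mz->addr ...
 */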
4198
4199 int
4200 rte_eth_dev_create(struct rte_device *device, const char *name,
4201         size_t priv_data_size,
4202         ethdev_bus_specific_init ethdev_bus_specific_init,
4203         void *bus_init_params,
4204         ethdev_init_t ethdev_init, void *init_params)
4205 {
4206         struct rte_eth_dev *ethdev;
4207         int retval;
4208
4209         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4210
4211         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4212                 ethdev = rte_eth_dev_allocate(name);
4213                 if (!ethdev)
4214                         return -ENODEV;
4215
4216                 if (priv_data_size) {
4217                         ethdev->data->dev_private = rte_zmalloc_socket(
4218                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4219                                 device->numa_node);
4220
4221                         if (!ethdev->data->dev_private) {
4222                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4223                                 retval = -ENOMEM;
4224                                 goto probe_failed;
4225                         }
4226                 }
4227         } else {
4228                 ethdev = rte_eth_dev_attach_secondary(name);
4229                 if (!ethdev) {
4230                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4231                                 "ethdev doesn't exist\n");
4232                         return -ENODEV;
4233                 }
4234         }
4235
4236         ethdev->device = device;
4237
4238         if (ethdev_bus_specific_init) {
4239                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4240                 if (retval) {
4241                         RTE_LOG(ERR, EAL,
4242                                 "ethdev bus specific initialisation failed\n");
4243                         goto probe_failed;
4244                 }
4245         }
4246
4247         retval = ethdev_init(ethdev, init_params);
4248         if (retval) {
4249                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
4250                 goto probe_failed;
4251         }
4252
4253         rte_eth_dev_probing_finish(ethdev);
4254
4255         return retval;
4256
4257 probe_failed:
4258         rte_eth_dev_release_port(ethdev);
4259         return retval;
4260 }
4261
4262 int
4263 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4264         ethdev_uninit_t ethdev_uninit)
4265 {
4266         int ret;
4267
4268         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4269         if (!ethdev)
4270                 return -ENODEV;
4271
4272         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4273
4274         ret = ethdev_uninit(ethdev);
4275         if (ret)
4276                 return ret;
4277
4278         return rte_eth_dev_release_port(ethdev);
4279 }
4280
4281 int
4282 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4283                           int epfd, int op, void *data)
4284 {
4285         uint32_t vec;
4286         struct rte_eth_dev *dev;
4287         struct rte_intr_handle *intr_handle;
4288         int rc;
4289
4290         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4291
4292         dev = &rte_eth_devices[port_id];
4293         if (queue_id >= dev->data->nb_rx_queues) {
4294                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4295                 return -EINVAL;
4296         }
4297
4298         if (!dev->intr_handle) {
4299                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4300                 return -ENOTSUP;
4301         }
4302
4303         intr_handle = dev->intr_handle;
4304         if (!intr_handle->intr_vec) {
4305                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4306                 return -EPERM;
4307         }
4308
4309         vec = intr_handle->intr_vec[queue_id];
4310         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4311         if (rc && rc != -EEXIST) {
4312                 RTE_ETHDEV_LOG(ERR,
4313                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4314                         port_id, queue_id, op, epfd, vec);
4315                 return rc;
4316         }
4317
4318         return 0;
4319 }
4320
4321 int
4322 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4323                            uint16_t queue_id)
4324 {
4325         struct rte_eth_dev *dev;
4326
4327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4328
4329         dev = &rte_eth_devices[port_id];
4330
4331         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4332         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4333                                                                 queue_id));
4334 }
4335
4336 int
4337 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4338                             uint16_t queue_id)
4339 {
4340         struct rte_eth_dev *dev;
4341
4342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4343
4344         dev = &rte_eth_devices[port_id];
4345
4346         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4347         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4348                                                                 queue_id));
4349 }
4350
4351
4352 int
4353 rte_eth_dev_filter_supported(uint16_t port_id,
4354                              enum rte_filter_type filter_type)
4355 {
4356         struct rte_eth_dev *dev;
4357
4358         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4359
4360         dev = &rte_eth_devices[port_id];
4361         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4362         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4363                                 RTE_ETH_FILTER_NOP, NULL);
4364 }
4365
4366 int
4367 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4368                         enum rte_filter_op filter_op, void *arg)
4369 {
4370         struct rte_eth_dev *dev;
4371
4372         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4373
4374         dev = &rte_eth_devices[port_id];
4375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4376         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4377                                                              filter_op, arg));
4378 }
4379
4380 const struct rte_eth_rxtx_callback *
4381 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4382                 rte_rx_callback_fn fn, void *user_param)
4383 {
4384 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4385         rte_errno = ENOTSUP;
4386         return NULL;
4387 #endif
4388         struct rte_eth_dev *dev;
4389
4390         /* check input parameters */
4391         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4392                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4393                 rte_errno = EINVAL;
4394                 return NULL;
4395         }
4396         dev = &rte_eth_devices[port_id];
4397         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4398                 rte_errno = EINVAL;
4399                 return NULL;
4400         }
4401         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4402
4403         if (cb == NULL) {
4404                 rte_errno = ENOMEM;
4405                 return NULL;
4406         }
4407
4408         cb->fn.rx = fn;
4409         cb->param = user_param;
4410
4411         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4412         /* Add the callback at the tail, in FIFO order. */
4413         struct rte_eth_rxtx_callback *tail =
4414                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4415
4416         if (!tail) {
4417                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4418
4419         } else {
4420                 while (tail->next)
4421                         tail = tail->next;
4422                 tail->next = cb;
4423         }
4424         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4425
4426         return cb;
4427 }
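
/*
 * Usage sketch (illustrative only): a post-RX callback sees each burst
 * after the PMD has filled it and may drop, reorder or annotate
 * packets; it runs in the data path, so it must be cheap and lock-free.
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count);
 */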
4428
4429 const struct rte_eth_rxtx_callback *
4430 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4431                 rte_rx_callback_fn fn, void *user_param)
4432 {
4433 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4434         rte_errno = ENOTSUP;
4435         return NULL;
4436 #endif
4437         /* check input parameters */
4438         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4439                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4440                 rte_errno = EINVAL;
4441                 return NULL;
4442         }
4443
4444         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4445
4446         if (cb == NULL) {
4447                 rte_errno = ENOMEM;
4448                 return NULL;
4449         }
4450
4451         cb->fn.rx = fn;
4452         cb->param = user_param;
4453
4454         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4455         /* Add the callback at the first position */
4456         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4457         rte_smp_wmb();
4458         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4459         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4460
4461         return cb;
4462 }
4463
4464 const struct rte_eth_rxtx_callback *
4465 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4466                 rte_tx_callback_fn fn, void *user_param)
4467 {
4468 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4469         rte_errno = ENOTSUP;
4470         return NULL;
4471 #endif
4472         struct rte_eth_dev *dev;
4473
4474         /* check input parameters */
4475         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4476                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4477                 rte_errno = EINVAL;
4478                 return NULL;
4479         }
4480
4481         dev = &rte_eth_devices[port_id];
4482         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4483                 rte_errno = EINVAL;
4484                 return NULL;
4485         }
4486
4487         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4488
4489         if (cb == NULL) {
4490                 rte_errno = ENOMEM;
4491                 return NULL;
4492         }
4493
4494         cb->fn.tx = fn;
4495         cb->param = user_param;
4496
4497         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4498         /* Add the callbacks in FIFO order. */
4499         struct rte_eth_rxtx_callback *tail =
4500                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4501
4502         if (!tail) {
4503                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4504
4505         } else {
4506                 while (tail->next)
4507                         tail = tail->next;
4508                 tail->next = cb;
4509         }
4510         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4511
4512         return cb;
4513 }
4514
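/*
 * Usage sketch (illustrative only): a pre-transmit callback, added with
 * rte_eth_add_tx_callback() above, that tags packets before the driver
 * sees them. Unlike the Rx prototype, the Tx callback takes no max_pkts
 * argument. rte_rdtsc() (from rte_cycles.h) and the use of the mbuf
 * udata64 scratch field are assumptions of this sketch.
 */
#if 0
static uint16_t
stamp_tx_cb(uint16_t port_id __rte_unused, uint16_t queue_id __rte_unused,
		struct rte_mbuf *pkts[], uint16_t nb_pkts,
		void *user_param __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		pkts[i]->udata64 = rte_rdtsc(); /* cycle-count "timestamp" */
	return nb_pkts;
}
#endif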
4515 int
4516 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4517                 const struct rte_eth_rxtx_callback *user_cb)
4518 {
4519 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4520         return -ENOTSUP;
4521 #endif
4522         /* Check input parameters. */
4523         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4524         if (user_cb == NULL ||
4525                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4526                 return -EINVAL;
4527
4528         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4529         struct rte_eth_rxtx_callback *cb;
4530         struct rte_eth_rxtx_callback **prev_cb;
4531         int ret = -EINVAL;
4532
4533         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4534         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4535         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4536                 cb = *prev_cb;
4537                 if (cb == user_cb) {
4538                         /* Remove the user cb from the callback list. */
4539                         *prev_cb = cb->next;
4540                         ret = 0;
4541                         break;
4542                 }
4543         }
4544         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4545
4546         return ret;
4547 }
4548
4549 int
4550 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4551                 const struct rte_eth_rxtx_callback *user_cb)
4552 {
4553 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4554         return -ENOTSUP;
4555 #endif
4556         /* Check input parameters. */
4557         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4558         if (user_cb == NULL ||
4559                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4560                 return -EINVAL;
4561
4562         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4563         int ret = -EINVAL;
4564         struct rte_eth_rxtx_callback *cb;
4565         struct rte_eth_rxtx_callback **prev_cb;
4566
4567         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4568         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4569         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4570                 cb = *prev_cb;
4571                 if (cb == user_cb) {
4572                         /* Remove the user cb from the callback list. */
4573                         *prev_cb = cb->next;
4574                         ret = 0;
4575                         break;
4576                 }
4577         }
4578         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4579
4580         return ret;
4581 }
4582
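/*
 * Usage sketch (illustrative only): uninstall a previously added Rx
 * callback. Removal only unlinks the callback from the per-queue list;
 * the memory may be released only once the application is certain no
 * data-plane thread is still executing the callback.
 */
#if 0
static void
uninstall_rx_counter(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *cb)
{
	if (rte_eth_remove_rx_callback(port_id, queue_id, cb) != 0)
		return;
	/* wait here for in-flight rte_eth_rx_burst() calls to drain
	 * (an application-level quiescence step) before freeing.
	 */
	rte_free((void *)(uintptr_t)cb);
}
#endif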
4583 int
4584 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4585         struct rte_eth_rxq_info *qinfo)
4586 {
4587         struct rte_eth_dev *dev;
4588
4589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4590
4591         if (qinfo == NULL)
4592                 return -EINVAL;
4593
4594         dev = &rte_eth_devices[port_id];
4595         if (queue_id >= dev->data->nb_rx_queues) {
4596                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4597                 return -EINVAL;
4598         }
4599
4600         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4601                 RTE_ETHDEV_LOG(INFO,
4602                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4603                         queue_id, port_id);
4604                 return -EINVAL;
4605         }
4606
4607         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4608
4609         memset(qinfo, 0, sizeof(*qinfo));
4610         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4611         return 0;
4612 }
4613
4614 int
4615 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4616         struct rte_eth_txq_info *qinfo)
4617 {
4618         struct rte_eth_dev *dev;
4619
4620         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4621
4622         if (qinfo == NULL)
4623                 return -EINVAL;
4624
4625         dev = &rte_eth_devices[port_id];
4626         if (queue_id >= dev->data->nb_tx_queues) {
4627                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4628                 return -EINVAL;
4629         }
4630
4631         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4632                 RTE_ETHDEV_LOG(INFO,
4633                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4634                         queue_id, port_id);
4635                 return -EINVAL;
4636         }
4637
4638         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4639
4640         memset(qinfo, 0, sizeof(*qinfo));
4641         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4642
4643         return 0;
4644 }
4645
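/*
 * Usage sketch (illustrative only): inspect a configured Rx queue.
 * Hairpin queues are rejected by the info functions above, so only
 * regular queues can be queried this way. "print_rxq_info" is a
 * hypothetical helper name.
 */
#if 0
static void
print_rxq_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;
	printf("rxq %u: %u descriptors, mempool %s\n", queue_id,
			qinfo.nb_desc, qinfo.mp != NULL ? qinfo.mp->name : "n/a");
}
#endif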
4646 int
4647 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4648                           struct rte_eth_burst_mode *mode)
4649 {
4650         struct rte_eth_dev *dev;
4651
4652         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4653
4654         if (mode == NULL)
4655                 return -EINVAL;
4656
4657         dev = &rte_eth_devices[port_id];
4658
4659         if (queue_id >= dev->data->nb_rx_queues) {
4660                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4661                 return -EINVAL;
4662         }
4663
4664         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4665         memset(mode, 0, sizeof(*mode));
4666         return eth_err(port_id,
4667                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4668 }
4669
4670 int
4671 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4672                           struct rte_eth_burst_mode *mode)
4673 {
4674         struct rte_eth_dev *dev;
4675
4676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4677
4678         if (mode == NULL)
4679                 return -EINVAL;
4680
4681         dev = &rte_eth_devices[port_id];
4682
4683         if (queue_id >= dev->data->nb_tx_queues) {
4684                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4685                 return -EINVAL;
4686         }
4687
4688         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4689         memset(mode, 0, sizeof(*mode));
4690         return eth_err(port_id,
4691                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4692 }
4693
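/*
 * Usage sketch (illustrative only): probe whether the driver reports the
 * burst mode of an Rx queue. The layout of struct rte_eth_burst_mode has
 * changed across releases, so this sketch only checks the return code;
 * -ENOTSUP means the driver does not implement the query.
 */
#if 0
static int
rx_burst_mode_known(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	return rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0;
}
#endif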
4694 int
4695 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4696                              struct rte_ether_addr *mc_addr_set,
4697                              uint32_t nb_mc_addr)
4698 {
4699         struct rte_eth_dev *dev;
4700
4701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4702
4703         dev = &rte_eth_devices[port_id];
4704         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4705         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4706                                                 mc_addr_set, nb_mc_addr));
4707 }
4708
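/*
 * Usage sketch (illustrative only): replace the port's multicast filter
 * with a single group address (the all-hosts group 01:00:5e:00:00:01 is
 * used here purely as an example). Passing nb_mc_addr == 0 clears the
 * list.
 */
#if 0
static int
join_one_mc_group(uint16_t port_id)
{
	struct rte_ether_addr mc = {
		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, &mc, 1);
}
#endif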
4709 int
4710 rte_eth_timesync_enable(uint16_t port_id)
4711 {
4712         struct rte_eth_dev *dev;
4713
4714         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4715         dev = &rte_eth_devices[port_id];
4716
4717         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4718         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4719 }
4720
4721 int
4722 rte_eth_timesync_disable(uint16_t port_id)
4723 {
4724         struct rte_eth_dev *dev;
4725
4726         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4727         dev = &rte_eth_devices[port_id];
4728
4729         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4730         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4731 }
4732
4733 int
4734 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4735                                    uint32_t flags)
4736 {
4737         struct rte_eth_dev *dev;
4738
4739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4740         dev = &rte_eth_devices[port_id];
4741
4742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4743         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4744                                 (dev, timestamp, flags));
4745 }
4746
4747 int
4748 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4749                                    struct timespec *timestamp)
4750 {
4751         struct rte_eth_dev *dev;
4752
4753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4754         dev = &rte_eth_devices[port_id];
4755
4756         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4757         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4758                                 (dev, timestamp));
4759 }
4760
4761 int
4762 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4763 {
4764         struct rte_eth_dev *dev;
4765
4766         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4767         dev = &rte_eth_devices[port_id];
4768
4769         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4770         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4771                                                                       delta));
4772 }
4773
4774 int
4775 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4776 {
4777         struct rte_eth_dev *dev;
4778
4779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4780         dev = &rte_eth_devices[port_id];
4781
4782         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4783         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4784                                                                 timestamp));
4785 }
4786
4787 int
4788 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4789 {
4790         struct rte_eth_dev *dev;
4791
4792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4793         dev = &rte_eth_devices[port_id];
4794
4795         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4796         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4797                                                                 timestamp));
4798 }
4799
4800 int
4801 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4802 {
4803         struct rte_eth_dev *dev;
4804
4805         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4806         dev = &rte_eth_devices[port_id];
4807
4808         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4809         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4810 }
4811
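/*
 * Usage sketch (illustrative only): enable IEEE 1588/802.1AS timesync and
 * sample the device clock. Each call succeeds only if the driver provides
 * the corresponding dev_ops; struct timespec comes from <time.h>, which
 * this sketch assumes is available.
 */
#if 0
static int
sample_device_time(uint16_t port_id, struct timespec *ts)
{
	int ret = rte_eth_timesync_enable(port_id);

	if (ret != 0)
		return ret;
	return rte_eth_timesync_read_time(port_id, ts);
}
#endif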
4812 int
4813 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4814 {
4815         struct rte_eth_dev *dev;
4816
4817         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4818
4819         dev = &rte_eth_devices[port_id];
4820         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4821         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4822 }
4823
4824 int
4825 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4826 {
4827         struct rte_eth_dev *dev;
4828
4829         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4830
4831         dev = &rte_eth_devices[port_id];
4832         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4833         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4834 }
4835
4836 int
4837 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4838 {
4839         struct rte_eth_dev *dev;
4840
4841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4842
4843         dev = &rte_eth_devices[port_id];
4844         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4845         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4846 }
4847
4848 int
4849 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4850 {
4851         struct rte_eth_dev *dev;
4852
4853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4854
4855         dev = &rte_eth_devices[port_id];
4856         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4857         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4858 }
4859
4860 int
4861 rte_eth_dev_get_module_info(uint16_t port_id,
4862                             struct rte_eth_dev_module_info *modinfo)
4863 {
4864         struct rte_eth_dev *dev;
4865
4866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4867
4868         dev = &rte_eth_devices[port_id];
4869         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4870         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4871 }
4872
4873 int
4874 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4875                               struct rte_dev_eeprom_info *info)
4876 {
4877         struct rte_eth_dev *dev;
4878
4879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4880
4881         dev = &rte_eth_devices[port_id];
4882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4883         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4884 }
4885
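/*
 * Usage sketch (illustrative only): dump a plugged transceiver module's
 * EEPROM. The module info call reports the EEPROM size, which then bounds
 * the read issued through rte_eth_dev_get_module_eeprom().
 */
#if 0
static int
dump_module_eeprom(uint16_t port_id, uint8_t *buf, uint32_t buf_len)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom_info;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&eeprom_info, 0, sizeof(eeprom_info));
	eeprom_info.offset = 0;
	eeprom_info.length = RTE_MIN(modinfo.eeprom_len, buf_len);
	eeprom_info.data = buf;
	return rte_eth_dev_get_module_eeprom(port_id, &eeprom_info);
}
#endif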
4886 int
4887 rte_eth_dev_get_dcb_info(uint16_t port_id,
4888                              struct rte_eth_dcb_info *dcb_info)
4889 {
4890         struct rte_eth_dev *dev;
4891
4892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4893
        if (dcb_info == NULL)
                return -EINVAL;

4894         dev = &rte_eth_devices[port_id];
4895         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4896
4897         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4898         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4899 }
4900
4901 int
4902 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4903                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4904 {
4905         struct rte_eth_dev *dev;
4906
4907         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4908         if (l2_tunnel == NULL) {
4909                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4910                 return -EINVAL;
4911         }
4912
4913         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4914                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4915                 return -EINVAL;
4916         }
4917
4918         dev = &rte_eth_devices[port_id];
4919         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4920                                 -ENOTSUP);
4921         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4922                                                                 l2_tunnel));
4923 }
4924
4925 int
4926 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4927                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4928                                   uint32_t mask,
4929                                   uint8_t en)
4930 {
4931         struct rte_eth_dev *dev;
4932
4933         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4934
4935         if (l2_tunnel == NULL) {
4936                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4937                 return -EINVAL;
4938         }
4939
4940         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4941                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4942                 return -EINVAL;
4943         }
4944
4945         if (mask == 0) {
4946                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4947                 return -EINVAL;
4948         }
4949
4950         dev = &rte_eth_devices[port_id];
4951         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4952                                 -ENOTSUP);
4953         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4954                                                         l2_tunnel, mask, en));
4955 }
4956
4957 static void
4958 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4959                            const struct rte_eth_desc_lim *desc_lim)
4960 {
4961         if (desc_lim->nb_align != 0)
4962                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4963
4964         if (desc_lim->nb_max != 0)
4965                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4966
4967         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4968 }
4969
4970 int
4971 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4972                                  uint16_t *nb_rx_desc,
4973                                  uint16_t *nb_tx_desc)
4974 {
4975         struct rte_eth_dev_info dev_info;
4976         int ret;
4977
4978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4979
4980         ret = rte_eth_dev_info_get(port_id, &dev_info);
4981         if (ret != 0)
4982                 return ret;
4983
4984         if (nb_rx_desc != NULL)
4985                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4986
4987         if (nb_tx_desc != NULL)
4988                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4989
4990         return 0;
4991 }
4992
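/*
 * Usage sketch (illustrative only): the typical setup sequence, clamping
 * the application's preferred ring sizes to the device limits before
 * queue setup. 1024 descriptors and queue 0 are arbitrary choices for
 * this example.
 */
#if 0
static int
setup_rings(uint16_t port_id, struct rte_mempool *mp)
{
	uint16_t nb_rxd = 1024;
	uint16_t nb_txd = 1024;
	int ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;
	/* nb_rxd and nb_txd now respect nb_min, nb_max and nb_align */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
			NULL, mp);
	if (ret != 0)
		return ret;
	return rte_eth_tx_queue_setup(port_id, 0, nb_txd, SOCKET_ID_ANY, NULL);
}
#endif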
4993 int
4994 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4995                                    struct rte_eth_hairpin_cap *cap)
4996 {
4997         struct rte_eth_dev *dev;
4998
4999         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
5000
        if (cap == NULL)
                return -EINVAL;

5001         dev = &rte_eth_devices[port_id];
5002         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5003         memset(cap, 0, sizeof(*cap));
5004         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5005 }
5006
5007 int
5008 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5009 {
5010         if (dev->data->rx_queue_state[queue_id] ==
5011             RTE_ETH_QUEUE_STATE_HAIRPIN)
5012                 return 1;
5013         return 0;
5014 }
5015
5016 int
5017 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5018 {
5019         if (dev->data->tx_queue_state[queue_id] ==
5020             RTE_ETH_QUEUE_STATE_HAIRPIN)
5021                 return 1;
5022         return 0;
5023 }
5024
5025 int
5026 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5027 {
5028         struct rte_eth_dev *dev;
5029
5030         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5031
5032         if (pool == NULL)
5033                 return -EINVAL;
5034
5035         dev = &rte_eth_devices[port_id];
5036
5037         if (*dev->dev_ops->pool_ops_supported == NULL)
5038                 return 1; /* all pools are supported */
5039
5040         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5041 }
5042
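/*
 * Usage sketch (illustrative only): check that a port can use mempools
 * created with a given ops name before creating the pool. A return of 1
 * means supported (or that the driver expresses no preference), 0 means
 * unsupported, negative values are errors.
 */
#if 0
static bool
port_supports_ring_pool(uint16_t port_id)
{
	return rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") == 1;
}
#endif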
5043 /**
5044  * A set of values to describe the possible states of a switch domain.
5045  */
5046 enum rte_eth_switch_domain_state {
5047         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5048         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5049 };
5050
5051 /**
5052  * Array of switch domains available for allocation. Array is sized to
5053  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5054  * ethdev ports in a single process.
5055  */
5056 static struct rte_eth_dev_switch {
5057         enum rte_eth_switch_domain_state state;
5058 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5059
5060 int
5061 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5062 {
5063         unsigned int i;
5064
5065         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5066
5067         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
5068                 i < RTE_MAX_ETHPORTS; i++) {
5069                 if (rte_eth_switch_domains[i].state ==
5070                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5071                         rte_eth_switch_domains[i].state =
5072                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5073                         *domain_id = i;
5074                         return 0;
5075                 }
5076         }
5077
5078         return -ENOSPC;
5079 }
5080
5081 int
5082 rte_eth_switch_domain_free(uint16_t domain_id)
5083 {
5084         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5085                 domain_id >= RTE_MAX_ETHPORTS)
5086                 return -EINVAL;
5087
5088         if (rte_eth_switch_domains[domain_id].state !=
5089                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5090                 return -EINVAL;
5091
5092         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5093
5094         return 0;
5095 }
5096
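/*
 * Usage sketch (illustrative only): switch domains are a driver-facing
 * facility (rte_ethdev_driver.h), typically allocated once at probe time
 * for a group of representor ports and released on remove. The helper
 * name and the storage of the id are hypothetical.
 */
#if 0
static int
probe_alloc_domain(uint16_t *domain_id)
{
	int ret = rte_eth_switch_domain_alloc(domain_id);

	if (ret != 0)
		return ret; /* -ENOSPC once all RTE_MAX_ETHPORTS ids are used */
	/* ... record *domain_id in shared device private data ... */
	return 0;
}
#endif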
5097 static int
5098 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5099 {
5100         int state;
5101         struct rte_kvargs_pair *pair;
5102         char *letter;
5103
5104         arglist->str = strdup(str_in);
5105         if (arglist->str == NULL)
5106                 return -ENOMEM;
5107
5108         letter = arglist->str;
5109         state = 0;
5110         arglist->count = 0;
5111         pair = &arglist->pairs[0];
5112         while (1) {
5113                 switch (state) {
5114                 case 0: /* Initial */
5115                         if (*letter == '=')
5116                                 return -EINVAL;
5117                         else if (*letter == '\0')
5118                                 return 0;
5119
5120                         state = 1;
5121                         pair->key = letter;
5122                         /* fall-thru */
5123
5124                 case 1: /* Parsing key */
5125                         if (*letter == '=') {
5126                                 *letter = '\0';
5127                                 pair->value = letter + 1;
5128                                 state = 2;
5129                         } else if (*letter == ',' || *letter == '\0')
5130                                 return -EINVAL;
5131                         break;
5132
5134                 case 2: /* Parsing value */
5135                         if (*letter == '[')
5136                                 state = 3;
5137                         else if (*letter == ',') {
5138                                 *letter = '\0';
5139                                 arglist->count++;
5140                                 pair = &arglist->pairs[arglist->count];
5141                                 state = 0;
5142                         } else if (*letter == '\0') {
5143                                 letter--; /* re-scan '\0' in state 0 to terminate */
5144                                 arglist->count++;
5145                                 pair = &arglist->pairs[arglist->count];
5146                                 state = 0;
5147                         }
5148                         break;
5149
5150                 case 3: /* Parsing list */
5151                         if (*letter == ']')
5152                                 state = 2;
5153                         else if (*letter == '\0')
5154                                 return -EINVAL;
5155                         break;
5156                 }
5157                 letter++;
5158         }
5159 }
5160
5161 int
5162 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5163 {
5164         struct rte_kvargs args;
5165         struct rte_kvargs_pair *pair;
5166         unsigned int i;
5167         int result = 0;
5168
5169         memset(eth_da, 0, sizeof(*eth_da));
5170
5171         result = rte_eth_devargs_tokenise(&args, dargs);
5172         if (result < 0)
5173                 goto parse_cleanup;
5174
5175         for (i = 0; i < args.count; i++) {
5176                 pair = &args.pairs[i];
5177                 if (strcmp("representor", pair->key) == 0) {
5178                         result = rte_eth_devargs_parse_list(pair->value,
5179                                 rte_eth_devargs_parse_representor_ports,
5180                                 eth_da);
5181                         if (result < 0)
5182                                 goto parse_cleanup;
5183                 }
5184         }
5185
5186 parse_cleanup:
5187         if (args.str)
5188                 free(args.str);
5189
5190         return result;
5191 }
5192
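/*
 * Usage sketch (illustrative only): parse a "representor" device argument
 * of the form handled by the tokeniser above, e.g. "representor=[0-3]".
 * The representor_ports and nb_representor_ports field names follow
 * struct rte_eth_devargs as understood here.
 */
#if 0
static void
parse_representor_arg(void)
{
	struct rte_eth_devargs da;
	uint16_t i;

	if (rte_eth_devargs_parse("representor=[0-3]", &da) != 0)
		return;
	for (i = 0; i < da.nb_representor_ports; i++)
		printf("representor port %u\n", da.representor_ports[i]);
}
#endif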
5193 RTE_INIT(ethdev_init_log)
5194 {
5195         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
5196         if (rte_eth_dev_logtype >= 0)
5197                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
5198 }