/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>

#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

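/*
 * Usage sketch for the iterator API below (illustration only): walk every
 * ethdev port matching a devargs string. rte_eth_iterator_next() cleans
 * the iterator up by itself once the last match has been returned, so
 * rte_eth_iterator_cleanup() is only needed when stopping early.
 *
 *      struct rte_dev_iterator iter;
 *      uint16_t pid;
 *
 *      if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0)
 *              for (pid = rte_eth_iterator_next(&iter);
 *                   pid != RTE_MAX_ETHPORTS;
 *                   pid = rte_eth_iterator_next(&iter))
 *                      printf("matched port %u\n", pid);
 */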
int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() is not yet supporting the new syntax,
         * that's why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

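/*
 * Reserve (primary process) or look up (secondary process) the memzone
 * holding the per-port data shared across processes, on first use.
 */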
static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

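/* A port slot counts as allocated once its shared data carries a name. */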
static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

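/* Scan the shared data for the first port slot with an empty name. */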
static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

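/* Bind the local device struct to its per-port shared data and return it. */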
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                _rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

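/*
 * Transfer ownership of a port from old_owner_id to new_owner after
 * validating both ids. The caller must hold ownership_lock.
 */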
static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

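/*
 * Normalize a driver return code: report -EIO if the error was caused by
 * the device being physically removed, otherwise pass the code through.
 */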
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

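/*
 * Grow, shrink or release the Rx queue pointer array: the first call
 * allocates it, a later call with nb_queues == 0 frees it, and any other
 * call reallocates it after releasing the queues beyond the new count.
 */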
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

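/* Same as the Rx variant above, applied to the Tx queue pointer array. */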
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

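/*
 * Map a numeric link speed (plus duplex for 10/100M) to its
 * ETH_LINK_SPEED_* capability bit; returns 0 for unknown speeds.
 */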
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

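/*
 * Sanity-check the configured max_lro_pkt_size against the device limit,
 * or against max_rx_pkt_len when the driver reports no limit of its own.
 */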
static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}

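/*
 * Minimal usage sketch for rte_eth_dev_configure() (illustration only):
 * one Rx and one Tx queue with an all-default rte_eth_conf. Queue setup
 * and rte_eth_dev_start() must still follow before traffic can flow.
 *
 *      struct rte_eth_conf conf;
 *      int ret;
 *
 *      memset(&conf, 0, sizeof(conf));
 *      ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *      if (ret != 0)
 *              printf("configure failed: %d\n", ret);
 */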
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If the driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned int)RTE_ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        RTE_ETHER_MAX_LEN;
        }

        /*
         * If LRO is enabled, check that the maximum aggregated packet
         * size is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
                if (dev_conf->rxmode.max_lro_pkt_size == 0)
                        dev->data->dev_conf.rxmode.max_lro_pkt_size =
                                dev->data->dev_conf.rxmode.max_rx_pkt_len;
                ret = check_lro_pkt_size(port_id,
                                dev->data->dev_conf.rxmode.max_lro_pkt_size,
                                dev->data->dev_conf.rxmode.max_rx_pkt_len,
                                dev_info.max_lro_pkt_size);
                if (ret != 0)
                        goto rollback;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
             dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
             dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }

        dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
        if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
            (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u config invalid: Rx mq_mode without RSS, but %s offload is requested\n",
                        port_id,
                        rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                ret = diag;
                goto rollback;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                ret = diag;
                goto rollback;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                ret = eth_err(port_id, diag);
                goto reset_queues;
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                ret = eth_err(port_id, diag);
                goto reset_queues;
        }

        /* Validate Rx offloads. */
        diag = validate_offloads(port_id,
                        dev_conf->rxmode.offloads,
                        dev->data->dev_conf.rxmode.offloads, "Rx",
                        rte_eth_dev_rx_offload_name);
        if (diag != 0) {
                ret = diag;
                goto reset_queues;
        }

        /* Validate Tx offloads. */
        diag = validate_offloads(port_id,
                        dev_conf->txmode.offloads,
                        dev->data->dev_conf.txmode.offloads, "Tx",
                        rte_eth_dev_tx_offload_name);
        if (diag != 0) {
                ret = diag;
                goto reset_queues;
        }

        return 0;
reset_queues:
        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);
rollback:
        memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

        return ret;
}

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

1500 static void
1501 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1502                         struct rte_eth_dev_info *dev_info)
1503 {
1504         struct rte_ether_addr *addr;
1505         uint16_t i;
1506         uint32_t pool = 0;
1507         uint64_t pool_mask;
1508
1509         /* replay MAC address configuration including default MAC */
1510         addr = &dev->data->mac_addrs[0];
1511         if (*dev->dev_ops->mac_addr_set != NULL)
1512                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1513         else if (*dev->dev_ops->mac_addr_add != NULL)
1514                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1515
1516         if (*dev->dev_ops->mac_addr_add != NULL) {
1517                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1518                         addr = &dev->data->mac_addrs[i];
1519
1520                         /* skip zero address */
1521                         if (rte_is_zero_ether_addr(addr))
1522                                 continue;
1523
1524                         pool = 0;
1525                         pool_mask = dev->data->mac_pool_sel[i];
1526
1527                         do {
1528                                 if (pool_mask & 1ULL)
1529                                         (*dev->dev_ops->mac_addr_add)(dev,
1530                                                 addr, i, pool);
1531                                 pool_mask >>= 1;
1532                                 pool++;
1533                         } while (pool_mask);
1534                 }
1535         }
1536 }
1537
1538 static int
1539 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1540                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1541 {
1542         int ret;
1543
1544         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1545                 rte_eth_dev_mac_restore(dev, dev_info);
1546
1547         /* replay promiscuous configuration */
1548         /*
1549          * Use the dev_ops callbacks directly: port_id is already valid
1550          * and we want to bypass the same-value early return in the public API
1551          */
1552         if (rte_eth_promiscuous_get(port_id) == 1 &&
1553             *dev->dev_ops->promiscuous_enable != NULL) {
1554                 ret = eth_err(port_id,
1555                               (*dev->dev_ops->promiscuous_enable)(dev));
1556                 if (ret != 0 && ret != -ENOTSUP) {
1557                         RTE_ETHDEV_LOG(ERR,
1558                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1559                                 port_id, rte_strerror(-ret));
1560                         return ret;
1561                 }
1562         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1563                    *dev->dev_ops->promiscuous_disable != NULL) {
1564                 ret = eth_err(port_id,
1565                               (*dev->dev_ops->promiscuous_disable)(dev));
1566                 if (ret != 0 && ret != -ENOTSUP) {
1567                         RTE_ETHDEV_LOG(ERR,
1568                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1569                                 port_id, rte_strerror(-ret));
1570                         return ret;
1571                 }
1572         }
1573
1574         /* replay all multicast configuration */
1575         /*
1576          * Use the dev_ops callbacks directly: port_id is already valid
1577          * and we want to bypass the same-value early return in the public API
1578          */
1579         if (rte_eth_allmulticast_get(port_id) == 1 &&
1580             *dev->dev_ops->allmulticast_enable != NULL) {
1581                 ret = eth_err(port_id,
1582                               (*dev->dev_ops->allmulticast_enable)(dev));
1583                 if (ret != 0 && ret != -ENOTSUP) {
1584                         RTE_ETHDEV_LOG(ERR,
1585                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1586                                 port_id, rte_strerror(-ret));
1587                         return ret;
1588                 }
1589         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1590                    *dev->dev_ops->allmulticast_disable != NULL) {
1591                 ret = eth_err(port_id,
1592                               (*dev->dev_ops->allmulticast_disable)(dev));
1593                 if (ret != 0 && ret != -ENOTSUP) {
1594                         RTE_ETHDEV_LOG(ERR,
1595                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1596                                 port_id, rte_strerror(-ret));
1597                         return ret;
1598                 }
1599         }
1600
1601         return 0;
1602 }
1603
1604 int
1605 rte_eth_dev_start(uint16_t port_id)
1606 {
1607         struct rte_eth_dev *dev;
1608         struct rte_eth_dev_info dev_info;
1609         int diag;
1610         int ret;
1611
1612         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1613
1614         dev = &rte_eth_devices[port_id];
1615
1616         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1617
1618         if (dev->data->dev_started != 0) {
1619                 RTE_ETHDEV_LOG(INFO,
1620                         "Device with port_id=%"PRIu16" already started\n",
1621                         port_id);
1622                 return 0;
1623         }
1624
1625         ret = rte_eth_dev_info_get(port_id, &dev_info);
1626         if (ret != 0)
1627                 return ret;
1628
1629         /* Restore MAC now if the device does not support live change */
1630         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1631                 rte_eth_dev_mac_restore(dev, &dev_info);
1632
1633         diag = (*dev->dev_ops->dev_start)(dev);
1634         if (diag == 0)
1635                 dev->data->dev_started = 1;
1636         else
1637                 return eth_err(port_id, diag);
1638
1639         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1640         if (ret != 0) {
1641                 RTE_ETHDEV_LOG(ERR,
1642                         "Error during restoring configuration for device (port %u): %s\n",
1643                         port_id, rte_strerror(-ret));
1644                 rte_eth_dev_stop(port_id);
1645                 return ret;
1646         }
1647
1648         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1649                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1650                 (*dev->dev_ops->link_update)(dev, 0);
1651         }
1652         return 0;
1653 }
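
/*
 * Illustrative sketch (not part of the library): the bring-up order an
 * application typically follows before rte_eth_dev_start(). The helper
 * name and the one-queue-per-direction layout are assumptions made for
 * the example only.
 */
static __rte_unused int
example_port_bring_up(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf port_conf;
        int ret;

        memset(&port_conf, 0, sizeof(port_conf));

        /* Configure the device with one Rx and one Tx queue. */
        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
                return ret;

        /* Set up the queues; 0 descriptors selects the driver default. */
        ret = rte_eth_rx_queue_setup(port_id, 0, 0,
                        rte_eth_dev_socket_id(port_id), NULL, mb_pool);
        if (ret != 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 0,
                        rte_eth_dev_socket_id(port_id), NULL);
        if (ret != 0)
                return ret;

        /* Start the device; the configuration replay above runs here. */
        return rte_eth_dev_start(port_id);
}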
1654
1655 void
1656 rte_eth_dev_stop(uint16_t port_id)
1657 {
1658         struct rte_eth_dev *dev;
1659
1660         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1661         dev = &rte_eth_devices[port_id];
1662
1663         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1664
1665         if (dev->data->dev_started == 0) {
1666                 RTE_ETHDEV_LOG(INFO,
1667                         "Device with port_id=%"PRIu16" already stopped\n",
1668                         port_id);
1669                 return;
1670         }
1671
1672         dev->data->dev_started = 0;
1673         (*dev->dev_ops->dev_stop)(dev);
1674 }
1675
1676 int
1677 rte_eth_dev_set_link_up(uint16_t port_id)
1678 {
1679         struct rte_eth_dev *dev;
1680
1681         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1682
1683         dev = &rte_eth_devices[port_id];
1684
1685         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1686         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1687 }
1688
1689 int
1690 rte_eth_dev_set_link_down(uint16_t port_id)
1691 {
1692         struct rte_eth_dev *dev;
1693
1694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1695
1696         dev = &rte_eth_devices[port_id];
1697
1698         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1699         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1700 }
1701
1702 void
1703 rte_eth_dev_close(uint16_t port_id)
1704 {
1705         struct rte_eth_dev *dev;
1706
1707         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1708         dev = &rte_eth_devices[port_id];
1709
1710         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1711         dev->data->dev_started = 0;
1712         (*dev->dev_ops->dev_close)(dev);
1713
1714         /* check behaviour flag - temporary for PMD migration */
1715         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1716                 /* new behaviour: send event + reset state + free all data */
1717                 rte_eth_dev_release_port(dev);
1718                 return;
1719         }
1720         RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
1721                         "The driver %s should migrate to the new behaviour.\n",
1722                         dev->device->driver->name);
1723         /* old behaviour: only free queue arrays */
1724         dev->data->nb_rx_queues = 0;
1725         rte_free(dev->data->rx_queues);
1726         dev->data->rx_queues = NULL;
1727         dev->data->nb_tx_queues = 0;
1728         rte_free(dev->data->tx_queues);
1729         dev->data->tx_queues = NULL;
1730 }
1731
1732 int
1733 rte_eth_dev_reset(uint16_t port_id)
1734 {
1735         struct rte_eth_dev *dev;
1736         int ret;
1737
1738         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1739         dev = &rte_eth_devices[port_id];
1740
1741         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1742
1743         rte_eth_dev_stop(port_id);
1744         ret = dev->dev_ops->dev_reset(dev);
1745
1746         return eth_err(port_id, ret);
1747 }
1748
1749 int
1750 rte_eth_dev_is_removed(uint16_t port_id)
1751 {
1752         struct rte_eth_dev *dev;
1753         int ret;
1754
1755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1756
1757         dev = &rte_eth_devices[port_id];
1758
1759         if (dev->state == RTE_ETH_DEV_REMOVED)
1760                 return 1;
1761
1762         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1763
1764         ret = dev->dev_ops->is_removed(dev);
1765         if (ret != 0)
1766                 /* Device is physically removed. */
1767                 dev->state = RTE_ETH_DEV_REMOVED;
1768
1769         return ret;
1770 }
1771
1772 int
1773 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1774                        uint16_t nb_rx_desc, unsigned int socket_id,
1775                        const struct rte_eth_rxconf *rx_conf,
1776                        struct rte_mempool *mp)
1777 {
1778         int ret;
1779         uint32_t mbp_buf_size;
1780         struct rte_eth_dev *dev;
1781         struct rte_eth_dev_info dev_info;
1782         struct rte_eth_rxconf local_conf;
1783         void **rxq;
1784
1785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1786
1787         dev = &rte_eth_devices[port_id];
1788         if (rx_queue_id >= dev->data->nb_rx_queues) {
1789                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1790                 return -EINVAL;
1791         }
1792
1793         if (mp == NULL) {
1794                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1795                 return -EINVAL;
1796         }
1797
1798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1799
1800         /*
1801          * Check the size of the mbuf data buffer.
1802          * This value must be provided in the private data of the memory pool.
1803          * First check that the memory pool has a valid private data.
1804          */
1805         ret = rte_eth_dev_info_get(port_id, &dev_info);
1806         if (ret != 0)
1807                 return ret;
1808
1809         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1810                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1811                         mp->name, (int)mp->private_data_size,
1812                         (int)sizeof(struct rte_pktmbuf_pool_private));
1813                 return -ENOSPC;
1814         }
1815         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1816
1817         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1818                 RTE_ETHDEV_LOG(ERR,
1819                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1820                         mp->name, (int)mbp_buf_size,
1821                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1822                         (int)RTE_PKTMBUF_HEADROOM,
1823                         (int)dev_info.min_rx_bufsize);
1824                 return -EINVAL;
1825         }
1826
1827         /* Use default specified by driver, if nb_rx_desc is zero */
1828         if (nb_rx_desc == 0) {
1829                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1830                 /* If driver default is also zero, fall back on EAL default */
1831                 if (nb_rx_desc == 0)
1832                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1833         }
1834
1835         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1836                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1837                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1839                 RTE_ETHDEV_LOG(ERR,
1840                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1841                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1842                         dev_info.rx_desc_lim.nb_min,
1843                         dev_info.rx_desc_lim.nb_align);
1844                 return -EINVAL;
1845         }
1846
1847         if (dev->data->dev_started &&
1848                 !(dev_info.dev_capa &
1849                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1850                 return -EBUSY;
1851
1852         if (dev->data->dev_started &&
1853                 (dev->data->rx_queue_state[rx_queue_id] !=
1854                         RTE_ETH_QUEUE_STATE_STOPPED))
1855                 return -EBUSY;
1856
1857         rxq = dev->data->rx_queues;
1858         if (rxq[rx_queue_id]) {
1859                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1860                                         -ENOTSUP);
1861                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1862                 rxq[rx_queue_id] = NULL;
1863         }
1864
1865         if (rx_conf == NULL)
1866                 rx_conf = &dev_info.default_rxconf;
1867
1868         local_conf = *rx_conf;
1869
1870         /*
1871          * If an offloading has already been enabled in
1872          * rte_eth_dev_configure(), it has been enabled on all queues,
1873          * so there is no need to enable it in this queue again.
1874          * The local_conf.offloads input to underlying PMD only carries
1875          * those offloadings which are only enabled on this queue and
1876          * not enabled on all queues.
1877          */
1878         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1879
1880         /*
1881          * New added offloadings for this queue are those not enabled in
1882          * rte_eth_dev_configure() and they must be per-queue type.
1883          * A pure per-port offloading can't be enabled on a queue while
1884          * disabled on another queue. A pure per-port offloading can't
1885          * be enabled for any queue as new added one if it hasn't been
1886          * enabled in rte_eth_dev_configure().
1887          */
1888         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1889              local_conf.offloads) {
1890                 RTE_ETHDEV_LOG(ERR,
1891                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1892                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1893                         port_id, rx_queue_id, local_conf.offloads,
1894                         dev_info.rx_queue_offload_capa,
1895                         __func__);
1896                 return -EINVAL;
1897         }
1898
1899         /*
1900          * If LRO is enabled, check that the maximum aggregated packet
1901          * size is supported by the configured device.
1902          */
1903         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1904                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1905                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1906                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1907                 int ret = check_lro_pkt_size(port_id,
1908                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1909                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1910                                 dev_info.max_lro_pkt_size);
1911                 if (ret != 0)
1912                         return ret;
1913         }
1914
1915         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1916                                               socket_id, &local_conf, mp);
1917         if (!ret) {
1918                 if (!dev->data->min_rx_buf_size ||
1919                     dev->data->min_rx_buf_size > mbp_buf_size)
1920                         dev->data->min_rx_buf_size = mbp_buf_size;
1921         }
1922
1923         return eth_err(port_id, ret);
1924 }
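
/*
 * Illustrative sketch: creating a mempool that satisfies the checks in
 * rte_eth_rx_queue_setup() above. The pool name and sizes are example
 * values only; rte_pktmbuf_pool_create() fills in the pool private data
 * the setup code requires, and the data room must cover
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize.
 */
static __rte_unused int
example_rx_queue_with_pool(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mempool *mp;

        mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE,
                        rte_eth_dev_socket_id(port_id));
        if (mp == NULL)
                return -rte_errno;

        /* A NULL rx_conf selects the driver's default_rxconf. */
        return rte_eth_rx_queue_setup(port_id, queue_id, 512,
                        rte_eth_dev_socket_id(port_id), NULL, mp);
}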
1925
1926 int
1927 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1928                                uint16_t nb_rx_desc,
1929                                const struct rte_eth_hairpin_conf *conf)
1930 {
1931         int ret;
1932         struct rte_eth_dev *dev;
1933         struct rte_eth_hairpin_cap cap;
1934         void **rxq;
1935         int i;
1936         int count;
1937
1938         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1939
1940         dev = &rte_eth_devices[port_id];
1941         if (rx_queue_id >= dev->data->nb_rx_queues) {
1942                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1943                 return -EINVAL;
1944         }
1945         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1946         if (ret != 0)
1947                 return ret;
1948         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1949                                 -ENOTSUP);
1950         /* if nb_rx_desc is zero use max number of desc from the driver. */
1951         if (nb_rx_desc == 0)
1952                 nb_rx_desc = cap.max_nb_desc;
1953         if (nb_rx_desc > cap.max_nb_desc) {
1954                 RTE_ETHDEV_LOG(ERR,
1955                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1956                         nb_rx_desc, cap.max_nb_desc);
1957                 return -EINVAL;
1958         }
1959         if (conf->peer_count > cap.max_rx_2_tx) {
1960                 RTE_ETHDEV_LOG(ERR,
1961                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1962                         conf->peer_count, cap.max_rx_2_tx);
1963                 return -EINVAL;
1964         }
1965         if (conf->peer_count == 0) {
1966                 RTE_ETHDEV_LOG(ERR,
1967                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
1968                         conf->peer_count);
1969                 return -EINVAL;
1970         }
1971         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1972              cap.max_nb_queues != UINT16_MAX; i++) {
1973                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1974                         count++;
1975         }
1976         if (count > cap.max_nb_queues) {
1977                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1978                                cap.max_nb_queues);
1979                 return -EINVAL;
1980         }
1981         if (dev->data->dev_started)
1982                 return -EBUSY;
1983         rxq = dev->data->rx_queues;
1984         if (rxq[rx_queue_id] != NULL) {
1985                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1986                                         -ENOTSUP);
1987                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1988                 rxq[rx_queue_id] = NULL;
1989         }
1990         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1991                                                       nb_rx_desc, conf);
1992         if (ret == 0)
1993                 dev->data->rx_queue_state[rx_queue_id] =
1994                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1995         return eth_err(port_id, ret);
1996 }
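
/*
 * Illustrative sketch (queue numbers are assumptions; field names follow
 * struct rte_eth_hairpin_conf): binding Rx queue rxq to Tx queue txq of
 * the same port with a single hairpin peer.
 */
static __rte_unused int
example_rx_hairpin_bind(uint16_t port_id, uint16_t rxq, uint16_t txq)
{
        struct rte_eth_hairpin_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.peer_count = 1;            /* must be > 0 and <= max_rx_2_tx */
        conf.peers[0].port = port_id;   /* loop back to the same port */
        conf.peers[0].queue = txq;

        /* 0 descriptors selects the driver maximum (cap.max_nb_desc). */
        return rte_eth_rx_hairpin_queue_setup(port_id, rxq, 0, &conf);
}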
1997
1998 int
1999 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2000                        uint16_t nb_tx_desc, unsigned int socket_id,
2001                        const struct rte_eth_txconf *tx_conf)
2002 {
2003         struct rte_eth_dev *dev;
2004         struct rte_eth_dev_info dev_info;
2005         struct rte_eth_txconf local_conf;
2006         void **txq;
2007         int ret;
2008
2009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2010
2011         dev = &rte_eth_devices[port_id];
2012         if (tx_queue_id >= dev->data->nb_tx_queues) {
2013                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2014                 return -EINVAL;
2015         }
2016
2017         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2018
2019         ret = rte_eth_dev_info_get(port_id, &dev_info);
2020         if (ret != 0)
2021                 return ret;
2022
2023         /* Use default specified by driver, if nb_tx_desc is zero */
2024         if (nb_tx_desc == 0) {
2025                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2026                 /* If driver default is zero, fall back on EAL default */
2027                 if (nb_tx_desc == 0)
2028                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2029         }
2030         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2031             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2032             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2033                 RTE_ETHDEV_LOG(ERR,
2034                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2035                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2036                         dev_info.tx_desc_lim.nb_min,
2037                         dev_info.tx_desc_lim.nb_align);
2038                 return -EINVAL;
2039         }
2040
2041         if (dev->data->dev_started &&
2042                 !(dev_info.dev_capa &
2043                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2044                 return -EBUSY;
2045
2046         if (dev->data->dev_started &&
2047                 (dev->data->tx_queue_state[tx_queue_id] !=
2048                         RTE_ETH_QUEUE_STATE_STOPPED))
2049                 return -EBUSY;
2050
2051         txq = dev->data->tx_queues;
2052         if (txq[tx_queue_id]) {
2053                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2054                                         -ENOTSUP);
2055                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2056                 txq[tx_queue_id] = NULL;
2057         }
2058
2059         if (tx_conf == NULL)
2060                 tx_conf = &dev_info.default_txconf;
2061
2062         local_conf = *tx_conf;
2063
2064         /*
2065          * If an offloading has already been enabled in
2066          * rte_eth_dev_configure(), it has been enabled on all queues,
2067          * so there is no need to enable it in this queue again.
2068          * The local_conf.offloads input to underlying PMD only carries
2069          * those offloadings which are only enabled on this queue and
2070          * not enabled on all queues.
2071          */
2072         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2073
2074         /*
2075          * New added offloadings for this queue are those not enabled in
2076          * rte_eth_dev_configure() and they must be per-queue type.
2077          * A pure per-port offloading can't be enabled on a queue while
2078          * disabled on another queue. A pure per-port offloading can't
2079          * be enabled for any queue as new added one if it hasn't been
2080          * enabled in rte_eth_dev_configure().
2081          */
2082         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2083              local_conf.offloads) {
2084                 RTE_ETHDEV_LOG(ERR,
2085                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2086                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2087                         port_id, tx_queue_id, local_conf.offloads,
2088                         dev_info.tx_queue_offload_capa,
2089                         __func__);
2090                 return -EINVAL;
2091         }
2092
2093         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2094                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2095 }
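
/*
 * Illustrative sketch: requesting a per-queue Tx offload on one queue
 * only, which is the case the per-queue capability check above guards.
 * The MBUF_FAST_FREE flag is just an example of an offload that is
 * commonly per-queue capable.
 */
static __rte_unused int
example_tx_queue_with_offload(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        txconf = dev_info.default_txconf;
        /* Request the offload for this queue only; offloads already
         * enabled port-wide in dev_configure need not be repeated.
         */
        if (dev_info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        return rte_eth_tx_queue_setup(port_id, queue_id, 0,
                        rte_eth_dev_socket_id(port_id), &txconf);
}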
2096
2097 int
2098 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2099                                uint16_t nb_tx_desc,
2100                                const struct rte_eth_hairpin_conf *conf)
2101 {
2102         struct rte_eth_dev *dev;
2103         struct rte_eth_hairpin_cap cap;
2104         void **txq;
2105         int i;
2106         int count;
2107         int ret;
2108
2109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2110         dev = &rte_eth_devices[port_id];
2111         if (tx_queue_id >= dev->data->nb_tx_queues) {
2112                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2113                 return -EINVAL;
2114         }
2115         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2116         if (ret != 0)
2117                 return ret;
2118         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2119                                 -ENOTSUP);
2120         /* if nb_tx_desc is zero use max number of desc from the driver. */
2121         if (nb_tx_desc == 0)
2122                 nb_tx_desc = cap.max_nb_desc;
2123         if (nb_tx_desc > cap.max_nb_desc) {
2124                 RTE_ETHDEV_LOG(ERR,
2125                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2126                         nb_tx_desc, cap.max_nb_desc);
2127                 return -EINVAL;
2128         }
2129         if (conf->peer_count > cap.max_tx_2_rx) {
2130                 RTE_ETHDEV_LOG(ERR,
2131                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2132                         conf->peer_count, cap.max_tx_2_rx);
2133                 return -EINVAL;
2134         }
2135         if (conf->peer_count == 0) {
2136                 RTE_ETHDEV_LOG(ERR,
2137                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2138                         conf->peer_count);
2139                 return -EINVAL;
2140         }
2141         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2142              cap.max_nb_queues != UINT16_MAX; i++) {
2143                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2144                         count++;
2145         }
2146         if (count > cap.max_nb_queues) {
2147                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2148                                cap.max_nb_queues);
2149                 return -EINVAL;
2150         }
2151         if (dev->data->dev_started)
2152                 return -EBUSY;
2153         txq = dev->data->tx_queues;
2154         if (txq[tx_queue_id] != NULL) {
2155                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2156                                         -ENOTSUP);
2157                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2158                 txq[tx_queue_id] = NULL;
2159         }
2160         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2161                 (dev, tx_queue_id, nb_tx_desc, conf);
2162         if (ret == 0)
2163                 dev->data->tx_queue_state[tx_queue_id] =
2164                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2165         return eth_err(port_id, ret);
2166 }
2167
2168 void
2169 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2170                 void *userdata __rte_unused)
2171 {
2172         unsigned i;
2173
2174         for (i = 0; i < unsent; i++)
2175                 rte_pktmbuf_free(pkts[i]);
2176 }
2177
2178 void
2179 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2180                 void *userdata)
2181 {
2182         uint64_t *count = userdata;
2183         unsigned i;
2184
2185         for (i = 0; i < unsent; i++)
2186                 rte_pktmbuf_free(pkts[i]);
2187
2188         *count += unsent;
2189 }
2190
2191 int
2192 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2193                 buffer_tx_error_fn cbfn, void *userdata)
2194 {
2195         buffer->error_callback = cbfn;
2196         buffer->error_userdata = userdata;
2197         return 0;
2198 }
2199
2200 int
2201 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2202 {
2203         int ret = 0;
2204
2205         if (buffer == NULL)
2206                 return -EINVAL;
2207
2208         buffer->size = size;
2209         if (buffer->error_callback == NULL) {
2210                 ret = rte_eth_tx_buffer_set_err_callback(
2211                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2212         }
2213
2214         return ret;
2215 }
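
/*
 * Illustrative sketch: allocating and using a Tx buffer. Unless another
 * callback is installed, rte_eth_tx_buffer_init() falls back to
 * rte_eth_tx_buffer_drop_callback() above, so unsent packets are freed
 * silently. The buffer size of 32 is an example value.
 */
static __rte_unused int
example_buffered_tx(uint16_t port_id, uint16_t queue_id,
                struct rte_mbuf *pkt)
{
        struct rte_eth_dev_tx_buffer *buffer;
        int ret;

        buffer = rte_zmalloc_socket("example_tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(32), 0,
                        rte_eth_dev_socket_id(port_id));
        if (buffer == NULL)
                return -ENOMEM;

        ret = rte_eth_tx_buffer_init(buffer, 32);
        if (ret != 0) {
                rte_free(buffer);
                return ret;
        }

        /* Packets accumulate and are sent in bursts of up to 32. */
        rte_eth_tx_buffer(port_id, queue_id, buffer, pkt);
        rte_eth_tx_buffer_flush(port_id, queue_id, buffer);

        rte_free(buffer);
        return 0;
}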
2216
2217 int
2218 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2219 {
2220         struct rte_eth_dev *dev;
2221         int ret;
2222
2223         /* Validate input data. Bail if not valid or not supported. */
2224         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2225         dev = &rte_eth_devices[port_id];
2226         if (queue_id >= dev->data->nb_tx_queues) {
2227                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
2228                 return -EINVAL;
2229         }
2230         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2226
2227         /* Call driver to free pending mbufs. */
2228         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2229                                                free_cnt);
2230         return eth_err(port_id, ret);
2231 }
2232
2233 int
2234 rte_eth_promiscuous_enable(uint16_t port_id)
2235 {
2236         struct rte_eth_dev *dev;
2237         int diag = 0;
2238
2239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2240         dev = &rte_eth_devices[port_id];
2241
2242         if (dev->data->promiscuous == 1)
2243                 return 0;
2244
2245         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2246
2247         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2248         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2249
2250         return eth_err(port_id, diag);
2251 }
2252
2253 int
2254 rte_eth_promiscuous_disable(uint16_t port_id)
2255 {
2256         struct rte_eth_dev *dev;
2257         int diag = 0;
2258
2259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260         dev = &rte_eth_devices[port_id];
2261
2262         if (dev->data->promiscuous == 0)
2263                 return 0;
2264
2265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2266
2267         dev->data->promiscuous = 0;
2268         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2269         if (diag != 0)
2270                 dev->data->promiscuous = 1;
2271
2272         return eth_err(port_id, diag);
2273 }
2274
2275 int
2276 rte_eth_promiscuous_get(uint16_t port_id)
2277 {
2278         struct rte_eth_dev *dev;
2279
2280         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2281
2282         dev = &rte_eth_devices[port_id];
2283         return dev->data->promiscuous;
2284 }
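
/*
 * Illustrative sketch: toggling promiscuous mode. The getter returns the
 * cached software state, so an enable that failed in the driver (diag
 * != 0 above) reads back as 0 here.
 */
static __rte_unused int
example_toggle_promiscuous(uint16_t port_id)
{
        return (rte_eth_promiscuous_get(port_id) == 1) ?
                        rte_eth_promiscuous_disable(port_id) :
                        rte_eth_promiscuous_enable(port_id);
}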
2285
2286 int
2287 rte_eth_allmulticast_enable(uint16_t port_id)
2288 {
2289         struct rte_eth_dev *dev;
2290         int diag;
2291
2292         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2293         dev = &rte_eth_devices[port_id];
2294
2295         if (dev->data->all_multicast == 1)
2296                 return 0;
2297
2298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2299         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2300         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2301
2302         return eth_err(port_id, diag);
2303 }
2304
2305 int
2306 rte_eth_allmulticast_disable(uint16_t port_id)
2307 {
2308         struct rte_eth_dev *dev;
2309         int diag;
2310
2311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2312         dev = &rte_eth_devices[port_id];
2313
2314         if (dev->data->all_multicast == 0)
2315                 return 0;
2316
2317         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2318         dev->data->all_multicast = 0;
2319         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2320         if (diag != 0)
2321                 dev->data->all_multicast = 1;
2322
2323         return eth_err(port_id, diag);
2324 }
2325
2326 int
2327 rte_eth_allmulticast_get(uint16_t port_id)
2328 {
2329         struct rte_eth_dev *dev;
2330
2331         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2332
2333         dev = &rte_eth_devices[port_id];
2334         return dev->data->all_multicast;
2335 }
2336
2337 int
2338 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2339 {
2340         struct rte_eth_dev *dev;
2341
2342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2343         dev = &rte_eth_devices[port_id];
2344
2345         if (dev->data->dev_conf.intr_conf.lsc &&
2346             dev->data->dev_started)
2347                 rte_eth_linkstatus_get(dev, eth_link);
2348         else {
2349                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2350                 (*dev->dev_ops->link_update)(dev, 1);
2351                 *eth_link = dev->data->dev_link;
2352         }
2353
2354         return 0;
2355 }
2356
2357 int
2358 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363         dev = &rte_eth_devices[port_id];
2364
2365         if (dev->data->dev_conf.intr_conf.lsc &&
2366             dev->data->dev_started)
2367                 rte_eth_linkstatus_get(dev, eth_link);
2368         else {
2369                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2370                 (*dev->dev_ops->link_update)(dev, 0);
2371                 *eth_link = dev->data->dev_link;
2372         }
2373
2374         return 0;
2375 }
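
/*
 * Illustrative sketch: polling link state without blocking. With LSC
 * interrupts enabled the cached value is returned; otherwise the driver
 * is queried with wait_to_complete == 0.
 */
static __rte_unused void
example_poll_link(uint16_t port_id)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        if (rte_eth_link_get_nowait(port_id, &link) == 0)
                RTE_ETHDEV_LOG(INFO, "Port %u link is %s, %u Mbps\n",
                        port_id, link.link_status ? "up" : "down",
                        link.link_speed);
}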
2376
2377 int
2378 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2379 {
2380         struct rte_eth_dev *dev;
2381
2382         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2383
2384         dev = &rte_eth_devices[port_id];
2385         memset(stats, 0, sizeof(*stats));
2386
2387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2388         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2389         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2390 }
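
/*
 * Illustrative sketch: reading the basic counters. Note rx_nombuf is
 * seeded from the software counter above before the driver callback
 * fills in the rest.
 */
static __rte_unused void
example_print_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) == 0)
                RTE_ETHDEV_LOG(INFO,
                        "Port %u: %"PRIu64" Rx, %"PRIu64" Tx, %"PRIu64" Rx missed\n",
                        port_id, stats.ipackets, stats.opackets,
                        stats.imissed);
}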
2391
2392 int
2393 rte_eth_stats_reset(uint16_t port_id)
2394 {
2395         struct rte_eth_dev *dev;
2396         int ret;
2397
2398         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2399         dev = &rte_eth_devices[port_id];
2400
2401         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2402         ret = (*dev->dev_ops->stats_reset)(dev);
2403         if (ret != 0)
2404                 return eth_err(port_id, ret);
2405
2406         dev->data->rx_mbuf_alloc_failed = 0;
2407
2408         return 0;
2409 }
2410
2411 static inline int
2412 get_xstats_basic_count(struct rte_eth_dev *dev)
2413 {
2414         uint16_t nb_rxqs, nb_txqs;
2415         int count;
2416
2417         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2418         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2419
2420         count = RTE_NB_STATS;
2421         count += nb_rxqs * RTE_NB_RXQ_STATS;
2422         count += nb_txqs * RTE_NB_TXQ_STATS;
2423
2424         return count;
2425 }
2426
2427 static int
2428 get_xstats_count(uint16_t port_id)
2429 {
2430         struct rte_eth_dev *dev;
2431         int count;
2432
2433         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2434         dev = &rte_eth_devices[port_id];
2435         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2436                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2437                                 NULL, 0);
2438                 if (count < 0)
2439                         return eth_err(port_id, count);
2440         }
2441         if (dev->dev_ops->xstats_get_names != NULL) {
2442                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2443                 if (count < 0)
2444                         return eth_err(port_id, count);
2445         } else
2446                 count = 0;
2447
2449         count += get_xstats_basic_count(dev);
2450
2451         return count;
2452 }
2453
2454 int
2455 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2456                 uint64_t *id)
2457 {
2458         int cnt_xstats, idx_xstat;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2461
2462         if (!id) {
2463                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2464                 return -ENOMEM;
2465         }
2466
2467         if (!xstat_name) {
2468                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2469                 return -ENOMEM;
2470         }
2471
2472         /* Get count */
2473         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2474         if (cnt_xstats  < 0) {
2475                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2476                 return -ENODEV;
2477         }
2478
2479         /* Get id-name lookup table */
2480         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2481
2482         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2483                         port_id, xstats_names, cnt_xstats, NULL)) {
2484                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2485                 return -1;
2486         }
2487
2488         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2489                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2490                         *id = idx_xstat;
2491                         return 0;
2492                 }
2493         }
2494
2495         return -EINVAL;
2496 }
2497
2498 /* retrieve basic stats names */
2499 static int
2500 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2501         struct rte_eth_xstat_name *xstats_names)
2502 {
2503         int cnt_used_entries = 0;
2504         uint32_t idx, id_queue;
2505         uint16_t num_q;
2506
2507         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2508                 strlcpy(xstats_names[cnt_used_entries].name,
2509                         rte_stats_strings[idx].name,
2510                         sizeof(xstats_names[0].name));
2511                 cnt_used_entries++;
2512         }
2513         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2514         for (id_queue = 0; id_queue < num_q; id_queue++) {
2515                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2516                         snprintf(xstats_names[cnt_used_entries].name,
2517                                 sizeof(xstats_names[0].name),
2518                                 "rx_q%u%s",
2519                                 id_queue, rte_rxq_stats_strings[idx].name);
2520                         cnt_used_entries++;
2521                 }
2522
2523         }
2524         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2525         for (id_queue = 0; id_queue < num_q; id_queue++) {
2526                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2527                         snprintf(xstats_names[cnt_used_entries].name,
2528                                 sizeof(xstats_names[0].name),
2529                                 "tx_q%u%s",
2530                                 id_queue, rte_txq_stats_strings[idx].name);
2531                         cnt_used_entries++;
2532                 }
2533         }
2534         return cnt_used_entries;
2535 }
2536
2537 /* retrieve ethdev extended statistics names */
2538 int
2539 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2540         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2541         uint64_t *ids)
2542 {
2543         struct rte_eth_xstat_name *xstats_names_copy;
2544         unsigned int no_basic_stat_requested = 1;
2545         unsigned int no_ext_stat_requested = 1;
2546         unsigned int expected_entries;
2547         unsigned int basic_count;
2548         struct rte_eth_dev *dev;
2549         unsigned int i;
2550         int ret;
2551
2552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2553         dev = &rte_eth_devices[port_id];
2554
2555         basic_count = get_xstats_basic_count(dev);
2556         ret = get_xstats_count(port_id);
2557         if (ret < 0)
2558                 return ret;
2559         expected_entries = (unsigned int)ret;
2560
2561         /* Return max number of stats if no ids given */
2562         if (!ids) {
2563                 if (!xstats_names)
2564                         return expected_entries;
2565                 else if (xstats_names && size < expected_entries)
2566                         return expected_entries;
2567         }
2568
2569         if (ids && !xstats_names)
2570                 return -EINVAL;
2571
2572         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2573                 uint64_t ids_copy[size];
2574
2575                 for (i = 0; i < size; i++) {
2576                         if (ids[i] < basic_count) {
2577                                 no_basic_stat_requested = 0;
2578                                 break;
2579                         }
2580
2581                         /*
2582                          * Convert ids to xstats ids that PMD knows.
2583                          * ids known by user are basic + extended stats.
2584                          */
2585                         ids_copy[i] = ids[i] - basic_count;
2586                 }
2587
2588                 if (no_basic_stat_requested)
2589                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2590                                         xstats_names, ids_copy, size);
2591         }
2592
2593         /* Retrieve all stats */
2594         if (!ids) {
2595                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2596                                 expected_entries);
2597                 if (num_stats < 0 || num_stats > (int)expected_entries)
2598                         return num_stats;
2599                 else
2600                         return expected_entries;
2601         }
2602
2603         xstats_names_copy = calloc(expected_entries,
2604                 sizeof(struct rte_eth_xstat_name));
2605
2606         if (!xstats_names_copy) {
2607                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2608                 return -ENOMEM;
2609         }
2610
2611         if (ids) {
2612                 for (i = 0; i < size; i++) {
2613                         if (ids[i] >= basic_count) {
2614                                 no_ext_stat_requested = 0;
2615                                 break;
2616                         }
2617                 }
2618         }
2619
2620         /* Fill xstats_names_copy structure */
2621         if (ids && no_ext_stat_requested) {
2622                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2623         } else {
2624                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2625                         expected_entries);
2626                 if (ret < 0) {
2627                         free(xstats_names_copy);
2628                         return ret;
2629                 }
2630         }
2631
2632         /* Filter stats */
2633         for (i = 0; i < size; i++) {
2634                 if (ids[i] >= expected_entries) {
2635                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2636                         free(xstats_names_copy);
2637                         return -1;
2638                 }
2639                 xstats_names[i] = xstats_names_copy[ids[i]];
2640         }
2641
2642         free(xstats_names_copy);
2643         return size;
2644 }
2645
2646 int
2647 rte_eth_xstats_get_names(uint16_t port_id,
2648         struct rte_eth_xstat_name *xstats_names,
2649         unsigned int size)
2650 {
2651         struct rte_eth_dev *dev;
2652         int cnt_used_entries;
2653         int cnt_expected_entries;
2654         int cnt_driver_entries;
2655
2656         cnt_expected_entries = get_xstats_count(port_id);
2657         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2658                         (int)size < cnt_expected_entries)
2659                 return cnt_expected_entries;
2660
2661         /* port_id checked in get_xstats_count() */
2662         dev = &rte_eth_devices[port_id];
2663
2664         cnt_used_entries = rte_eth_basic_stats_get_names(
2665                 dev, xstats_names);
2666
2667         if (dev->dev_ops->xstats_get_names != NULL) {
2668                 /* If there are any driver-specific xstats, append them
2669                  * to end of list.
2670                  */
2671                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2672                         dev,
2673                         xstats_names + cnt_used_entries,
2674                         size - cnt_used_entries);
2675                 if (cnt_driver_entries < 0)
2676                         return eth_err(port_id, cnt_driver_entries);
2677                 cnt_used_entries += cnt_driver_entries;
2678         }
2679
2680         return cnt_used_entries;
2681 }
2682
2683
2684 static int
2685 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2686 {
2687         struct rte_eth_dev *dev;
2688         struct rte_eth_stats eth_stats;
2689         unsigned int count = 0, i, q;
2690         uint64_t val, *stats_ptr;
2691         uint16_t nb_rxqs, nb_txqs;
2692         int ret;
2693
2694         ret = rte_eth_stats_get(port_id, &eth_stats);
2695         if (ret < 0)
2696                 return ret;
2697
2698         dev = &rte_eth_devices[port_id];
2699
2700         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2701         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2702
2703         /* global stats */
2704         for (i = 0; i < RTE_NB_STATS; i++) {
2705                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2706                                         rte_stats_strings[i].offset);
2707                 val = *stats_ptr;
2708                 xstats[count++].value = val;
2709         }
2710
2711         /* per-rxq stats */
2712         for (q = 0; q < nb_rxqs; q++) {
2713                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2714                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2715                                         rte_rxq_stats_strings[i].offset +
2716                                         q * sizeof(uint64_t));
2717                         val = *stats_ptr;
2718                         xstats[count++].value = val;
2719                 }
2720         }
2721
2722         /* per-txq stats */
2723         for (q = 0; q < nb_txqs; q++) {
2724                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2725                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2726                                         rte_txq_stats_strings[i].offset +
2727                                         q * sizeof(uint64_t));
2728                         val = *stats_ptr;
2729                         xstats[count++].value = val;
2730                 }
2731         }
2732         return count;
2733 }
2734
2735 /* retrieve ethdev extended statistics */
2736 int
2737 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2738                          uint64_t *values, unsigned int size)
2739 {
2740         unsigned int no_basic_stat_requested = 1;
2741         unsigned int no_ext_stat_requested = 1;
2742         unsigned int num_xstats_filled;
2743         unsigned int basic_count;
2744         uint16_t expected_entries;
2745         struct rte_eth_dev *dev;
2746         unsigned int i;
2747         int ret;
2748
2749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2750         ret = get_xstats_count(port_id);
2751         if (ret < 0)
2752                 return ret;
2753         expected_entries = (uint16_t)ret;
2754         struct rte_eth_xstat xstats[expected_entries];
2755         dev = &rte_eth_devices[port_id];
2756         basic_count = get_xstats_basic_count(dev);
2757
2758         /* Return max number of stats if no ids given */
2759         if (!ids) {
2760                 if (!values)
2761                         return expected_entries;
2762                 else if (values && size < expected_entries)
2763                         return expected_entries;
2764         }
2765
2766         if (ids && !values)
2767                 return -EINVAL;
2768
2769         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2771                 uint64_t ids_copy[size];
2772
2773                 for (i = 0; i < size; i++) {
2774                         if (ids[i] < basic_count) {
2775                                 no_basic_stat_requested = 0;
2776                                 break;
2777                         }
2778
2779                         /*
2780                          * Convert ids to xstats ids that PMD knows.
2781                          * ids known by user are basic + extended stats.
2782                          */
2783                         ids_copy[i] = ids[i] - basic_count;
2784                 }
2785
2786                 if (no_basic_stat_requested)
2787                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2788                                         values, size);
2789         }
2790
2791         if (ids) {
2792                 for (i = 0; i < size; i++) {
2793                         if (ids[i] >= basic_count) {
2794                                 no_ext_stat_requested = 0;
2795                                 break;
2796                         }
2797                 }
2798         }
2799
2800         /* Fill the xstats structure */
2801         if (ids && no_ext_stat_requested)
2802                 ret = rte_eth_basic_stats_get(port_id, xstats);
2803         else
2804                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2805
2806         if (ret < 0)
2807                 return ret;
2808         num_xstats_filled = (unsigned int)ret;
2809
2810         /* Return all stats */
2811         if (!ids) {
2812                 for (i = 0; i < num_xstats_filled; i++)
2813                         values[i] = xstats[i].value;
2814                 return expected_entries;
2815         }
2816
2817         /* Filter stats */
2818         for (i = 0; i < size; i++) {
2819                 if (ids[i] >= expected_entries) {
2820                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2821                         return -1;
2822                 }
2823                 values[i] = xstats[ids[i]].value;
2824         }
2825         return size;
2826 }
2827
2828 int
2829 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2830         unsigned int n)
2831 {
2832         struct rte_eth_dev *dev;
2833         unsigned int count = 0, i;
2834         signed int xcount = 0;
2835         uint16_t nb_rxqs, nb_txqs;
2836         int ret;
2837
2838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2839
2840         dev = &rte_eth_devices[port_id];
2841
2842         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2843         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2844
2845         /* Return generic statistics */
2846         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2847                 (nb_txqs * RTE_NB_TXQ_STATS);
2848
2849         /* implemented by the driver */
2850         if (dev->dev_ops->xstats_get != NULL) {
2851                 /* Retrieve the xstats from the driver at the end of the
2852                  * xstats struct.
2853                  */
2854                 xcount = (*dev->dev_ops->xstats_get)(dev,
2855                                      xstats ? xstats + count : NULL,
2856                                      (n > count) ? n - count : 0);
2857
2858                 if (xcount < 0)
2859                         return eth_err(port_id, xcount);
2860         }
2861
2862         if (n < count + xcount || xstats == NULL)
2863                 return count + xcount;
2864
2865         /* now fill the xstats structure */
2866         ret = rte_eth_basic_stats_get(port_id, xstats);
2867         if (ret < 0)
2868                 return ret;
2869         count = ret;
2870
2871         for (i = 0; i < count; i++)
2872                 xstats[i].id = i;
2873         /* add an offset to driver-specific stats */
2874         for ( ; i < count + xcount; i++)
2875                 xstats[i].id += count;
2876
2877         return count + xcount;
2878 }
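
/*
 * Illustrative sketch: the two-pass pattern rte_eth_xstats_get() above
 * expects. A first call with a NULL (or too small) buffer returns the
 * required count; the second call fills entries whose ids index the
 * matching names.
 */
static __rte_unused int
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *xstats;
        int i, count;

        count = rte_eth_xstats_get(port_id, NULL, 0);
        if (count < 0)
                return count;

        names = calloc(count, sizeof(*names));
        xstats = calloc(count, sizeof(*xstats));
        if (names == NULL || xstats == NULL) {
                free(names);
                free(xstats);
                return -ENOMEM;
        }

        if (rte_eth_xstats_get_names(port_id, names, count) == count &&
            rte_eth_xstats_get(port_id, xstats, count) == count)
                for (i = 0; i < count; i++)
                        RTE_ETHDEV_LOG(INFO, "%s: %"PRIu64"\n",
                                names[xstats[i].id].name, xstats[i].value);

        free(names);
        free(xstats);
        return 0;
}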
2879
2880 /* reset ethdev extended statistics */
2881 int
2882 rte_eth_xstats_reset(uint16_t port_id)
2883 {
2884         struct rte_eth_dev *dev;
2885
2886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2887         dev = &rte_eth_devices[port_id];
2888
2889         /* implemented by the driver */
2890         if (dev->dev_ops->xstats_reset != NULL)
2891                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2892
2893         /* fallback to default */
2894         return rte_eth_stats_reset(port_id);
2895 }
2896
2897 static int
2898 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2899                 uint8_t is_rx)
2900 {
2901         struct rte_eth_dev *dev;
2902
2903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2904
2905         dev = &rte_eth_devices[port_id];
2906
2907         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2908
2909         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2910                 return -EINVAL;
2911
2912         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2913                 return -EINVAL;
2914
2915         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2916                 return -EINVAL;
2917
2918         return (*dev->dev_ops->queue_stats_mapping_set)
2919                         (dev, queue_id, stat_idx, is_rx);
2920 }
2921
2922
2923 int
2924 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2925                 uint8_t stat_idx)
2926 {
2927         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2928                                                 stat_idx, STAT_QMAP_TX));
2929 }
2930
2931
2932 int
2933 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2934                 uint8_t stat_idx)
2935 {
2936         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2937                                                 stat_idx, STAT_QMAP_RX));
2938 }
2939
2940 int
2941 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2942 {
2943         struct rte_eth_dev *dev;
2944
2945         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2946         dev = &rte_eth_devices[port_id];
2947
2948         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2949         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2950                                                         fw_version, fw_size));
2951 }
2952
2953 int
2954 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2955 {
2956         struct rte_eth_dev *dev;
2957         const struct rte_eth_desc_lim lim = {
2958                 .nb_max = UINT16_MAX,
2959                 .nb_min = 0,
2960                 .nb_align = 1,
2961                 .nb_seg_max = UINT16_MAX,
2962                 .nb_mtu_seg_max = UINT16_MAX,
2963         };
2964         int diag;
2965
2966         /*
2967          * Initialize dev_info before the port_id check: a caller that
2968          * ignores the return status must not read uninitialized data.
2969          */
2970         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2971
2972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2973         dev = &rte_eth_devices[port_id];
2974
2975         dev_info->rx_desc_lim = lim;
2976         dev_info->tx_desc_lim = lim;
2977         dev_info->device = dev->device;
2978         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2979         dev_info->max_mtu = UINT16_MAX;
2980
2981         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2982         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2983         if (diag != 0) {
2984                 /* Cleanup already filled in device information */
2985                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2986                 return eth_err(port_id, diag);
2987         }
2988
2989         dev_info->driver_name = dev->device->driver->name;
2990         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2991         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2992
2993         dev_info->dev_flags = &dev->data->dev_flags;
2994
2995         return 0;
2996 }
2997
2998 int
2999 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3000                                  uint32_t *ptypes, int num)
3001 {
3002         int i, j;
3003         struct rte_eth_dev *dev;
3004         const uint32_t *all_ptypes;
3005
3006         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3007         dev = &rte_eth_devices[port_id];
3008         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3009         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3010
3011         if (!all_ptypes)
3012                 return 0;
3013
3014         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3015                 if (all_ptypes[i] & ptype_mask) {
3016                         if (j < num)
3017                                 ptypes[j] = all_ptypes[i];
3018                         j++;
3019                 }
3020
3021         return j;
3022 }
3023
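/*
 * Usage sketch (illustrative only): rte_eth_dev_get_supported_ptypes()
 * follows the same convention as above and returns the number of matching
 * entries even when more matched than fit in the array. port_id is a
 * caller-supplied placeholder.
 *
 *      uint32_t ptypes[16];
 *      int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *                      ptypes, RTE_DIM(ptypes));
 *
 *      if (n > (int)RTE_DIM(ptypes)) {
 *              ... array too small, retry with room for n entries ...
 *      }
 */
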
3024 int
3025 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3026                                  uint32_t *set_ptypes, unsigned int num)
3027 {
3028         const uint32_t valid_ptype_masks[] = {
3029                 RTE_PTYPE_L2_MASK,
3030                 RTE_PTYPE_L3_MASK,
3031                 RTE_PTYPE_L4_MASK,
3032                 RTE_PTYPE_TUNNEL_MASK,
3033                 RTE_PTYPE_INNER_L2_MASK,
3034                 RTE_PTYPE_INNER_L3_MASK,
3035                 RTE_PTYPE_INNER_L4_MASK,
3036         };
3037         const uint32_t *all_ptypes;
3038         struct rte_eth_dev *dev;
3039         uint32_t unused_mask;
3040         unsigned int i, j;
3041         int ret;
3042
3043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3044         dev = &rte_eth_devices[port_id];
3045
3046         if (num > 0 && set_ptypes == NULL)
3047                 return -EINVAL;
3048
3049         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3050                         *dev->dev_ops->dev_ptypes_set == NULL) {
3051                 ret = 0;
3052                 goto ptype_unknown;
3053         }
3054
3055         if (ptype_mask == 0) {
3056                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3057                                 ptype_mask);
3058                 goto ptype_unknown;
3059         }
3060
3061         unused_mask = ptype_mask;
3062         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3063                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3064                 if (mask && mask != valid_ptype_masks[i]) {
3065                         ret = -EINVAL;
3066                         goto ptype_unknown;
3067                 }
3068                 unused_mask &= ~valid_ptype_masks[i];
3069         }
3070
3071         if (unused_mask) {
3072                 ret = -EINVAL;
3073                 goto ptype_unknown;
3074         }
3075
3076         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3077         if (all_ptypes == NULL) {
3078                 ret = 0;
3079                 goto ptype_unknown;
3080         }
3081
3082         /*
3083          * Accommodate as many set_ptypes as possible. If the supplied
3084          * set_ptypes array is too small, fill it partially.
3085          */
3086         for (i = 0, j = 0; set_ptypes != NULL &&
3087                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3088                 if (ptype_mask & all_ptypes[i]) {
3089                         if (j + 1 < num) {
3090                                 set_ptypes[j] = all_ptypes[i];
3091                                 j++;
3092                                 continue;
3093                         }
3094                         break;
3095                 }
3096         }
3097
3098         if (set_ptypes != NULL && j < num)
3099                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3100
3101         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3102
3103 ptype_unknown:
3104         if (num > 0)
3105                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3106
3107         return ret;
3108 }
3109
3110 int
3111 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3112 {
3113         struct rte_eth_dev *dev;
3114
3115         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3116         dev = &rte_eth_devices[port_id];
3117         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3118
3119         return 0;
3120 }
3121
3122 int
3123 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3124 {
3125         struct rte_eth_dev *dev;
3126
3127         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3128
3129         dev = &rte_eth_devices[port_id];
3130         *mtu = dev->data->mtu;
3131         return 0;
3132 }
3133
3134 int
3135 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3136 {
3137         int ret;
3138         struct rte_eth_dev_info dev_info;
3139         struct rte_eth_dev *dev;
3140
3141         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3142         dev = &rte_eth_devices[port_id];
3143         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3144
3145         /*
3146          * If the device does not support dev_infos_get, skip the
3147          * min_mtu/max_mtu validation here: it needs values that are
3148          * populated by rte_eth_dev_info_get(), which in turn relies on
3149          * dev->dev_ops->dev_infos_get.
3150          */
3151         if (*dev->dev_ops->dev_infos_get != NULL) {
3152                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3153                 if (ret != 0)
3154                         return ret;
3155
3156                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3157                         return -EINVAL;
3158         }
3159
3160         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3161         if (!ret)
3162                 dev->data->mtu = mtu;
3163
3164         return eth_err(port_id, ret);
3165 }
3166
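/*
 * Usage sketch (illustrative only): validating against the reported
 * min_mtu/max_mtu before calling, mirroring the check done above when the
 * driver implements dev_infos_get. port_id and the MTU value 9000 are
 * caller-supplied placeholders.
 *
 *      struct rte_eth_dev_info info;
 *
 *      if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *                      info.min_mtu <= 9000 && 9000 <= info.max_mtu)
 *              (void)rte_eth_dev_set_mtu(port_id, 9000);
 */
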
3167 int
3168 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3169 {
3170         struct rte_eth_dev *dev;
3171         int ret;
3172
3173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3174         dev = &rte_eth_devices[port_id];
3175         if (!(dev->data->dev_conf.rxmode.offloads &
3176               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3177                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3178                         port_id);
3179                 return -ENOSYS;
3180         }
3181
3182         if (vlan_id > 4095) {
3183                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3184                         port_id, vlan_id);
3185                 return -EINVAL;
3186         }
3187         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3188
3189         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3190         if (ret == 0) {
3191                 struct rte_vlan_filter_conf *vfc;
3192                 int vidx;
3193                 int vbit;
3194
3195                 vfc = &dev->data->vlan_filter_conf;
3196                 vidx = vlan_id / 64;
3197                 vbit = vlan_id % 64;
3198
3199                 if (on)
3200                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3201                 else
3202                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3203         }
3204
3205         return eth_err(port_id, ret);
3206 }
3207
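/*
 * Usage sketch (illustrative only): VLAN filtering must be enabled as an
 * Rx offload at configure time, otherwise the call above fails with
 * -ENOSYS. port_id, the queue counts and VLAN id 100 are caller-supplied
 * placeholders.
 *
 *      struct rte_eth_conf conf = { .rxmode = { .offloads =
 *                      DEV_RX_OFFLOAD_VLAN_FILTER } };
 *
 *      if (rte_eth_dev_configure(port_id, 1, 1, &conf) == 0)
 *              (void)rte_eth_dev_vlan_filter(port_id, 100, 1);
 */
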
3208 int
3209 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3210                                     int on)
3211 {
3212         struct rte_eth_dev *dev;
3213
3214         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3215         dev = &rte_eth_devices[port_id];
3216         if (rx_queue_id >= dev->data->nb_rx_queues) {
3217                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3218                 return -EINVAL;
3219         }
3220
3221         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3222         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3223
3224         return 0;
3225 }
3226
3227 int
3228 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3229                                 enum rte_vlan_type vlan_type,
3230                                 uint16_t tpid)
3231 {
3232         struct rte_eth_dev *dev;
3233
3234         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3235         dev = &rte_eth_devices[port_id];
3236         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3237
3238         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3239                                                                tpid));
3240 }
3241
3242 int
3243 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3244 {
3245         struct rte_eth_dev *dev;
3246         int ret = 0;
3247         int mask = 0;
3248         int cur, org = 0;
3249         uint64_t orig_offloads;
3250         uint64_t *dev_offloads;
3251
3252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3253         dev = &rte_eth_devices[port_id];
3254
3255         /* save original values in case of failure */
3256         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3257         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3258
3259         /* check which options were changed by the application */
3260         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3261         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3262         if (cur != org) {
3263                 if (cur)
3264                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3265                 else
3266                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3267                 mask |= ETH_VLAN_STRIP_MASK;
3268         }
3269
3270         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3271         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3272         if (cur != org) {
3273                 if (cur)
3274                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3275                 else
3276                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3277                 mask |= ETH_VLAN_FILTER_MASK;
3278         }
3279
3280         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3281         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3282         if (cur != org) {
3283                 if (cur)
3284                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3285                 else
3286                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3287                 mask |= ETH_VLAN_EXTEND_MASK;
3288         }
3289
3290         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3291         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3292         if (cur != org) {
3293                 if (cur)
3294                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3295                 else
3296                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3297                 mask |= ETH_QINQ_STRIP_MASK;
3298         }
3299
3300         /* no change */
3301         if (mask == 0)
3302                 return ret;
3303
3304         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3305         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3306         if (ret) {
3307                 /* hit an error, restore the original values */
3308                 *dev_offloads = orig_offloads;
3309         }
3310
3311         return eth_err(port_id, ret);
3312 }
3313
3314 int
3315 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3316 {
3317         struct rte_eth_dev *dev;
3318         uint64_t *dev_offloads;
3319         int ret = 0;
3320
3321         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3322         dev = &rte_eth_devices[port_id];
3323         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3324
3325         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3326                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3327
3328         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3329                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3330
3331         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3332                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3333
3334         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3335                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3336
3337         return ret;
3338 }
3339
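/*
 * Usage sketch (illustrative only): a read-modify-write of the VLAN
 * offload mask using the getter above, enabling stripping while leaving
 * the other bits untouched. port_id is a caller-supplied placeholder.
 *
 *      int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *      if (mask >= 0)
 *              (void)rte_eth_dev_set_vlan_offload(port_id,
 *                              mask | ETH_VLAN_STRIP_OFFLOAD);
 */
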
3340 int
3341 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3342 {
3343         struct rte_eth_dev *dev;
3344
3345         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3346         dev = &rte_eth_devices[port_id];
3347         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3348
3349         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3350 }
3351
3352 int
3353 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3354 {
3355         struct rte_eth_dev *dev;
3356
3357         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3358         dev = &rte_eth_devices[port_id];
3359         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3360         memset(fc_conf, 0, sizeof(*fc_conf));
3361         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3362 }
3363
3364 int
3365 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3366 {
3367         struct rte_eth_dev *dev;
3368
3369         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3370         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3371                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3372                 return -EINVAL;
3373         }
3374
3375         dev = &rte_eth_devices[port_id];
3376         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3377         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3378 }
3379
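/*
 * Usage sketch (illustrative only): the usual get-modify-set sequence.
 * rte_eth_dev_flow_ctrl_get() zeroes and then fills fc_conf, so fields the
 * application does not touch keep the driver-reported values. port_id is a
 * caller-supplied placeholder.
 *
 *      struct rte_eth_fc_conf fc;
 *
 *      if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *              fc.mode = RTE_FC_FULL;
 *              (void)rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *      }
 */
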
3380 int
3381 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3382                                    struct rte_eth_pfc_conf *pfc_conf)
3383 {
3384         struct rte_eth_dev *dev;
3385
3386         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3387         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3388                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3389                 return -EINVAL;
3390         }
3391
3392         dev = &rte_eth_devices[port_id];
3393         /* High water / low water validation is device-specific */
3394         if  (*dev->dev_ops->priority_flow_ctrl_set)
3395                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3396                                         (dev, pfc_conf));
3397         return -ENOTSUP;
3398 }
3399
3400 static int
3401 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3402                         uint16_t reta_size)
3403 {
3404         uint16_t i, num;
3405
3406         if (!reta_conf)
3407                 return -EINVAL;
3408
3409         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3410         for (i = 0; i < num; i++) {
3411                 if (reta_conf[i].mask)
3412                         return 0;
3413         }
3414
3415         return -EINVAL;
3416 }
3417
3418 static int
3419 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3420                          uint16_t reta_size,
3421                          uint16_t max_rxq)
3422 {
3423         uint16_t i, idx, shift;
3424
3425         if (!reta_conf)
3426                 return -EINVAL;
3427
3428         if (max_rxq == 0) {
3429                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3430                 return -EINVAL;
3431         }
3432
3433         for (i = 0; i < reta_size; i++) {
3434                 idx = i / RTE_RETA_GROUP_SIZE;
3435                 shift = i % RTE_RETA_GROUP_SIZE;
3436                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3437                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3438                         RTE_ETHDEV_LOG(ERR,
3439                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3440                                 idx, shift,
3441                                 reta_conf[idx].reta[shift], max_rxq);
3442                         return -EINVAL;
3443                 }
3444         }
3445
3446         return 0;
3447 }
3448
3449 int
3450 rte_eth_dev_rss_reta_update(uint16_t port_id,
3451                             struct rte_eth_rss_reta_entry64 *reta_conf,
3452                             uint16_t reta_size)
3453 {
3454         struct rte_eth_dev *dev;
3455         int ret;
3456
3457         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3458         /* Check mask bits */
3459         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3460         if (ret < 0)
3461                 return ret;
3462
3463         dev = &rte_eth_devices[port_id];
3464
3465         /* Check entry value */
3466         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3467                                 dev->data->nb_rx_queues);
3468         if (ret < 0)
3469                 return ret;
3470
3471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3472         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3473                                                              reta_size));
3474 }
3475
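/*
 * Usage sketch (illustrative only): building a redirection table that
 * spreads reta_size entries round-robin across nb_queues Rx queues. Every
 * entry written must have its bit set in the per-group mask, as checked
 * above. port_id, reta_size (here at most 512) and nb_queues are
 * caller-supplied placeholders.
 *
 *      struct rte_eth_rss_reta_entry64 reta[8];
 *      uint16_t i;
 *
 *      memset(reta, 0, sizeof(reta));
 *      for (i = 0; i < reta_size; i++) {
 *              reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                              UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *              reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                              i % nb_queues;
 *      }
 *      (void)rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */
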
3476 int
3477 rte_eth_dev_rss_reta_query(uint16_t port_id,
3478                            struct rte_eth_rss_reta_entry64 *reta_conf,
3479                            uint16_t reta_size)
3480 {
3481         struct rte_eth_dev *dev;
3482         int ret;
3483
3484         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3485
3486         /* Check mask bits */
3487         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3488         if (ret < 0)
3489                 return ret;
3490
3491         dev = &rte_eth_devices[port_id];
3492         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3493         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3494                                                             reta_size));
3495 }
3496
3497 int
3498 rte_eth_dev_rss_hash_update(uint16_t port_id,
3499                             struct rte_eth_rss_conf *rss_conf)
3500 {
3501         struct rte_eth_dev *dev;
3502         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3503         int ret;
3504
3505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3506
3507         ret = rte_eth_dev_info_get(port_id, &dev_info);
3508         if (ret != 0)
3509                 return ret;
3510
3511         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3512
3513         dev = &rte_eth_devices[port_id];
3514         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3515             dev_info.flow_type_rss_offloads) {
3516                 RTE_ETHDEV_LOG(ERR,
3517                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3518                         port_id, rss_conf->rss_hf,
3519                         dev_info.flow_type_rss_offloads);
3520                 return -EINVAL;
3521         }
3522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3523         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3524                                                                  rss_conf));
3525 }
3526
3527 int
3528 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3529                               struct rte_eth_rss_conf *rss_conf)
3530 {
3531         struct rte_eth_dev *dev;
3532
3533         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3534         dev = &rte_eth_devices[port_id];
3535         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3536         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3537                                                                    rss_conf));
3538 }
3539
3540 int
3541 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3542                                 struct rte_eth_udp_tunnel *udp_tunnel)
3543 {
3544         struct rte_eth_dev *dev;
3545
3546         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3547         if (udp_tunnel == NULL) {
3548                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3549                 return -EINVAL;
3550         }
3551
3552         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3553                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3554                 return -EINVAL;
3555         }
3556
3557         dev = &rte_eth_devices[port_id];
3558         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3559         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3560                                                                 udp_tunnel));
3561 }
3562
3563 int
3564 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3565                                    struct rte_eth_udp_tunnel *udp_tunnel)
3566 {
3567         struct rte_eth_dev *dev;
3568
3569         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3570         dev = &rte_eth_devices[port_id];
3571
3572         if (udp_tunnel == NULL) {
3573                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3574                 return -EINVAL;
3575         }
3576
3577         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3578                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3579                 return -EINVAL;
3580         }
3581
3582         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3583         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3584                                                                 udp_tunnel));
3585 }
3586
3587 int
3588 rte_eth_led_on(uint16_t port_id)
3589 {
3590         struct rte_eth_dev *dev;
3591
3592         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3593         dev = &rte_eth_devices[port_id];
3594         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3595         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3596 }
3597
3598 int
3599 rte_eth_led_off(uint16_t port_id)
3600 {
3601         struct rte_eth_dev *dev;
3602
3603         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3604         dev = &rte_eth_devices[port_id];
3605         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3606         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3607 }
3608
3609 /*
3610  * Returns the index of addr in the MAC address array, or -1 if it is not
3611  * present. Pass 00:00:00:00:00:00 to find an empty slot.
3612  */
3613 static int
3614 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3615 {
3616         struct rte_eth_dev_info dev_info;
3617         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3618         unsigned i;
3619         int ret;
3620
3621         ret = rte_eth_dev_info_get(port_id, &dev_info);
3622         if (ret != 0)
3623                 return -1;
3624
3625         for (i = 0; i < dev_info.max_mac_addrs; i++)
3626                 if (memcmp(addr, &dev->data->mac_addrs[i],
3627                                 RTE_ETHER_ADDR_LEN) == 0)
3628                         return i;
3629
3630         return -1;
3631 }
3632
3633 static const struct rte_ether_addr null_mac_addr;
3634
3635 int
3636 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3637                         uint32_t pool)
3638 {
3639         struct rte_eth_dev *dev;
3640         int index;
3641         uint64_t pool_mask;
3642         int ret;
3643
3644         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3645         dev = &rte_eth_devices[port_id];
3646         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3647
3648         if (rte_is_zero_ether_addr(addr)) {
3649                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3650                         port_id);
3651                 return -EINVAL;
3652         }
3653         if (pool >= ETH_64_POOLS) {
3654                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3655                 return -EINVAL;
3656         }
3657
3658         index = get_mac_addr_index(port_id, addr);
3659         if (index < 0) {
3660                 index = get_mac_addr_index(port_id, &null_mac_addr);
3661                 if (index < 0) {
3662                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3663                                 port_id);
3664                         return -ENOSPC;
3665                 }
3666         } else {
3667                 pool_mask = dev->data->mac_pool_sel[index];
3668
3669                 /* If both the MAC address and the pool are already there, do nothing */
3670                 if (pool_mask & (1ULL << pool))
3671                         return 0;
3672         }
3673
3674         /* Update NIC */
3675         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3676
3677         if (ret == 0) {
3678                 /* Update address in NIC data structure */
3679                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3680
3681                 /* Update pool bitmap in NIC data structure */
3682                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3683         }
3684
3685         return eth_err(port_id, ret);
3686 }
3687
3688 int
3689 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3690 {
3691         struct rte_eth_dev *dev;
3692         int index;
3693
3694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3695         dev = &rte_eth_devices[port_id];
3696         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3697
3698         index = get_mac_addr_index(port_id, addr);
3699         if (index == 0) {
3700                 RTE_ETHDEV_LOG(ERR,
3701                         "Port %u: Cannot remove default MAC address\n",
3702                         port_id);
3703                 return -EADDRINUSE;
3704         } else if (index < 0)
3705                 return 0;  /* Do nothing if address wasn't found */
3706
3707         /* Update NIC */
3708         (*dev->dev_ops->mac_addr_remove)(dev, index);
3709
3710         /* Update address in NIC data structure */
3711         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3712
3713         /* reset pool bitmap */
3714         dev->data->mac_pool_sel[index] = 0;
3715
3716         return 0;
3717 }
3718
3719 int
3720 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3721 {
3722         struct rte_eth_dev *dev;
3723         int ret;
3724
3725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3726
3727         if (!rte_is_valid_assigned_ether_addr(addr))
3728                 return -EINVAL;
3729
3730         dev = &rte_eth_devices[port_id];
3731         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3732
3733         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3734         if (ret < 0)
3735                 return ret;
3736
3737         /* Update default address in NIC data structure */
3738         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3739
3740         return 0;
3741 }
3742
3743
3744 /*
3745  * Returns the index of addr in the hash MAC address array, or -1 if it is
3746  * not present. Pass 00:00:00:00:00:00 to find an empty slot.
3747  */
3748 static int
3749 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3750 {
3751         struct rte_eth_dev_info dev_info;
3752         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3753         unsigned i;
3754         int ret;
3755
3756         ret = rte_eth_dev_info_get(port_id, &dev_info);
3757         if (ret != 0)
3758                 return -1;
3759
3760         if (!dev->data->hash_mac_addrs)
3761                 return -1;
3762
3763         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3764                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3765                         RTE_ETHER_ADDR_LEN) == 0)
3766                         return i;
3767
3768         return -1;
3769 }
3770
3771 int
3772 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3773                                 uint8_t on)
3774 {
3775         int index;
3776         int ret;
3777         struct rte_eth_dev *dev;
3778
3779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3780
3781         dev = &rte_eth_devices[port_id];
3782         if (rte_is_zero_ether_addr(addr)) {
3783                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3784                         port_id);
3785                 return -EINVAL;
3786         }
3787
3788         index = get_hash_mac_addr_index(port_id, addr);
3789         /* Check if it's already there, and do nothing */
3790         if ((index >= 0) && on)
3791                 return 0;
3792
3793         if (index < 0) {
3794                 if (!on) {
3795                         RTE_ETHDEV_LOG(ERR,
3796                                 "Port %u: the MAC address was not set in UTA\n",
3797                                 port_id);
3798                         return -EINVAL;
3799                 }
3800
3801                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3802                 if (index < 0) {
3803                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3804                                 port_id);
3805                         return -ENOSPC;
3806                 }
3807         }
3808
3809         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3810         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3811         if (ret == 0) {
3812                 /* Update address in NIC data structure */
3813                 if (on)
3814                         rte_ether_addr_copy(addr,
3815                                         &dev->data->hash_mac_addrs[index]);
3816                 else
3817                         rte_ether_addr_copy(&null_mac_addr,
3818                                         &dev->data->hash_mac_addrs[index]);
3819         }
3820
3821         return eth_err(port_id, ret);
3822 }
3823
3824 int
3825 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3826 {
3827         struct rte_eth_dev *dev;
3828
3829         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3830
3831         dev = &rte_eth_devices[port_id];
3832
3833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3834         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3835                                                                        on));
3836 }
3837
3838 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3839                                         uint16_t tx_rate)
3840 {
3841         struct rte_eth_dev *dev;
3842         struct rte_eth_dev_info dev_info;
3843         struct rte_eth_link link;
3844         int ret;
3845
3846         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3847
3848         ret = rte_eth_dev_info_get(port_id, &dev_info);
3849         if (ret != 0)
3850                 return ret;
3851
3852         dev = &rte_eth_devices[port_id];
3853         link = dev->data->dev_link;
3854
3855         if (queue_idx > dev_info.max_tx_queues) {
3856                 RTE_ETHDEV_LOG(ERR,
3857                         "Set queue rate limit:port %u: invalid queue id=%u\n",
3858                         port_id, queue_idx);
3859                 return -EINVAL;
3860         }
3861
3862         if (tx_rate > link.link_speed) {
3863                 RTE_ETHDEV_LOG(ERR,
3864                         "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
3865                         tx_rate, link.link_speed);
3866                 return -EINVAL;
3867         }
3868
3869         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3870         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3871                                                         queue_idx, tx_rate));
3872 }
3873
3874 int
3875 rte_eth_mirror_rule_set(uint16_t port_id,
3876                         struct rte_eth_mirror_conf *mirror_conf,
3877                         uint8_t rule_id, uint8_t on)
3878 {
3879         struct rte_eth_dev *dev;
3880
3881         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3882         if (mirror_conf->rule_type == 0) {
3883                 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
3884                 return -EINVAL;
3885         }
3886
3887         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3888                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3889                         ETH_64_POOLS - 1);
3890                 return -EINVAL;
3891         }
3892
3893         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3894              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3895             (mirror_conf->pool_mask == 0)) {
3896                 RTE_ETHDEV_LOG(ERR,
3897                         "Invalid mirror pool, pool mask can not be 0\n");
3898                 return -EINVAL;
3899         }
3900
3901         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3902             mirror_conf->vlan.vlan_mask == 0) {
3903                 RTE_ETHDEV_LOG(ERR,
3904                         "Invalid vlan mask, vlan mask can not be 0\n");
3905                 return -EINVAL;
3906         }
3907
3908         dev = &rte_eth_devices[port_id];
3909         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3910
3911         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3912                                                 mirror_conf, rule_id, on));
3913 }
3914
3915 int
3916 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3917 {
3918         struct rte_eth_dev *dev;
3919
3920         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3921
3922         dev = &rte_eth_devices[port_id];
3923         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3924
3925         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3926                                                                    rule_id));
3927 }
3928
3929 RTE_INIT(eth_dev_init_cb_lists)
3930 {
3931         int i;
3932
3933         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3934                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3935 }
3936
3937 int
3938 rte_eth_dev_callback_register(uint16_t port_id,
3939                         enum rte_eth_event_type event,
3940                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3941 {
3942         struct rte_eth_dev *dev;
3943         struct rte_eth_dev_callback *user_cb;
3944         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3945         uint16_t last_port;
3946
3947         if (!cb_fn)
3948                 return -EINVAL;
3949
3950         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3951                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3952                 return -EINVAL;
3953         }
3954
3955         if (port_id == RTE_ETH_ALL) {
3956                 next_port = 0;
3957                 last_port = RTE_MAX_ETHPORTS - 1;
3958         } else {
3959                 next_port = last_port = port_id;
3960         }
3961
3962         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3963
3964         do {
3965                 dev = &rte_eth_devices[next_port];
3966
3967                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3968                         if (user_cb->cb_fn == cb_fn &&
3969                                 user_cb->cb_arg == cb_arg &&
3970                                 user_cb->event == event) {
3971                                 break;
3972                         }
3973                 }
3974
3975                 /* create a new callback. */
3976                 if (user_cb == NULL) {
3977                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3978                                 sizeof(struct rte_eth_dev_callback), 0);
3979                         if (user_cb != NULL) {
3980                                 user_cb->cb_fn = cb_fn;
3981                                 user_cb->cb_arg = cb_arg;
3982                                 user_cb->event = event;
3983                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3984                                                   user_cb, next);
3985                         } else {
3986                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3987                                 rte_eth_dev_callback_unregister(port_id, event,
3988                                                                 cb_fn, cb_arg);
3989                                 return -ENOMEM;
3990                         }
3991
3992                 }
3993         } while (++next_port <= last_port);
3994
3995         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3996         return 0;
3997 }
3998
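/*
 * Usage sketch (illustrative only): registering a link-status handler on
 * every port via RTE_ETH_ALL. The handler must match rte_eth_dev_cb_fn;
 * the name on_link_change is a placeholder.
 *
 *      static int
 *      on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *                      void *cb_arg, void *ret_param)
 *      {
 *              RTE_SET_USED(event);
 *              RTE_SET_USED(cb_arg);
 *              RTE_SET_USED(ret_param);
 *              printf("link changed on port %u\n", port_id);
 *              return 0;
 *      }
 *
 *      rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                      on_link_change, NULL);
 */
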
3999 int
4000 rte_eth_dev_callback_unregister(uint16_t port_id,
4001                         enum rte_eth_event_type event,
4002                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4003 {
4004         int ret;
4005         struct rte_eth_dev *dev;
4006         struct rte_eth_dev_callback *cb, *next;
4007         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4008         uint16_t last_port;
4009
4010         if (!cb_fn)
4011                 return -EINVAL;
4012
4013         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4014                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4015                 return -EINVAL;
4016         }
4017
4018         if (port_id == RTE_ETH_ALL) {
4019                 next_port = 0;
4020                 last_port = RTE_MAX_ETHPORTS - 1;
4021         } else {
4022                 next_port = last_port = port_id;
4023         }
4024
4025         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4026
4027         do {
4028                 dev = &rte_eth_devices[next_port];
4029                 ret = 0;
4030                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4031                      cb = next) {
4032
4033                         next = TAILQ_NEXT(cb, next);
4034
4035                         if (cb->cb_fn != cb_fn || cb->event != event ||
4036                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4037                                 continue;
4038
4039                         /*
4040                          * if this callback is not executing right now,
4041                          * then remove it.
4042                          */
4043                         if (cb->active == 0) {
4044                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4045                                 rte_free(cb);
4046                         } else {
4047                                 ret = -EAGAIN;
4048                         }
4049                 }
4050         } while (++next_port <= last_port);
4051
4052         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4053         return ret;
4054 }
4055
4056 int
4057 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4058         enum rte_eth_event_type event, void *ret_param)
4059 {
4060         struct rte_eth_dev_callback *cb_lst;
4061         struct rte_eth_dev_callback dev_cb;
4062         int rc = 0;
4063
4064         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4065         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4066                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4067                         continue;
4068                 dev_cb = *cb_lst;
4069                 cb_lst->active = 1;
4070                 if (ret_param != NULL)
4071                         dev_cb.ret_param = ret_param;
4072
4073                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4074                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4075                                 dev_cb.cb_arg, dev_cb.ret_param);
4076                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4077                 cb_lst->active = 0;
4078         }
4079         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4080         return rc;
4081 }
4082
4083 void
4084 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4085 {
4086         if (dev == NULL)
4087                 return;
4088
4089         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4090
4091         dev->state = RTE_ETH_DEV_ATTACHED;
4092 }
4093
4094 int
4095 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4096 {
4097         uint32_t vec;
4098         struct rte_eth_dev *dev;
4099         struct rte_intr_handle *intr_handle;
4100         uint16_t qid;
4101         int rc;
4102
4103         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4104
4105         dev = &rte_eth_devices[port_id];
4106
4107         if (!dev->intr_handle) {
4108                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4109                 return -ENOTSUP;
4110         }
4111
4112         intr_handle = dev->intr_handle;
4113         if (!intr_handle->intr_vec) {
4114                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4115                 return -EPERM;
4116         }
4117
4118         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4119                 vec = intr_handle->intr_vec[qid];
4120                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4121                 if (rc && rc != -EEXIST) {
4122                         RTE_ETHDEV_LOG(ERR,
4123                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4124                                 port_id, qid, op, epfd, vec);
4125                 }
4126         }
4127
4128         return 0;
4129 }
4130
4131 int
4132 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4133 {
4134         struct rte_intr_handle *intr_handle;
4135         struct rte_eth_dev *dev;
4136         unsigned int efd_idx;
4137         uint32_t vec;
4138         int fd;
4139
4140         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4141
4142         dev = &rte_eth_devices[port_id];
4143
4144         if (queue_id >= dev->data->nb_rx_queues) {
4145                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4146                 return -1;
4147         }
4148
4149         if (!dev->intr_handle) {
4150                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4151                 return -1;
4152         }
4153
4154         intr_handle = dev->intr_handle;
4155         if (!intr_handle->intr_vec) {
4156                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4157                 return -1;
4158         }
4159
4160         vec = intr_handle->intr_vec[queue_id];
4161         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4162                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4163         fd = intr_handle->efds[efd_idx];
4164
4165         return fd;
4166 }
4167
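/*
 * Usage sketch (illustrative only): feeding the per-queue fd returned
 * above into an application-owned epoll set (needs <sys/epoll.h>).
 * port_id, queue_id and epfd are caller-supplied placeholders.
 *
 *      int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *
 *      if (fd >= 0) {
 *              struct epoll_event ev = {
 *                      .events = EPOLLIN,
 *                      .data.fd = fd,
 *              };
 *              epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 *      }
 */
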
4168 const struct rte_memzone *
4169 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4170                          uint16_t queue_id, size_t size, unsigned align,
4171                          int socket_id)
4172 {
4173         char z_name[RTE_MEMZONE_NAMESIZE];
4174         const struct rte_memzone *mz;
4175         int rc;
4176
4177         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
4178                       dev->data->port_id, queue_id, ring_name);
4179         if (rc >= RTE_MEMZONE_NAMESIZE) {
4180                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4181                 rte_errno = ENAMETOOLONG;
4182                 return NULL;
4183         }
4184
4185         mz = rte_memzone_lookup(z_name);
4186         if (mz)
4187                 return mz;
4188
4189         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4190                         RTE_MEMZONE_IOVA_CONTIG, align);
4191 }
4192
4193 int
4194 rte_eth_dev_create(struct rte_device *device, const char *name,
4195         size_t priv_data_size,
4196         ethdev_bus_specific_init ethdev_bus_specific_init,
4197         void *bus_init_params,
4198         ethdev_init_t ethdev_init, void *init_params)
4199 {
4200         struct rte_eth_dev *ethdev;
4201         int retval;
4202
4203         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4204
4205         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4206                 ethdev = rte_eth_dev_allocate(name);
4207                 if (!ethdev)
4208                         return -ENODEV;
4209
4210                 if (priv_data_size) {
4211                         ethdev->data->dev_private = rte_zmalloc_socket(
4212                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4213                                 device->numa_node);
4214
4215                         if (!ethdev->data->dev_private) {
4216                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4217                                 retval = -ENOMEM;
4218                                 goto probe_failed;
4219                         }
4220                 }
4221         } else {
4222                 ethdev = rte_eth_dev_attach_secondary(name);
4223                 if (!ethdev) {
4224                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4225                                 "ethdev doesn't exist\n");
4226                         return -ENODEV;
4227                 }
4228         }
4229
4230         ethdev->device = device;
4231
4232         if (ethdev_bus_specific_init) {
4233                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4234                 if (retval) {
4235                         RTE_LOG(ERR, EAL,
4236                                 "ethdev bus specific initialisation failed");
4237                         goto probe_failed;
4238                 }
4239         }
4240
4241         retval = ethdev_init(ethdev, init_params);
4242         if (retval) {
4243                 RTE_LOG(ERR, EAL, "ethdev initialisation failed");
4244                 goto probe_failed;
4245         }
4246
4247         rte_eth_dev_probing_finish(ethdev);
4248
4249         return retval;
4250
4251 probe_failed:
4252         rte_eth_dev_release_port(ethdev);
4253         return retval;
4254 }
4255
4256 int
4257 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4258         ethdev_uninit_t ethdev_uninit)
4259 {
4260         int ret;
4261
4262         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4263         if (!ethdev)
4264                 return -ENODEV;
4265
4266         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4267
4268         ret = ethdev_uninit(ethdev);
4269         if (ret)
4270                 return ret;
4271
4272         return rte_eth_dev_release_port(ethdev);
4273 }
4274
4275 int
4276 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4277                           int epfd, int op, void *data)
4278 {
4279         uint32_t vec;
4280         struct rte_eth_dev *dev;
4281         struct rte_intr_handle *intr_handle;
4282         int rc;
4283
4284         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4285
4286         dev = &rte_eth_devices[port_id];
4287         if (queue_id >= dev->data->nb_rx_queues) {
4288                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4289                 return -EINVAL;
4290         }
4291
4292         if (!dev->intr_handle) {
4293                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4294                 return -ENOTSUP;
4295         }
4296
4297         intr_handle = dev->intr_handle;
4298         if (!intr_handle->intr_vec) {
4299                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4300                 return -EPERM;
4301         }
4302
4303         vec = intr_handle->intr_vec[queue_id];
4304         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4305         if (rc && rc != -EEXIST) {
4306                 RTE_ETHDEV_LOG(ERR,
4307                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4308                         port_id, queue_id, op, epfd, vec);
4309                 return rc;
4310         }
4311
4312         return 0;
4313 }
4314
4315 int
4316 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4317                            uint16_t queue_id)
4318 {
4319         struct rte_eth_dev *dev;
4320
4321         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4322
4323         dev = &rte_eth_devices[port_id];
4324
4325         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4326         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4327                                                                 queue_id));
4328 }
4329
4330 int
4331 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4332                             uint16_t queue_id)
4333 {
4334         struct rte_eth_dev *dev;
4335
4336         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4337
4338         dev = &rte_eth_devices[port_id];
4339
4340         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4341         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4342                                                                 queue_id));
4343 }
4344
4345
4346 int
4347 rte_eth_dev_filter_supported(uint16_t port_id,
4348                              enum rte_filter_type filter_type)
4349 {
4350         struct rte_eth_dev *dev;
4351
4352         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4353
4354         dev = &rte_eth_devices[port_id];
4355         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4356         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4357                                 RTE_ETH_FILTER_NOP, NULL);
4358 }
4359
4360 int
4361 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4362                         enum rte_filter_op filter_op, void *arg)
4363 {
4364         struct rte_eth_dev *dev;
4365
4366         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4367
4368         dev = &rte_eth_devices[port_id];
4369         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4370         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4371                                                              filter_op, arg));
4372 }
4373
4374 const struct rte_eth_rxtx_callback *
4375 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4376                 rte_rx_callback_fn fn, void *user_param)
4377 {
4378 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4379         rte_errno = ENOTSUP;
4380         return NULL;
4381 #endif
4382         struct rte_eth_dev *dev;
4383
4384         /* check input parameters */
4385         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4386                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4387                 rte_errno = EINVAL;
4388                 return NULL;
4389         }
4390         dev = &rte_eth_devices[port_id];
4391         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4392                 rte_errno = EINVAL;
4393                 return NULL;
4394         }
4395         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4396
4397         if (cb == NULL) {
4398                 rte_errno = ENOMEM;
4399                 return NULL;
4400         }
4401
4402         cb->fn.rx = fn;
4403         cb->param = user_param;
4404
4405         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4406         /* Add the callback in FIFO order. */
4407         struct rte_eth_rxtx_callback *tail =
4408                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4409
4410         if (!tail) {
4411                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4412
4413         } else {
4414                 while (tail->next)
4415                         tail = tail->next;
4416                 tail->next = cb;
4417         }
4418         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4419
4420         return cb;
4421 }
4422
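/*
 * Usage sketch (illustrative only): a post-Rx callback that counts packets
 * on queue 0, matching the rte_rx_callback_fn signature. The names
 * rx_count and count_cb are placeholders.
 *
 *      static uint64_t rx_count;
 *
 *      static uint16_t
 *      count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *                      uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *      {
 *              RTE_SET_USED(port);
 *              RTE_SET_USED(queue);
 *              RTE_SET_USED(pkts);
 *              RTE_SET_USED(max_pkts);
 *              RTE_SET_USED(arg);
 *              rx_count += nb_pkts;
 *              return nb_pkts;
 *      }
 *
 *      const struct rte_eth_rxtx_callback *cb =
 *              rte_eth_add_rx_callback(port_id, 0, count_cb, NULL);
 */
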
4423 const struct rte_eth_rxtx_callback *
4424 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4425                 rte_rx_callback_fn fn, void *user_param)
4426 {
4427 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4428         rte_errno = ENOTSUP;
4429         return NULL;
4430 #endif
4431         /* check input parameters */
4432         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4433                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4434                 rte_errno = EINVAL;
4435                 return NULL;
4436         }
4437
4438         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4439
4440         if (cb == NULL) {
4441                 rte_errno = ENOMEM;
4442                 return NULL;
4443         }
4444
4445         cb->fn.rx = fn;
4446         cb->param = user_param;
4447
4448         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4449         /* Add the callback at the first position */
4450         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4451         rte_smp_wmb();
4452         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4453         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4454
4455         return cb;
4456 }
4457
4458 const struct rte_eth_rxtx_callback *
4459 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4460                 rte_tx_callback_fn fn, void *user_param)
4461 {
4462 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4463         rte_errno = ENOTSUP;
4464         return NULL;
4465 #endif
4466         struct rte_eth_dev *dev;
4467
4468         /* check input parameters */
4469         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4470                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4471                 rte_errno = EINVAL;
4472                 return NULL;
4473         }
4474
4475         dev = &rte_eth_devices[port_id];
4476         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4477                 rte_errno = EINVAL;
4478                 return NULL;
4479         }
4480
4481         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4482
4483         if (cb == NULL) {
4484                 rte_errno = ENOMEM;
4485                 return NULL;
4486         }
4487
4488         cb->fn.tx = fn;
4489         cb->param = user_param;
4490
4491         rte_spinlock_lock(&rte_eth_tx_cb_lock);
        /* Add the callback at the tail of the list (FIFO order). */
4493         struct rte_eth_rxtx_callback *tail =
4494                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4495
        if (!tail) {
                /*
                 * Make the callback's fields visible before it is
                 * published: the data path walks this list without
                 * taking the lock.
                 */
                rte_smp_wmb();
                rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
        } else {
                while (tail->next)
                        tail = tail->next;
                rte_smp_wmb();
                tail->next = cb;
        }
4504         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4505
4506         return cb;
4507 }
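/*
 * Usage sketch (editorial): a pre-Tx callback is registered the same way
 * but follows rte_tx_callback_fn, which has no max_pkts argument. A
 * hypothetical callback counting packets handed to the driver:
 *
 *      static uint16_t
 *      count_tx_cb(uint16_t port_id, uint16_t queue_id,
 *                  struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *                  void *user_param)
 *      {
 *              uint64_t *counter = user_param;
 *
 *              RTE_SET_USED(port_id);
 *              RTE_SET_USED(queue_id);
 *              RTE_SET_USED(pkts);
 *              *counter += nb_pkts;
 *              return nb_pkts;
 *      }
 *
 *      rte_eth_add_tx_callback(port_id, 0, count_tx_cb, &tx_count);
 */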
4508
4509 int
4510 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4511                 const struct rte_eth_rxtx_callback *user_cb)
4512 {
4513 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4514         return -ENOTSUP;
4515 #endif
4516         /* Check input parameters. */
4517         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4518         if (user_cb == NULL ||
4519                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4520                 return -EINVAL;
4521
4522         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4523         struct rte_eth_rxtx_callback *cb;
4524         struct rte_eth_rxtx_callback **prev_cb;
4525         int ret = -EINVAL;
4526
4527         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4528         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4529         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4530                 cb = *prev_cb;
4531                 if (cb == user_cb) {
4532                         /* Remove the user cb from the callback list. */
4533                         *prev_cb = cb->next;
4534                         ret = 0;
4535                         break;
4536                 }
4537         }
4538         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4539
4540         return ret;
4541 }
4542
4543 int
4544 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4545                 const struct rte_eth_rxtx_callback *user_cb)
4546 {
4547 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4548         return -ENOTSUP;
4549 #endif
4550         /* Check input parameters. */
4551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4552         if (user_cb == NULL ||
4553                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4554                 return -EINVAL;
4555
4556         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4557         int ret = -EINVAL;
4558         struct rte_eth_rxtx_callback *cb;
4559         struct rte_eth_rxtx_callback **prev_cb;
4560
4561         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4562         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4563         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4564                 cb = *prev_cb;
4565                 if (cb == user_cb) {
4566                         /* Remove the user cb from the callback list. */
4567                         *prev_cb = cb->next;
4568                         ret = 0;
4569                         break;
4570                 }
4571         }
4572         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4573
4574         return ret;
4575 }
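/*
 * Editorial note: neither remove function frees the callback structure,
 * and a data-path thread may still be inside the callback when remove
 * returns. A sketch of the teardown sequence this API expects (the
 * quiescence step is application-specific; anything proving no lcore can
 * still reference cb works):
 *
 *      if (rte_eth_remove_rx_callback(port_id, 0, cb) == 0) {
 *              ... wait until no lcore can be executing cb ...
 *              rte_free((void *)(uintptr_t)cb);
 *      }
 */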
4576
4577 int
4578 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4579         struct rte_eth_rxq_info *qinfo)
4580 {
4581         struct rte_eth_dev *dev;
4582
4583         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4584
4585         if (qinfo == NULL)
4586                 return -EINVAL;
4587
4588         dev = &rte_eth_devices[port_id];
4589         if (queue_id >= dev->data->nb_rx_queues) {
4590                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4591                 return -EINVAL;
4592         }
4593
4594         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't get info for hairpin Rx queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        queue_id, port_id);
4598                 return -EINVAL;
4599         }
4600
4601         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4602
4603         memset(qinfo, 0, sizeof(*qinfo));
4604         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4605         return 0;
4606 }
4607
4608 int
4609 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4610         struct rte_eth_txq_info *qinfo)
4611 {
4612         struct rte_eth_dev *dev;
4613
4614         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4615
4616         if (qinfo == NULL)
4617                 return -EINVAL;
4618
4619         dev = &rte_eth_devices[port_id];
4620         if (queue_id >= dev->data->nb_tx_queues) {
4621                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4622                 return -EINVAL;
4623         }
4624
4625         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't get info for hairpin Tx queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        queue_id, port_id);
4629                 return -EINVAL;
4630         }
4631
4632         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4633
4634         memset(qinfo, 0, sizeof(*qinfo));
4635         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4636
4637         return 0;
4638 }
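/*
 * Usage sketch (editorial): reading back the configuration of Rx queue 0
 * (the Tx variant is symmetric). The structure is zeroed by the API, so
 * only driver-filled fields are non-zero afterwards.
 *
 *      struct rte_eth_rxq_info qinfo;
 *
 *      if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *              printf("ring size %u, mempool %s\n",
 *                     qinfo.nb_desc, qinfo.mp->name);
 */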
4639
4640 int
4641 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4642                           struct rte_eth_burst_mode *mode)
4643 {
4644         struct rte_eth_dev *dev;
4645
4646         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4647
4648         if (mode == NULL)
4649                 return -EINVAL;
4650
4651         dev = &rte_eth_devices[port_id];
4652
4653         if (queue_id >= dev->data->nb_rx_queues) {
4654                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4655                 return -EINVAL;
4656         }
4657
4658         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4659         memset(mode, 0, sizeof(*mode));
4660         return eth_err(port_id,
4661                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4662 }
4663
4664 int
4665 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4666                           struct rte_eth_burst_mode *mode)
4667 {
4668         struct rte_eth_dev *dev;
4669
4670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4671
4672         if (mode == NULL)
4673                 return -EINVAL;
4674
4675         dev = &rte_eth_devices[port_id];
4676
4677         if (queue_id >= dev->data->nb_tx_queues) {
4678                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4679                 return -EINVAL;
4680         }
4681
4682         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4683         memset(mode, 0, sizeof(*mode));
4684         return eth_err(port_id,
4685                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4686 }
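/*
 * Usage sketch (editorial): asking the driver which burst-mode variant
 * (e.g. scalar vs. vector) backs a queue; the Tx variant is symmetric.
 * The layout of struct rte_eth_burst_mode differs across releases, so
 * only the call pattern is shown here:
 *
 *      struct rte_eth_burst_mode mode;
 *      int ret = rte_eth_rx_burst_mode_get(port_id, 0, &mode);
 *
 *      if (ret == -ENOTSUP)
 *              ... the PMD does not report its burst mode ...
 */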
4687
4688 int
4689 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4690                              struct rte_ether_addr *mc_addr_set,
4691                              uint32_t nb_mc_addr)
4692 {
4693         struct rte_eth_dev *dev;
4694
4695         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4696
4697         dev = &rte_eth_devices[port_id];
4698         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4699         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4700                                                 mc_addr_set, nb_mc_addr));
4701 }
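/*
 * Usage sketch (editorial): subscribing a port to one multicast group;
 * passing nb_mc_addr == 0 clears the whole list. The address below is
 * the all-hosts group 01:00:5e:00:00:01, used purely as an example.
 *
 *      struct rte_ether_addr mc = {
 *              .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *      };
 *
 *      if (rte_eth_dev_set_mc_addr_list(port_id, &mc, 1) != 0)
 *              ... driver refused or does not support the list ...
 */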
4702
4703 int
4704 rte_eth_timesync_enable(uint16_t port_id)
4705 {
4706         struct rte_eth_dev *dev;
4707
4708         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4709         dev = &rte_eth_devices[port_id];
4710
4711         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4712         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4713 }
4714
4715 int
4716 rte_eth_timesync_disable(uint16_t port_id)
4717 {
4718         struct rte_eth_dev *dev;
4719
4720         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4721         dev = &rte_eth_devices[port_id];
4722
4723         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4724         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4725 }
4726
4727 int
4728 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4729                                    uint32_t flags)
4730 {
4731         struct rte_eth_dev *dev;
4732
4733         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4734         dev = &rte_eth_devices[port_id];
4735
4736         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4737         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4738                                 (dev, timestamp, flags));
4739 }
4740
4741 int
4742 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4743                                    struct timespec *timestamp)
4744 {
4745         struct rte_eth_dev *dev;
4746
4747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4748         dev = &rte_eth_devices[port_id];
4749
4750         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4751         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4752                                 (dev, timestamp));
4753 }
4754
4755 int
4756 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4757 {
4758         struct rte_eth_dev *dev;
4759
4760         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4761         dev = &rte_eth_devices[port_id];
4762
4763         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4764         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4765                                                                       delta));
4766 }
4767
4768 int
4769 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4770 {
4771         struct rte_eth_dev *dev;
4772
4773         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4774         dev = &rte_eth_devices[port_id];
4775
4776         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4777         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4778                                                                 timestamp));
4779 }
4780
4781 int
4782 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4783 {
4784         struct rte_eth_dev *dev;
4785
4786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4787         dev = &rte_eth_devices[port_id];
4788
4789         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4790         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4791                                                                 timestamp));
4792 }
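/*
 * Usage sketch (editorial): the typical IEEE 1588 slave flow built from
 * the timesync calls above. The flags argument of
 * rte_eth_timesync_read_rx_timestamp() is a PMD-specific timestamp
 * register index (0 here); error handling is elided.
 *
 *      rte_eth_timesync_enable(port_id);
 *      ...
 *      if (mbuf->ol_flags & PKT_RX_IEEE1588_TMST) {
 *              struct timespec rx_ts;
 *
 *              rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0);
 *              ... compute delta_ns against the master's time, then ...
 *              rte_eth_timesync_adjust_time(port_id, delta_ns);
 *      }
 */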
4793
4794 int
4795 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4796 {
4797         struct rte_eth_dev *dev;
4798
4799         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4800         dev = &rte_eth_devices[port_id];
4801
4802         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4803         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4804 }
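/*
 * Usage sketch (editorial): rte_eth_read_clock() samples the raw device
 * clock, letting an application correlate hardware mbuf timestamps with
 * its own time base:
 *
 *      uint64_t dev_clk;
 *
 *      if (rte_eth_read_clock(port_id, &dev_clk) == 0)
 *              ... pair dev_clk with an rte_rdtsc() sample ...
 */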
4805
4806 int
4807 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4808 {
4809         struct rte_eth_dev *dev;
4810
4811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4812
4813         dev = &rte_eth_devices[port_id];
4814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4815         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4816 }
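/*
 * Usage sketch (editorial): register dump, two-pass like the EEPROM case
 * below: a NULL data pointer asks the driver to report length and width,
 * a second call fetches the registers.
 *
 *      struct rte_dev_reg_info reg = { 0 };
 *
 *      if (rte_eth_dev_get_reg_info(port_id, &reg) == 0) {
 *              reg.data = calloc(reg.length, reg.width);
 *              if (reg.data != NULL)
 *                      rte_eth_dev_get_reg_info(port_id, &reg);
 *              free(reg.data);
 *      }
 */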
4817
4818 int
4819 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4820 {
4821         struct rte_eth_dev *dev;
4822
4823         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4824
4825         dev = &rte_eth_devices[port_id];
4826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4827         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4828 }
4829
4830 int
4831 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4832 {
4833         struct rte_eth_dev *dev;
4834
4835         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4836
4837         dev = &rte_eth_devices[port_id];
4838         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4839         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4840 }
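/*
 * Usage sketch (editorial): dumping a device EEPROM with the two calls
 * above. The caller supplies the buffer and the offset/length window;
 * the driver fills data (and magic) on a successful get.
 *
 *      int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *      if (len > 0) {
 *              struct rte_dev_eeprom_info info = {
 *                      .data = malloc(len),
 *                      .offset = 0,
 *                      .length = (uint32_t)len,
 *              };
 *
 *              if (info.data != NULL &&
 *                  rte_eth_dev_get_eeprom(port_id, &info) == 0)
 *                      ... len bytes of EEPROM now in info.data ...
 *              free(info.data);
 *      }
 */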
4841
4842 int
4843 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4844 {
4845         struct rte_eth_dev *dev;
4846
4847         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4848
4849         dev = &rte_eth_devices[port_id];
4850         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4851         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4852 }
4853
4854 int
4855 rte_eth_dev_get_module_info(uint16_t port_id,
4856                             struct rte_eth_dev_module_info *modinfo)
4857 {
4858         struct rte_eth_dev *dev;
4859
4860         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4861
4862         dev = &rte_eth_devices[port_id];
4863         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
        return eth_err(port_id,
                       (*dev->dev_ops->get_module_info)(dev, modinfo));
4865 }
4866
4867 int
4868 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4869                               struct rte_dev_eeprom_info *info)
4870 {
4871         struct rte_eth_dev *dev;
4872
4873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4874
4875         dev = &rte_eth_devices[port_id];
4876         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
        return eth_err(port_id,
                       (*dev->dev_ops->get_module_eeprom)(dev, info));
4878 }
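/*
 * Usage sketch (editorial): reading a plugged SFP/QSFP module's EEPROM,
 * the same two-step pattern "ethtool -m" uses: query type and size
 * first, then read.
 *
 *      struct rte_eth_dev_module_info modinfo;
 *      struct rte_dev_eeprom_info info = { 0 };
 *
 *      if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0) {
 *              info.length = modinfo.eeprom_len;
 *              info.data = malloc(info.length);
 *              if (info.data != NULL &&
 *                  rte_eth_dev_get_module_eeprom(port_id, &info) == 0)
 *                      ... decode per modinfo.type (RTE_ETH_MODULE_SFF_*) ...
 *              free(info.data);
 *      }
 */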
4879
4880 int
4881 rte_eth_dev_get_dcb_info(uint16_t port_id,
4882                              struct rte_eth_dcb_info *dcb_info)
4883 {
4884         struct rte_eth_dev *dev;
4885
4886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4887
        if (dcb_info == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4890
4891         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4892         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4893 }
4894
4895 int
4896 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4897                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4898 {
4899         struct rte_eth_dev *dev;
4900
4901         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4902         if (l2_tunnel == NULL) {
4903                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4904                 return -EINVAL;
4905         }
4906
4907         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4908                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4909                 return -EINVAL;
4910         }
4911
4912         dev = &rte_eth_devices[port_id];
4913         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4914                                 -ENOTSUP);
4915         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4916                                                                 l2_tunnel));
4917 }
4918
4919 int
4920 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4921                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4922                                   uint32_t mask,
4923                                   uint8_t en)
4924 {
4925         struct rte_eth_dev *dev;
4926
4927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4928
4929         if (l2_tunnel == NULL) {
4930                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4931                 return -EINVAL;
4932         }
4933
4934         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4935                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4936                 return -EINVAL;
4937         }
4938
4939         if (mask == 0) {
4940                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4941                 return -EINVAL;
4942         }
4943
4944         dev = &rte_eth_devices[port_id];
4945         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4946                                 -ENOTSUP);
4947         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4948                                                         l2_tunnel, mask, en));
4949 }
4950
4951 static void
4952 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4953                            const struct rte_eth_desc_lim *desc_lim)
4954 {
4955         if (desc_lim->nb_align != 0)
4956                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4957
4958         if (desc_lim->nb_max != 0)
4959                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4960
4961         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4962 }
4963
4964 int
4965 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4966                                  uint16_t *nb_rx_desc,
4967                                  uint16_t *nb_tx_desc)
4968 {
4969         struct rte_eth_dev_info dev_info;
4970         int ret;
4971
4972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4973
4974         ret = rte_eth_dev_info_get(port_id, &dev_info);
4975         if (ret != 0)
4976                 return ret;
4977
4978         if (nb_rx_desc != NULL)
4979                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4980
4981         if (nb_tx_desc != NULL)
4982                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4983
4984         return 0;
4985 }
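/*
 * Usage sketch (editorial): clamping requested ring sizes to driver
 * limits before queue setup. The helper above aligns each value to
 * nb_align, caps it at nb_max and raises it to nb_min, so the result is
 * always acceptable to the PMD.
 *
 *      uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *      if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0)
 *              ... pass nb_rxd/nb_txd to rte_eth_rx/tx_queue_setup() ...
 */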
4986
4987 int
4988 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4989                                    struct rte_eth_hairpin_cap *cap)
4990 {
4991         struct rte_eth_dev *dev;
4992
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (cap == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
        memset(cap, 0, sizeof(*cap));
4998         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
4999 }
5000
5001 int
5002 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5003 {
5004         if (dev->data->rx_queue_state[queue_id] ==
5005             RTE_ETH_QUEUE_STATE_HAIRPIN)
5006                 return 1;
5007         return 0;
5008 }
5009
5010 int
5011 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5012 {
5013         if (dev->data->tx_queue_state[queue_id] ==
5014             RTE_ETH_QUEUE_STATE_HAIRPIN)
5015                 return 1;
5016         return 0;
5017 }
5018
5019 int
5020 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5021 {
5022         struct rte_eth_dev *dev;
5023
5024         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5025
5026         if (pool == NULL)
5027                 return -EINVAL;
5028
5029         dev = &rte_eth_devices[port_id];
5030
5031         if (*dev->dev_ops->pool_ops_supported == NULL)
5032                 return 1; /* all pools are supported */
5033
5034         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5035 }
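/*
 * Usage sketch (editorial): probing driver support for a mempool ops
 * name before creating the mbuf pool (0 = best choice for this port,
 * 1 = supported, negative = unsupported or error).
 *
 *      if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") >= 0)
 *              ... safe to create the pool with these ops ...
 */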
5036
5037 /**
5038  * A set of values to describe the possible states of a switch domain.
5039  */
5040 enum rte_eth_switch_domain_state {
5041         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5042         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5043 };
5044
5045 /**
5046  * Array of switch domains available for allocation. Array is sized to
5047  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5048  * ethdev ports in a single process.
5049  */
5050 static struct rte_eth_dev_switch {
5051         enum rte_eth_switch_domain_state state;
5052 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5053
5054 int
5055 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5056 {
5057         unsigned int i;
5058
5059         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5060
5061         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
5062                 i < RTE_MAX_ETHPORTS; i++) {
5063                 if (rte_eth_switch_domains[i].state ==
5064                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5065                         rte_eth_switch_domains[i].state =
5066                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5067                         *domain_id = i;
5068                         return 0;
5069                 }
5070         }
5071
5072         return -ENOSPC;
5073 }
5074
5075 int
5076 rte_eth_switch_domain_free(uint16_t domain_id)
5077 {
5078         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5079                 domain_id >= RTE_MAX_ETHPORTS)
5080                 return -EINVAL;
5081
5082         if (rte_eth_switch_domains[domain_id].state !=
5083                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5084                 return -EINVAL;
5085
5086         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5087
5088         return 0;
5089 }
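/*
 * Usage sketch (editorial): PMDs that expose port representors allocate
 * one switch domain at probe time, tag all related ports with it, and
 * release it on close. RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID is never
 * handed out by the allocator above.
 *
 *      uint16_t domain_id;
 *
 *      if (rte_eth_switch_domain_alloc(&domain_id) == 0) {
 *              ... store domain_id in each port's switch info ...
 *              rte_eth_switch_domain_free(domain_id);
 *      }
 */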
5090
5091 static int
5092 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5093 {
5094         int state;
5095         struct rte_kvargs_pair *pair;
5096         char *letter;
5097
5098         arglist->str = strdup(str_in);
5099         if (arglist->str == NULL)
5100                 return -ENOMEM;
5101
5102         letter = arglist->str;
5103         state = 0;
5104         arglist->count = 0;
5105         pair = &arglist->pairs[0];
5106         while (1) {
5107                 switch (state) {
5108                 case 0: /* Initial */
5109                         if (*letter == '=')
5110                                 return -EINVAL;
5111                         else if (*letter == '\0')
5112                                 return 0;
5113
5114                         state = 1;
5115                         pair->key = letter;
5116                         /* fall-thru */
5117
5118                 case 1: /* Parsing key */
5119                         if (*letter == '=') {
5120                                 *letter = '\0';
5121                                 pair->value = letter + 1;
5122                                 state = 2;
5123                         } else if (*letter == ',' || *letter == '\0')
5124                                 return -EINVAL;
5125                         break;
5126
5128                 case 2: /* Parsing value */
5129                         if (*letter == '[')
5130                                 state = 3;
5131                         else if (*letter == ',') {
5132                                 *letter = '\0';
5133                                 arglist->count++;
5134                                 pair = &arglist->pairs[arglist->count];
5135                                 state = 0;
5136                         } else if (*letter == '\0') {
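                                /* Step back so the terminating '\0' is
                                 * seen again in state 0, which ends the
                                 * scan and returns success.
                                 */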
5137                                 letter--;
5138                                 arglist->count++;
5139                                 pair = &arglist->pairs[arglist->count];
5140                                 state = 0;
5141                         }
5142                         break;
5143
5144                 case 3: /* Parsing list */
5145                         if (*letter == ']')
5146                                 state = 2;
5147                         else if (*letter == '\0')
5148                                 return -EINVAL;
5149                         break;
5150                 }
5151                 letter++;
5152         }
5153 }
5154
5155 int
5156 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5157 {
5158         struct rte_kvargs args;
5159         struct rte_kvargs_pair *pair;
5160         unsigned int i;
5161         int result = 0;
5162
5163         memset(eth_da, 0, sizeof(*eth_da));
5164
5165         result = rte_eth_devargs_tokenise(&args, dargs);
5166         if (result < 0)
5167                 goto parse_cleanup;
5168
5169         for (i = 0; i < args.count; i++) {
5170                 pair = &args.pairs[i];
5171                 if (strcmp("representor", pair->key) == 0) {
5172                         result = rte_eth_devargs_parse_list(pair->value,
5173                                 rte_eth_devargs_parse_representor_ports,
5174                                 eth_da);
5175                         if (result < 0)
5176                                 goto parse_cleanup;
5177                 }
5178         }
5179
5180 parse_cleanup:
5181         if (args.str)
5182                 free(args.str);
5183
5184         return result;
5185 }
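/*
 * Usage sketch (editorial): the devargs syntax accepted above, as passed
 * on the EAL command line, is a comma-separated key=value list where a
 * value may be a bracketed list, e.g. "representor=[0-3]" or
 * "representor=[1,3,5]". A probe-time use:
 *
 *      struct rte_eth_devargs eth_da;
 *
 *      if (rte_eth_devargs_parse("representor=[0,2]", &eth_da) == 0)
 *              ... eth_da.nb_representor_ports entries are in
 *                  eth_da.representor_ports[] ...
 */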
5186
5187 RTE_INIT(ethdev_init_log)
5188 {
5189         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
5190         if (rte_eth_dev_logtype >= 0)
5191                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
5192 }