ethdev: replace snprintf with strlcpy for owner
[dpdk.git] / lib / librte_ethdev / rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() is not yet supporting the new syntax,
         * that's why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}

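/*
 * Illustrative usage (editor's sketch, not part of the upstream file):
 * how an application might drive the iterator API above. The
 * RTE_ETH_FOREACH_MATCHING_DEV() macro in rte_ethdev.h wraps the same
 * init/next sequence; an explicit cleanup call is only needed when
 * stopping early, since rte_eth_iterator_next() cleans up by itself
 * once iteration is exhausted.
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iterator,
 *			"class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iterator);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iterator))
 *			printf("matched port %u\n", port_id);
 *	}
 */
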
uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

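/*
 * Editor's note: map (or, in the primary process, create) the memzone
 * holding the port data and ownership records shared between primary and
 * secondary processes. Safe to call repeatedly; it is a no-op once
 * rte_eth_dev_shared_data is set.
 */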
static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

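/* Editor's note: a port slot counts as allocated once its shared name is set. */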
static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

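/*
 * Editor's note: return the local rte_eth_dev slot for port_id and bind its
 * data pointer into the shared-memory array, so primary and secondary
 * processes see the same per-port data.
 */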
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                _rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
               rte_eth_devices[port_id].data->owner.id != owner_id))
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }
        /* cannot truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

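/*
 * Illustrative usage (editor's sketch, not part of the upstream file):
 * the ownership API above lets one entity, e.g. a failsafe or bonding
 * layer, claim a port so other parts of the application skip it. The
 * owner name is the field the strlcpy() change in this commit copies.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint64_t my_owner_id;
 *
 *	if (rte_eth_dev_owner_new(&my_owner_id) == 0) {
 *		owner.id = my_owner_id;
 *		if (rte_eth_dev_owner_set(port_id, &owner) != 0)
 *			; // port already owned or not allocated
 *	}
 */
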
int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
        return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        for (port = 0; port < RTE_MAX_ETHPORTS; port++)
                if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
                        count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
                if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
                    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }
        }

        return -ENODEV;
}

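/*
 * Editor's note: normalize a driver return code. Zero passes through; any
 * failure is reported as -EIO if the device turns out to have been
 * physically removed, otherwise the driver's own code is returned.
 */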
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

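/*
 * Editor's note: (re)size the rx_queues pointer array. Three cases:
 * first-time allocation, reallocation to a new queue count (releasing the
 * queues that fall beyond it), and full release when nb_queues is 0.
 */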
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

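/*
 * Editor's note: Tx counterpart of rte_eth_dev_rx_queue_config() above;
 * the same three allocation cases apply to the tx_queues array.
 */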
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

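/*
 * Editor's note: translate a numeric link speed plus duplex into the
 * matching ETH_LINK_SPEED_* capability bitflag, e.g.
 * rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX)
 * returns ETH_LINK_SPEED_10G; unknown speeds return 0.
 */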
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
         */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        rte_eth_dev_info_get(port_id, &dev_info);

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If the driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
             dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
             dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                ret = diag;
                goto rollback;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                ret = diag;
                goto rollback;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        return 0;

rollback:
        memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

        return ret;
}

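/*
 * Illustrative usage (editor's sketch, not part of the upstream file):
 * the canonical bring-up sequence around rte_eth_dev_configure(). Queue
 * setup sits between configure and start; NULL rx/tx confs request the
 * driver defaults, and the mempool "mbuf_pool" plus the zeroed dev_conf
 * are assumptions for the sketch.
 *
 *	struct rte_eth_conf dev_conf = { 0 };
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &dev_conf) < 0)
 *		return;
 *	if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   NULL, mbuf_pool) < 0 ||
 *	    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   NULL) < 0)
 *		return;
 *	if (rte_eth_dev_start(port_id) < 0)
 *		return;
 */
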
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

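/*
 * Editor's note: replay the whole MAC address table into the hardware:
 * the default address first, then every non-zero secondary address with
 * its recorded pool mask. Called when starting or restoring a port.
 */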
static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
{
        struct ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info->max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }
}

static void
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
                           struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                rte_eth_dev_mac_restore(dev, dev_info);

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        rte_eth_dev_info_get(port_id, &dev_info);

        /* Let's restore the MAC now if the device does not support live change */
1394         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1395                 rte_eth_dev_mac_restore(dev, &dev_info);
1396
1397         diag = (*dev->dev_ops->dev_start)(dev);
1398         if (diag == 0)
1399                 dev->data->dev_started = 1;
1400         else
1401                 return eth_err(port_id, diag);
1402
1403         rte_eth_dev_config_restore(dev, &dev_info, port_id);
1404
1405         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1406                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1407                 (*dev->dev_ops->link_update)(dev, 0);
1408         }
1409         return 0;
1410 }
1411
1412 void
1413 rte_eth_dev_stop(uint16_t port_id)
1414 {
1415         struct rte_eth_dev *dev;
1416
1417         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1418         dev = &rte_eth_devices[port_id];
1419
1420         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1421
1422         if (dev->data->dev_started == 0) {
1423                 RTE_ETHDEV_LOG(INFO,
1424                         "Device with port_id=%"PRIu16" already stopped\n",
1425                         port_id);
1426                 return;
1427         }
1428
1429         dev->data->dev_started = 0;
1430         (*dev->dev_ops->dev_stop)(dev);
1431 }
1432
1433 int
1434 rte_eth_dev_set_link_up(uint16_t port_id)
1435 {
1436         struct rte_eth_dev *dev;
1437
1438         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1439
1440         dev = &rte_eth_devices[port_id];
1441
1442         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1443         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1444 }
1445
1446 int
1447 rte_eth_dev_set_link_down(uint16_t port_id)
1448 {
1449         struct rte_eth_dev *dev;
1450
1451         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1452
1453         dev = &rte_eth_devices[port_id];
1454
1455         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1456         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1457 }
1458
1459 void
1460 rte_eth_dev_close(uint16_t port_id)
1461 {
1462         struct rte_eth_dev *dev;
1463
1464         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1465         dev = &rte_eth_devices[port_id];
1466
1467         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1468         dev->data->dev_started = 0;
1469         (*dev->dev_ops->dev_close)(dev);
1470
1471         /* check behaviour flag - temporary for PMD migration */
1472         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1473                 /* new behaviour: send event + reset state + free all data */
1474                 rte_eth_dev_release_port(dev);
1475                 return;
1476         }
1477         RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
1478                         "The driver %s should migrate to the new behaviour.\n",
1479                         dev->device->driver->name);
1480         /* old behaviour: only free queue arrays */
1481         dev->data->nb_rx_queues = 0;
1482         rte_free(dev->data->rx_queues);
1483         dev->data->rx_queues = NULL;
1484         dev->data->nb_tx_queues = 0;
1485         rte_free(dev->data->tx_queues);
1486         dev->data->tx_queues = NULL;
1487 }
1488
1489 int
1490 rte_eth_dev_reset(uint16_t port_id)
1491 {
1492         struct rte_eth_dev *dev;
1493         int ret;
1494
1495         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1496         dev = &rte_eth_devices[port_id];
1497
1498         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1499
1500         rte_eth_dev_stop(port_id);
1501         ret = dev->dev_ops->dev_reset(dev);
1502
1503         return eth_err(port_id, ret);
1504 }
1505
1506 int __rte_experimental
1507 rte_eth_dev_is_removed(uint16_t port_id)
1508 {
1509         struct rte_eth_dev *dev;
1510         int ret;
1511
1512         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1513
1514         dev = &rte_eth_devices[port_id];
1515
1516         if (dev->state == RTE_ETH_DEV_REMOVED)
1517                 return 1;
1518
1519         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1520
1521         ret = dev->dev_ops->is_removed(dev);
1522         if (ret != 0)
1523                 /* Device is physically removed. */
1524                 dev->state = RTE_ETH_DEV_REMOVED;
1525
1526         return ret;
1527 }
1528
1529 int
1530 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1531                        uint16_t nb_rx_desc, unsigned int socket_id,
1532                        const struct rte_eth_rxconf *rx_conf,
1533                        struct rte_mempool *mp)
1534 {
1535         int ret;
1536         uint32_t mbp_buf_size;
1537         struct rte_eth_dev *dev;
1538         struct rte_eth_dev_info dev_info;
1539         struct rte_eth_rxconf local_conf;
1540         void **rxq;
1541
1542         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1543
1544         dev = &rte_eth_devices[port_id];
1545         if (rx_queue_id >= dev->data->nb_rx_queues) {
1546                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1547                 return -EINVAL;
1548         }
1549
1550         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1551         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1552
1553         /*
1554          * Check the size of the mbuf data buffer.
1555          * This value must be provided in the private data of the memory pool.
1556          * First check that the memory pool has a valid private data.
1557          * First check that the memory pool has valid private data.
1558         rte_eth_dev_info_get(port_id, &dev_info);
1559         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1560                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1561                         mp->name, (int)mp->private_data_size,
1562                         (int)sizeof(struct rte_pktmbuf_pool_private));
1563                 return -ENOSPC;
1564         }
1565         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1566
1567         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1568                 RTE_ETHDEV_LOG(ERR,
1569                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1570                         mp->name, (int)mbp_buf_size,
1571                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1572                         (int)RTE_PKTMBUF_HEADROOM,
1573                         (int)dev_info.min_rx_bufsize);
1574                 return -EINVAL;
1575         }
1576
1577         /* Use default specified by driver, if nb_rx_desc is zero */
1578         if (nb_rx_desc == 0) {
1579                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1580                 /* If driver default is also zero, fall back on EAL default */
1581                 if (nb_rx_desc == 0)
1582                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1583         }
1584
1585         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1586                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1587                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1588
1589                 RTE_ETHDEV_LOG(ERR,
1590                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1591                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1592                         dev_info.rx_desc_lim.nb_min,
1593                         dev_info.rx_desc_lim.nb_align);
1594                 return -EINVAL;
1595         }
1596
1597         if (dev->data->dev_started &&
1598                 !(dev_info.dev_capa &
1599                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1600                 return -EBUSY;
1601
1602         if (dev->data->dev_started &&
1603                 (dev->data->rx_queue_state[rx_queue_id] !=
1604                         RTE_ETH_QUEUE_STATE_STOPPED))
1605                 return -EBUSY;
1606
1607         rxq = dev->data->rx_queues;
1608         if (rxq[rx_queue_id]) {
1609                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1610                                         -ENOTSUP);
1611                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1612                 rxq[rx_queue_id] = NULL;
1613         }
1614
1615         if (rx_conf == NULL)
1616                 rx_conf = &dev_info.default_rxconf;
1617
1618         local_conf = *rx_conf;
1619
1620         /*
1621          * If an offload has already been enabled in
1622          * rte_eth_dev_configure(), it has been enabled on all queues,
1623          * so there is no need to enable it on this queue again.
1624          * The local_conf.offloads input to the underlying PMD only
1625          * carries those offloads that are enabled on this queue only,
1626          * i.e. not enabled on all queues.
1627          */
1628         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1629
1630         /*
1631          * Newly added offloads for this queue are those not enabled in
1632          * rte_eth_dev_configure() and they must be per-queue type.
1633          * A pure per-port offload can't be enabled on a queue while
1634          * disabled on another queue. Nor can a pure per-port offload
1635          * be newly enabled for a single queue if it hasn't already
1636          * been enabled in rte_eth_dev_configure().
1637          */
1638         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1639              local_conf.offloads) {
1640                 RTE_ETHDEV_LOG(ERR,
1641                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1642                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1643                         port_id, rx_queue_id, local_conf.offloads,
1644                         dev_info.rx_queue_offload_capa,
1645                         __func__);
1646                 return -EINVAL;
1647         }
1648
1649         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1650                                               socket_id, &local_conf, mp);
1651         if (!ret) {
1652                 if (!dev->data->min_rx_buf_size ||
1653                     dev->data->min_rx_buf_size > mbp_buf_size)
1654                         dev->data->min_rx_buf_size = mbp_buf_size;
1655         }
1656
1657         return eth_err(port_id, ret);
1658 }
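/*
 * Usage sketch for rte_eth_rx_queue_setup() (editor's addition; the pool
 * sizing and port/queue ids 0 below are assumptions for the example):
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *	if (rte_eth_rx_queue_setup(0, 0, 0, rte_socket_id(), NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 *
 * Passing nb_rx_desc == 0 and rx_conf == NULL picks the driver (or EAL
 * fallback) defaults, as implemented above.
 */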
1659
1660 int
1661 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1662                        uint16_t nb_tx_desc, unsigned int socket_id,
1663                        const struct rte_eth_txconf *tx_conf)
1664 {
1665         struct rte_eth_dev *dev;
1666         struct rte_eth_dev_info dev_info;
1667         struct rte_eth_txconf local_conf;
1668         void **txq;
1669
1670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1671
1672         dev = &rte_eth_devices[port_id];
1673         if (tx_queue_id >= dev->data->nb_tx_queues) {
1674                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1675                 return -EINVAL;
1676         }
1677
1678         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1679         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1680
1681         rte_eth_dev_info_get(port_id, &dev_info);
1682
1683         /* Use default specified by driver, if nb_tx_desc is zero */
1684         if (nb_tx_desc == 0) {
1685                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1686                 /* If driver default is zero, fall back on EAL default */
1687                 if (nb_tx_desc == 0)
1688                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1689         }
1690         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1691             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1692             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1693                 RTE_ETHDEV_LOG(ERR,
1694                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1695                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1696                         dev_info.tx_desc_lim.nb_min,
1697                         dev_info.tx_desc_lim.nb_align);
1698                 return -EINVAL;
1699         }
1700
1701         if (dev->data->dev_started &&
1702                 !(dev_info.dev_capa &
1703                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1704                 return -EBUSY;
1705
1706         if (dev->data->dev_started &&
1707                 (dev->data->tx_queue_state[tx_queue_id] !=
1708                         RTE_ETH_QUEUE_STATE_STOPPED))
1709                 return -EBUSY;
1710
1711         txq = dev->data->tx_queues;
1712         if (txq[tx_queue_id]) {
1713                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1714                                         -ENOTSUP);
1715                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1716                 txq[tx_queue_id] = NULL;
1717         }
1718
1719         if (tx_conf == NULL)
1720                 tx_conf = &dev_info.default_txconf;
1721
1722         local_conf = *tx_conf;
1723
1724         /*
1725          * If an offload has already been enabled in
1726          * rte_eth_dev_configure(), it has been enabled on all queues,
1727          * so there is no need to enable it on this queue again.
1728          * The local_conf.offloads input to the underlying PMD only
1729          * carries those offloads that are enabled on this queue only,
1730          * i.e. not enabled on all queues.
1731          */
1732         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1733
1734         /*
1735          * Newly added offloads for this queue are those not enabled in
1736          * rte_eth_dev_configure() and they must be per-queue type.
1737          * A pure per-port offload can't be enabled on a queue while
1738          * disabled on another queue. Nor can a pure per-port offload
1739          * be newly enabled for a single queue if it hasn't already
1740          * been enabled in rte_eth_dev_configure().
1741          */
1742         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1743              local_conf.offloads) {
1744                 RTE_ETHDEV_LOG(ERR,
1745                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1746                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1747                         port_id, tx_queue_id, local_conf.offloads,
1748                         dev_info.tx_queue_offload_capa,
1749                         __func__);
1750                 return -EINVAL;
1751         }
1752
1753         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1754                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1755 }
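/*
 * Usage sketch for rte_eth_tx_queue_setup() showing the per-queue offload
 * rule enforced above (editor's addition; port/queue ids 0 and the offload
 * chosen are assumptions for the example):
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(0, &info);
 *	txconf = info.default_txconf;
 *	if (info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
 *		txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	if (rte_eth_tx_queue_setup(0, 0, 0, rte_socket_id(), &txconf) < 0)
 *		rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 *
 * Only offloads present in tx_queue_offload_capa may be added per queue;
 * anything else must be enabled through rte_eth_dev_configure().
 */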
1756
1757 void
1758 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1759                 void *userdata __rte_unused)
1760 {
1761         unsigned i;
1762
1763         for (i = 0; i < unsent; i++)
1764                 rte_pktmbuf_free(pkts[i]);
1765 }
1766
1767 void
1768 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1769                 void *userdata)
1770 {
1771         uint64_t *count = userdata;
1772         unsigned i;
1773
1774         for (i = 0; i < unsent; i++)
1775                 rte_pktmbuf_free(pkts[i]);
1776
1777         *count += unsent;
1778 }
1779
1780 int
1781 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1782                 buffer_tx_error_fn cbfn, void *userdata)
1783 {
1784         buffer->error_callback = cbfn;
1785         buffer->error_userdata = userdata;
1786         return 0;
1787 }
1788
1789 int
1790 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1791 {
1792         int ret = 0;
1793
1794         if (buffer == NULL)
1795                 return -EINVAL;
1796
1797         buffer->size = size;
1798         if (buffer->error_callback == NULL) {
1799                 ret = rte_eth_tx_buffer_set_err_callback(
1800                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1801         }
1802
1803         return ret;
1804 }
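/*
 * Usage sketch for the TX buffering helpers above (editor's addition; the
 * buffer size of 32 and the drop counter are assumptions for the example):
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32),
 *			0, rte_socket_id());
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drops);
 *
 * Packets queued with rte_eth_tx_buffer() that cannot be sent on flush are
 * then freed and counted in 'drops' rather than silently dropped by the
 * default drop callback installed by rte_eth_tx_buffer_init().
 */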
1805
1806 int
1807 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1808 {
1809         struct rte_eth_dev *dev;
1810         int ret;
1811
1812         /* Validate input data. Bail if not valid or not supported. */
1813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1814         dev = &rte_eth_devices[port_id];
1815         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1816         /* Call driver to free pending mbufs. */
1817         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1818                                                free_cnt);
1819         return eth_err(port_id, ret);
1820 }
1821
1822 void
1823 rte_eth_promiscuous_enable(uint16_t port_id)
1824 {
1825         struct rte_eth_dev *dev;
1826
1827         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1828         dev = &rte_eth_devices[port_id];
1829
1830         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1831         (*dev->dev_ops->promiscuous_enable)(dev);
1832         dev->data->promiscuous = 1;
1833 }
1834
1835 void
1836 rte_eth_promiscuous_disable(uint16_t port_id)
1837 {
1838         struct rte_eth_dev *dev;
1839
1840         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1841         dev = &rte_eth_devices[port_id];
1842
1843         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1844         dev->data->promiscuous = 0;
1845         (*dev->dev_ops->promiscuous_disable)(dev);
1846 }
1847
1848 int
1849 rte_eth_promiscuous_get(uint16_t port_id)
1850 {
1851         struct rte_eth_dev *dev;
1852
1853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1854
1855         dev = &rte_eth_devices[port_id];
1856         return dev->data->promiscuous;
1857 }
1858
1859 void
1860 rte_eth_allmulticast_enable(uint16_t port_id)
1861 {
1862         struct rte_eth_dev *dev;
1863
1864         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1865         dev = &rte_eth_devices[port_id];
1866
1867         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1868         (*dev->dev_ops->allmulticast_enable)(dev);
1869         dev->data->all_multicast = 1;
1870 }
1871
1872 void
1873 rte_eth_allmulticast_disable(uint16_t port_id)
1874 {
1875         struct rte_eth_dev *dev;
1876
1877         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1878         dev = &rte_eth_devices[port_id];
1879
1880         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1881         dev->data->all_multicast = 0;
1882         (*dev->dev_ops->allmulticast_disable)(dev);
1883 }
1884
1885 int
1886 rte_eth_allmulticast_get(uint16_t port_id)
1887 {
1888         struct rte_eth_dev *dev;
1889
1890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1891
1892         dev = &rte_eth_devices[port_id];
1893         return dev->data->all_multicast;
1894 }
1895
1896 void
1897 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1898 {
1899         struct rte_eth_dev *dev;
1900
1901         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1902         dev = &rte_eth_devices[port_id];
1903
1904         if (dev->data->dev_conf.intr_conf.lsc &&
1905             dev->data->dev_started)
1906                 rte_eth_linkstatus_get(dev, eth_link);
1907         else {
1908                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1909                 (*dev->dev_ops->link_update)(dev, 1);
1910                 *eth_link = dev->data->dev_link;
1911         }
1912 }
1913
1914 void
1915 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1916 {
1917         struct rte_eth_dev *dev;
1918
1919         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1920         dev = &rte_eth_devices[port_id];
1921
1922         if (dev->data->dev_conf.intr_conf.lsc &&
1923             dev->data->dev_started)
1924                 rte_eth_linkstatus_get(dev, eth_link);
1925         else {
1926                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1927                 (*dev->dev_ops->link_update)(dev, 0);
1928                 *eth_link = dev->data->dev_link;
1929         }
1930 }
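/*
 * Illustrative link polling loop (editor's addition; port id, retry count
 * and delay are assumptions). The _nowait variant never blocks, so it is
 * the one to call from a polling loop:
 *
 *	struct rte_eth_link link;
 *	int n;
 *
 *	for (n = 0; n < 90; n++) {
 *		rte_eth_link_get_nowait(0, &link);
 *		if (link.link_status == ETH_LINK_UP)
 *			break;
 *		rte_delay_ms(100);
 *	}
 */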
1931
1932 int
1933 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1934 {
1935         struct rte_eth_dev *dev;
1936
1937         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1938
1939         dev = &rte_eth_devices[port_id];
1940         memset(stats, 0, sizeof(*stats));
1941
1942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1943         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1944         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1945 }
1946
1947 int
1948 rte_eth_stats_reset(uint16_t port_id)
1949 {
1950         struct rte_eth_dev *dev;
1951
1952         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1953         dev = &rte_eth_devices[port_id];
1954
1955         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1956         (*dev->dev_ops->stats_reset)(dev);
1957         dev->data->rx_mbuf_alloc_failed = 0;
1958
1959         return 0;
1960 }
1961
1962 static inline int
1963 get_xstats_basic_count(struct rte_eth_dev *dev)
1964 {
1965         uint16_t nb_rxqs, nb_txqs;
1966         int count;
1967
1968         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1969         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1970
1971         count = RTE_NB_STATS;
1972         count += nb_rxqs * RTE_NB_RXQ_STATS;
1973         count += nb_txqs * RTE_NB_TXQ_STATS;
1974
1975         return count;
1976 }
1977
1978 static int
1979 get_xstats_count(uint16_t port_id)
1980 {
1981         struct rte_eth_dev *dev;
1982         int count;
1983
1984         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1985         dev = &rte_eth_devices[port_id];
1986         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1987                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1988                                 NULL, 0);
1989                 if (count < 0)
1990                         return eth_err(port_id, count);
1991         }
1992         if (dev->dev_ops->xstats_get_names != NULL) {
1993                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1994                 if (count < 0)
1995                         return eth_err(port_id, count);
1996         } else
1997                 count = 0;
1998
1999
2000         count += get_xstats_basic_count(dev);
2001
2002         return count;
2003 }
2004
2005 int
2006 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2007                 uint64_t *id)
2008 {
2009         int cnt_xstats, idx_xstat;
2010
2011         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2012
2013         if (!id) {
2014                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2015                 return -EINVAL;
2016         }
2017
2018         if (!xstat_name) {
2019                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2020                 return -EINVAL;
2021         }
2022
2023         /* Get count */
2024         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2025         if (cnt_xstats < 0) {
2026                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2027                 return -ENODEV;
2028         }
2029
2030         /* Get id-name lookup table */
2031         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2032
2033         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2034                         port_id, xstats_names, cnt_xstats, NULL)) {
2035                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2036                 return -EINVAL;
2037         }
2038
2039         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2040                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2041                         *id = idx_xstat;
2042                         return 0;
2043                 }
2044         }
2045
2046         return -EINVAL;
2047 }
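/*
 * Usage sketch pairing the name lookup above with
 * rte_eth_xstats_get_by_id() (editor's addition; the statistic name comes
 * from the basic stats table, but availability is driver dependent):
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(0, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(0, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %" PRIu64 "\n", value);
 */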
2048
2049 /* retrieve basic stats names */
2050 static int
2051 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2052         struct rte_eth_xstat_name *xstats_names)
2053 {
2054         int cnt_used_entries = 0;
2055         uint32_t idx, id_queue;
2056         uint16_t num_q;
2057
2058         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2059                 snprintf(xstats_names[cnt_used_entries].name,
2060                         sizeof(xstats_names[0].name),
2061                         "%s", rte_stats_strings[idx].name);
2062                 cnt_used_entries++;
2063         }
2064         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2065         for (id_queue = 0; id_queue < num_q; id_queue++) {
2066                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2067                         snprintf(xstats_names[cnt_used_entries].name,
2068                                 sizeof(xstats_names[0].name),
2069                                 "rx_q%u%s",
2070                                 id_queue, rte_rxq_stats_strings[idx].name);
2071                         cnt_used_entries++;
2072                 }
2073
2074         }
2075         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2076         for (id_queue = 0; id_queue < num_q; id_queue++) {
2077                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2078                         snprintf(xstats_names[cnt_used_entries].name,
2079                                 sizeof(xstats_names[0].name),
2080                                 "tx_q%u%s",
2081                                 id_queue, rte_txq_stats_strings[idx].name);
2082                         cnt_used_entries++;
2083                 }
2084         }
2085         return cnt_used_entries;
2086 }
2087
2088 /* retrieve ethdev extended statistics names */
2089 int
2090 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2091         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2092         uint64_t *ids)
2093 {
2094         struct rte_eth_xstat_name *xstats_names_copy;
2095         unsigned int no_basic_stat_requested = 1;
2096         unsigned int no_ext_stat_requested = 1;
2097         unsigned int expected_entries;
2098         unsigned int basic_count;
2099         struct rte_eth_dev *dev;
2100         unsigned int i;
2101         int ret;
2102
2103         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2104         dev = &rte_eth_devices[port_id];
2105
2106         basic_count = get_xstats_basic_count(dev);
2107         ret = get_xstats_count(port_id);
2108         if (ret < 0)
2109                 return ret;
2110         expected_entries = (unsigned int)ret;
2111
2112         /* Return max number of stats if no ids given */
2113         if (!ids) {
2114                 if (!xstats_names)
2115                         return expected_entries;
2116                 else if (size < expected_entries)
2117                         return expected_entries;
2118         }
2119
2120         if (ids && !xstats_names)
2121                 return -EINVAL;
2122
2123         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2124                 uint64_t ids_copy[size];
2125
2126                 for (i = 0; i < size; i++) {
2127                         if (ids[i] < basic_count) {
2128                                 no_basic_stat_requested = 0;
2129                                 break;
2130                         }
2131
2132                         /*
2133                          * Convert ids to xstats ids that PMD knows.
2134                          * ids known by user are basic + extended stats.
2135                          */
2136                         ids_copy[i] = ids[i] - basic_count;
2137                 }
2138
2139                 if (no_basic_stat_requested)
2140                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2141                                         xstats_names, ids_copy, size);
2142         }
2143
2144         /* Retrieve all stats */
2145         if (!ids) {
2146                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2147                                 expected_entries);
2148                 if (num_stats < 0 || num_stats > (int)expected_entries)
2149                         return num_stats;
2150                 else
2151                         return expected_entries;
2152         }
2153
2154         xstats_names_copy = calloc(expected_entries,
2155                 sizeof(struct rte_eth_xstat_name));
2156
2157         if (!xstats_names_copy) {
2158                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2159                 return -ENOMEM;
2160         }
2161
2162         if (ids) {
2163                 for (i = 0; i < size; i++) {
2164                         if (ids[i] >= basic_count) {
2165                                 no_ext_stat_requested = 0;
2166                                 break;
2167                         }
2168                 }
2169         }
2170
2171         /* Fill xstats_names_copy structure */
2172         if (ids && no_ext_stat_requested) {
2173                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2174         } else {
2175                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2176                         expected_entries);
2177                 if (ret < 0) {
2178                         free(xstats_names_copy);
2179                         return ret;
2180                 }
2181         }
2182
2183         /* Filter stats */
2184         for (i = 0; i < size; i++) {
2185                 if (ids[i] >= expected_entries) {
2186                         RTE_ETHDEV_LOG(ERR, "Id value %"PRIu64" isn't valid\n", ids[i]);
2187                         free(xstats_names_copy);
2188                         return -EINVAL;
2189                 }
2190                 xstats_names[i] = xstats_names_copy[ids[i]];
2191         }
2192
2193         free(xstats_names_copy);
2194         return size;
2195 }
2196
2197 int
2198 rte_eth_xstats_get_names(uint16_t port_id,
2199         struct rte_eth_xstat_name *xstats_names,
2200         unsigned int size)
2201 {
2202         struct rte_eth_dev *dev;
2203         int cnt_used_entries;
2204         int cnt_expected_entries;
2205         int cnt_driver_entries;
2206
2207         cnt_expected_entries = get_xstats_count(port_id);
2208         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2209                         (int)size < cnt_expected_entries)
2210                 return cnt_expected_entries;
2211
2212         /* port_id checked in get_xstats_count() */
2213         dev = &rte_eth_devices[port_id];
2214
2215         cnt_used_entries = rte_eth_basic_stats_get_names(
2216                 dev, xstats_names);
2217
2218         if (dev->dev_ops->xstats_get_names != NULL) {
2219                 /* If there are any driver-specific xstats, append them
2220                  * to end of list.
2221                  */
2222                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2223                         dev,
2224                         xstats_names + cnt_used_entries,
2225                         size - cnt_used_entries);
2226                 if (cnt_driver_entries < 0)
2227                         return eth_err(port_id, cnt_driver_entries);
2228                 cnt_used_entries += cnt_driver_entries;
2229         }
2230
2231         return cnt_used_entries;
2232 }
2233
2234
2235 static int
2236 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2237 {
2238         struct rte_eth_dev *dev;
2239         struct rte_eth_stats eth_stats;
2240         unsigned int count = 0, i, q;
2241         uint64_t val, *stats_ptr;
2242         uint16_t nb_rxqs, nb_txqs;
2243         int ret;
2244
2245         ret = rte_eth_stats_get(port_id, &eth_stats);
2246         if (ret < 0)
2247                 return ret;
2248
2249         dev = &rte_eth_devices[port_id];
2250
2251         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2252         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2253
2254         /* global stats */
2255         for (i = 0; i < RTE_NB_STATS; i++) {
2256                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2257                                         rte_stats_strings[i].offset);
2258                 val = *stats_ptr;
2259                 xstats[count++].value = val;
2260         }
2261
2262         /* per-rxq stats */
2263         for (q = 0; q < nb_rxqs; q++) {
2264                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2265                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2266                                         rte_rxq_stats_strings[i].offset +
2267                                         q * sizeof(uint64_t));
2268                         val = *stats_ptr;
2269                         xstats[count++].value = val;
2270                 }
2271         }
2272
2273         /* per-txq stats */
2274         for (q = 0; q < nb_txqs; q++) {
2275                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2276                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2277                                         rte_txq_stats_strings[i].offset +
2278                                         q * sizeof(uint64_t));
2279                         val = *stats_ptr;
2280                         xstats[count++].value = val;
2281                 }
2282         }
2283         return count;
2284 }
2285
2286 /* retrieve ethdev extended statistics */
2287 int
2288 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2289                          uint64_t *values, unsigned int size)
2290 {
2291         unsigned int no_basic_stat_requested = 1;
2292         unsigned int no_ext_stat_requested = 1;
2293         unsigned int num_xstats_filled;
2294         unsigned int basic_count;
2295         uint16_t expected_entries;
2296         struct rte_eth_dev *dev;
2297         unsigned int i;
2298         int ret;
2299
2300         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2301         ret = get_xstats_count(port_id);
2302         if (ret < 0)
2303                 return ret;
2304         expected_entries = (uint16_t)ret;
2305         struct rte_eth_xstat xstats[expected_entries];
2306         dev = &rte_eth_devices[port_id];
2307         basic_count = get_xstats_basic_count(dev);
2308
2309         /* Return max number of stats if no ids given */
2310         if (!ids) {
2311                 if (!values)
2312                         return expected_entries;
2313                 else if (size < expected_entries)
2314                         return expected_entries;
2315         }
2316
2317         if (ids && !values)
2318                 return -EINVAL;
2319
2320         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2321                 /* basic_count computed above already covers the generic stats */
2322                 uint64_t ids_copy[size];
2323
2324                 for (i = 0; i < size; i++) {
2325                         if (ids[i] < basic_count) {
2326                                 no_basic_stat_requested = 0;
2327                                 break;
2328                         }
2329
2330                         /*
2331                          * Convert ids to xstats ids that PMD knows.
2332                          * ids known by user are basic + extended stats.
2333                          */
2334                         ids_copy[i] = ids[i] - basic_count;
2335                 }
2336
2337                 if (no_basic_stat_requested)
2338                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2339                                         values, size);
2340         }
2341
2342         if (ids) {
2343                 for (i = 0; i < size; i++) {
2344                         if (ids[i] >= basic_count) {
2345                                 no_ext_stat_requested = 0;
2346                                 break;
2347                         }
2348                 }
2349         }
2350
2351         /* Fill the xstats structure */
2352         if (ids && no_ext_stat_requested)
2353                 ret = rte_eth_basic_stats_get(port_id, xstats);
2354         else
2355                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2356
2357         if (ret < 0)
2358                 return ret;
2359         num_xstats_filled = (unsigned int)ret;
2360
2361         /* Return all stats */
2362         if (!ids) {
2363                 for (i = 0; i < num_xstats_filled; i++)
2364                         values[i] = xstats[i].value;
2365                 return expected_entries;
2366         }
2367
2368         /* Filter stats */
2369         for (i = 0; i < size; i++) {
2370                 if (ids[i] >= expected_entries) {
2371                         RTE_ETHDEV_LOG(ERR, "Id value %"PRIu64" isn't valid\n", ids[i]);
2372                         return -EINVAL;
2373                 }
2374                 values[i] = xstats[ids[i]].value;
2375         }
2376         return size;
2377 }
2378
2379 int
2380 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2381         unsigned int n)
2382 {
2383         struct rte_eth_dev *dev;
2384         unsigned int count = 0, i;
2385         signed int xcount = 0;
2386         uint16_t nb_rxqs, nb_txqs;
2387         int ret;
2388
2389         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2390
2391         dev = &rte_eth_devices[port_id];
2392
2393         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2394         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2395
2396         /* Return generic statistics */
2397         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2398                 (nb_txqs * RTE_NB_TXQ_STATS);
2399
2400         /* implemented by the driver */
2401         if (dev->dev_ops->xstats_get != NULL) {
2402                 /* Retrieve the xstats from the driver at the end of the
2403                  * xstats struct.
2404                  */
2405                 xcount = (*dev->dev_ops->xstats_get)(dev,
2406                                      xstats ? xstats + count : NULL,
2407                                      (n > count) ? n - count : 0);
2408
2409                 if (xcount < 0)
2410                         return eth_err(port_id, xcount);
2411         }
2412
2413         if (n < count + xcount || xstats == NULL)
2414                 return count + xcount;
2415
2416         /* now fill the xstats structure */
2417         ret = rte_eth_basic_stats_get(port_id, xstats);
2418         if (ret < 0)
2419                 return ret;
2420         count = ret;
2421
2422         for (i = 0; i < count; i++)
2423                 xstats[i].id = i;
2424         /* add an offset to driver-specific stats */
2425         for ( ; i < count + xcount; i++)
2426                 xstats[i].id += count;
2427
2428         return count + xcount;
2429 }
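/*
 * Illustrative two-call pattern for the xstats API above (editor's
 * addition; error handling trimmed and port id 0 assumed): size the arrays
 * with NULL buffers, then fetch names and values.
 *
 *	int i, n = rte_eth_xstats_get(0, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(0, names, n);
 *	rte_eth_xstats_get(0, xs, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, xs[i].value);
 */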
2430
2431 /* reset ethdev extended statistics */
2432 void
2433 rte_eth_xstats_reset(uint16_t port_id)
2434 {
2435         struct rte_eth_dev *dev;
2436
2437         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2438         dev = &rte_eth_devices[port_id];
2439
2440         /* implemented by the driver */
2441         if (dev->dev_ops->xstats_reset != NULL) {
2442                 (*dev->dev_ops->xstats_reset)(dev);
2443                 return;
2444         }
2445
2446         /* fallback to default */
2447         rte_eth_stats_reset(port_id);
2448 }
2449
2450 static int
2451 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2452                 uint8_t is_rx)
2453 {
2454         struct rte_eth_dev *dev;
2455
2456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2457
2458         dev = &rte_eth_devices[port_id];
2459
2460         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2461
2462         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2463                 return -EINVAL;
2464
2465         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2466                 return -EINVAL;
2467
2468         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2469                 return -EINVAL;
2470
2471         return (*dev->dev_ops->queue_stats_mapping_set)
2472                         (dev, queue_id, stat_idx, is_rx);
2473 }
2474
2475
2476 int
2477 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2478                 uint8_t stat_idx)
2479 {
2480         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2481                                                 stat_idx, STAT_QMAP_TX));
2482 }
2483
2484
2485 int
2486 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2487                 uint8_t stat_idx)
2488 {
2489         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2490                                                 stat_idx, STAT_QMAP_RX));
2491 }
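/*
 * Illustrative queue-to-counter mapping (editor's addition; only
 * meaningful on NICs with fewer stat registers than queues, and the ids
 * used are assumptions). Map RX queue 5 into per-queue stats slot 0 of
 * struct rte_eth_stats:
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(0, 5, 0);
 */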
2492
2493 int
2494 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2495 {
2496         struct rte_eth_dev *dev;
2497
2498         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2499         dev = &rte_eth_devices[port_id];
2500
2501         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2502         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2503                                                         fw_version, fw_size));
2504 }
2505
2506 void
2507 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2508 {
2509         struct rte_eth_dev *dev;
2510         const struct rte_eth_desc_lim lim = {
2511                 .nb_max = UINT16_MAX,
2512                 .nb_min = 0,
2513                 .nb_align = 1,
2514         };
2515
2516         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2517         dev = &rte_eth_devices[port_id];
2518
2519         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2520         dev_info->rx_desc_lim = lim;
2521         dev_info->tx_desc_lim = lim;
2522         dev_info->device = dev->device;
2523
2524         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2525         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2526         dev_info->driver_name = dev->device->driver->name;
2527         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2528         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2529
2530         dev_info->dev_flags = &dev->data->dev_flags;
2531 }
2532
2533 int
2534 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2535                                  uint32_t *ptypes, int num)
2536 {
2537         int i, j;
2538         struct rte_eth_dev *dev;
2539         const uint32_t *all_ptypes;
2540
2541         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2542         dev = &rte_eth_devices[port_id];
2543         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2544         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2545
2546         if (!all_ptypes)
2547                 return 0;
2548
2549         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2550                 if (all_ptypes[i] & ptype_mask) {
2551                         if (j < num)
2552                                 ptypes[j] = all_ptypes[i];
2553                         j++;
2554                 }
2555
2556         return j;
2557 }
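/*
 * Illustrative ptype query (editor's addition; the mask and array size
 * are assumptions): a first call with a NULL array returns the required
 * number of entries, a second call fills them in.
 *
 *	uint32_t ptypes[16];
 *	int n = rte_eth_dev_get_supported_ptypes(0, RTE_PTYPE_L4_MASK,
 *			NULL, 0);
 *
 *	if (n > 0 && n <= 16)
 *		rte_eth_dev_get_supported_ptypes(0, RTE_PTYPE_L4_MASK,
 *				ptypes, n);
 */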
2558
2559 void
2560 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2561 {
2562         struct rte_eth_dev *dev;
2563
2564         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2565         dev = &rte_eth_devices[port_id];
2566         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2567 }
2568
2569
2570 int
2571 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2572 {
2573         struct rte_eth_dev *dev;
2574
2575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2576
2577         dev = &rte_eth_devices[port_id];
2578         *mtu = dev->data->mtu;
2579         return 0;
2580 }
2581
2582 int
2583 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2584 {
2585         int ret;
2586         struct rte_eth_dev *dev;
2587
2588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2589         dev = &rte_eth_devices[port_id];
2590         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2591
2592         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2593         if (!ret)
2594                 dev->data->mtu = mtu;
2595
2596         return eth_err(port_id, ret);
2597 }
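/*
 * Illustrative MTU update (editor's addition; the jumbo value of 9000 is
 * an assumption and drivers reject values outside their supported range):
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(0, 9000) == 0)
 *		rte_eth_dev_get_mtu(0, &mtu);
 *
 * On success the cached dev->data->mtu is updated, so the read-back
 * returns the new value without touching the hardware.
 */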
2598
2599 int
2600 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2601 {
2602         struct rte_eth_dev *dev;
2603         int ret;
2604
2605         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2606         dev = &rte_eth_devices[port_id];
2607         if (!(dev->data->dev_conf.rxmode.offloads &
2608               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2609                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2610                         port_id);
2611                 return -ENOSYS;
2612         }
2613
2614         if (vlan_id > 4095) {
2615                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2616                         port_id, vlan_id);
2617                 return -EINVAL;
2618         }
2619         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2620
2621         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2622         if (ret == 0) {
2623                 struct rte_vlan_filter_conf *vfc;
2624                 int vidx;
2625                 int vbit;
2626
2627                 vfc = &dev->data->vlan_filter_conf;
2628                 vidx = vlan_id / 64;
2629                 vbit = vlan_id % 64;
2630
2631                 if (on)
2632                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2633                 else
2634                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2635         }
2636
2637         return eth_err(port_id, ret);
2638 }
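/*
 * Illustrative VLAN filter usage (editor's addition; port and VLAN ids
 * are assumptions). DEV_RX_OFFLOAD_VLAN_FILTER must be enabled at
 * configure time, otherwise the function above returns -ENOSYS:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .offloads = DEV_RX_OFFLOAD_VLAN_FILTER } };
 *
 *	rte_eth_dev_configure(0, 1, 1, &conf);
 *	rte_eth_dev_vlan_filter(0, 100, 1);
 */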
2639
2640 int
2641 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2642                                     int on)
2643 {
2644         struct rte_eth_dev *dev;
2645
2646         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2647         dev = &rte_eth_devices[port_id];
2648         if (rx_queue_id >= dev->data->nb_rx_queues) {
2649                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
2650                 return -EINVAL;
2651         }
2652
2653         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2654         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2655
2656         return 0;
2657 }
2658
2659 int
2660 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2661                                 enum rte_vlan_type vlan_type,
2662                                 uint16_t tpid)
2663 {
2664         struct rte_eth_dev *dev;
2665
2666         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2667         dev = &rte_eth_devices[port_id];
2668         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2669
2670         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2671                                                                tpid));
2672 }
2673
2674 int
2675 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2676 {
2677         struct rte_eth_dev *dev;
2678         int ret = 0;
2679         int mask = 0;
2680         int cur, org = 0;
2681         uint64_t orig_offloads;
2682
2683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684         dev = &rte_eth_devices[port_id];
2685
2686         /* save original values in case of failure */
2687         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2688
2689         /* check which options were changed by the application */
2690         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2691         org = !!(dev->data->dev_conf.rxmode.offloads &
2692                  DEV_RX_OFFLOAD_VLAN_STRIP);
2693         if (cur != org) {
2694                 if (cur)
2695                         dev->data->dev_conf.rxmode.offloads |=
2696                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2697                 else
2698                         dev->data->dev_conf.rxmode.offloads &=
2699                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2700                 mask |= ETH_VLAN_STRIP_MASK;
2701         }
2702
2703         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2704         org = !!(dev->data->dev_conf.rxmode.offloads &
2705                  DEV_RX_OFFLOAD_VLAN_FILTER);
2706         if (cur != org) {
2707                 if (cur)
2708                         dev->data->dev_conf.rxmode.offloads |=
2709                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2710                 else
2711                         dev->data->dev_conf.rxmode.offloads &=
2712                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2713                 mask |= ETH_VLAN_FILTER_MASK;
2714         }
2715
2716         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2717         org = !!(dev->data->dev_conf.rxmode.offloads &
2718                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2719         if (cur != org) {
2720                 if (cur)
2721                         dev->data->dev_conf.rxmode.offloads |=
2722                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2723                 else
2724                         dev->data->dev_conf.rxmode.offloads &=
2725                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2726                 mask |= ETH_VLAN_EXTEND_MASK;
2727         }
2728
2729         /* no change */
2730         if (mask == 0)
2731                 return ret;
2732
2733         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2734         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2735         if (ret) {
2736                 /* hit an error, restore the original values */
2737                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2738         }
2739
2740         return eth_err(port_id, ret);
2741 }
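/*
 * Illustrative read-modify-write of the VLAN offload flags using the
 * get/set pair (editor's addition; port id 0 is an assumption):
 *
 *	int mask = rte_eth_dev_get_vlan_offload(0);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	mask &= ~ETH_VLAN_FILTER_OFFLOAD;
 *	if (rte_eth_dev_set_vlan_offload(0, mask) != 0)
 *		printf("VLAN offload update failed\n");
 *
 * As implemented above, rxmode.offloads is restored to its original value
 * if the driver rejects the new mask.
 */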
2742
2743 int
2744 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2745 {
2746         struct rte_eth_dev *dev;
2747         int ret = 0;
2748
2749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2750         dev = &rte_eth_devices[port_id];
2751
2752         if (dev->data->dev_conf.rxmode.offloads &
2753             DEV_RX_OFFLOAD_VLAN_STRIP)
2754                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2755
2756         if (dev->data->dev_conf.rxmode.offloads &
2757             DEV_RX_OFFLOAD_VLAN_FILTER)
2758                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2759
2760         if (dev->data->dev_conf.rxmode.offloads &
2761             DEV_RX_OFFLOAD_VLAN_EXTEND)
2762                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2763
2764         return ret;
2765 }
2766
2767 int
2768 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2769 {
2770         struct rte_eth_dev *dev;
2771
2772         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2773         dev = &rte_eth_devices[port_id];
2774         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2775
2776         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2777 }
2778
2779 int
2780 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2781 {
2782         struct rte_eth_dev *dev;
2783
2784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2785         dev = &rte_eth_devices[port_id];
2786         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2787         memset(fc_conf, 0, sizeof(*fc_conf));
2788         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2789 }
2790
2791 int
2792 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2793 {
2794         struct rte_eth_dev *dev;
2795
2796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2797         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2798                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
2799                 return -EINVAL;
2800         }
2801
2802         dev = &rte_eth_devices[port_id];
2803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2804         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2805 }
2806
2807 int
2808 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2809                                    struct rte_eth_pfc_conf *pfc_conf)
2810 {
2811         struct rte_eth_dev *dev;
2812
2813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2814         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2815                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
2816                 return -EINVAL;
2817         }
2818
2819         dev = &rte_eth_devices[port_id];
2820         /* High water, low water validation are device specific */
2821         if (*dev->dev_ops->priority_flow_ctrl_set)
2822                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2823                                         (dev, pfc_conf));
2824         return -ENOTSUP;
2825 }
2826
2827 static int
2828 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2829                         uint16_t reta_size)
2830 {
2831         uint16_t i, num;
2832
2833         if (!reta_conf)
2834                 return -EINVAL;
2835
2836         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2837         for (i = 0; i < num; i++) {
2838                 if (reta_conf[i].mask)
2839                         return 0;
2840         }
2841
2842         return -EINVAL;
2843 }
2844
2845 static int
2846 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2847                          uint16_t reta_size,
2848                          uint16_t max_rxq)
2849 {
2850         uint16_t i, idx, shift;
2851
2852         if (!reta_conf)
2853                 return -EINVAL;
2854
2855         if (max_rxq == 0) {
2856                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
2857                 return -EINVAL;
2858         }
2859
2860         for (i = 0; i < reta_size; i++) {
2861                 idx = i / RTE_RETA_GROUP_SIZE;
2862                 shift = i % RTE_RETA_GROUP_SIZE;
2863                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2864                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2865                         RTE_ETHDEV_LOG(ERR,
2866                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
2867                                 idx, shift,
2868                                 reta_conf[idx].reta[shift], max_rxq);
2869                         return -EINVAL;
2870                 }
2871         }
2872
2873         return 0;
2874 }
2875
2876 int
2877 rte_eth_dev_rss_reta_update(uint16_t port_id,
2878                             struct rte_eth_rss_reta_entry64 *reta_conf,
2879                             uint16_t reta_size)
2880 {
2881         struct rte_eth_dev *dev;
2882         int ret;
2883
2884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2885         /* Check mask bits */
2886         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2887         if (ret < 0)
2888                 return ret;
2889
2890         dev = &rte_eth_devices[port_id];
2891
2892         /* Check entry value */
2893         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2894                                 dev->data->nb_rx_queues);
2895         if (ret < 0)
2896                 return ret;
2897
2898         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2899         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2900                                                              reta_size));
2901 }
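/*
 * Illustrative RETA update spreading 128 entries round-robin over 4
 * queues (editor's addition; the sizes are assumptions, real values come
 * from struct rte_eth_dev_info):
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	uint16_t i, reta_size = 128, nb_queues = 4;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(0, reta_conf, reta_size);
 */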
2902
2903 int
2904 rte_eth_dev_rss_reta_query(uint16_t port_id,
2905                            struct rte_eth_rss_reta_entry64 *reta_conf,
2906                            uint16_t reta_size)
2907 {
2908         struct rte_eth_dev *dev;
2909         int ret;
2910
2911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2912
2913         /* Check mask bits */
2914         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2915         if (ret < 0)
2916                 return ret;
2917
2918         dev = &rte_eth_devices[port_id];
2919         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2920         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2921                                                             reta_size));
2922 }
2923
2924 int
2925 rte_eth_dev_rss_hash_update(uint16_t port_id,
2926                             struct rte_eth_rss_conf *rss_conf)
2927 {
2928         struct rte_eth_dev *dev;
2929         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2930
2931         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2932         dev = &rte_eth_devices[port_id];
2933         rte_eth_dev_info_get(port_id, &dev_info);
2934         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2935             dev_info.flow_type_rss_offloads) {
2936                 RTE_ETHDEV_LOG(ERR,
2937                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2938                         port_id, rss_conf->rss_hf,
2939                         dev_info.flow_type_rss_offloads);
2940                 return -EINVAL;
2941         }
2942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2943         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2944                                                                  rss_conf));
2945 }
2946
2947 int
2948 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2949                               struct rte_eth_rss_conf *rss_conf)
2950 {
2951         struct rte_eth_dev *dev;
2952
2953         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2954         dev = &rte_eth_devices[port_id];
2955         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2956         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2957                                                                    rss_conf));
2958 }
2959
2960 int
2961 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2962                                 struct rte_eth_udp_tunnel *udp_tunnel)
2963 {
2964         struct rte_eth_dev *dev;
2965
2966         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2967         if (udp_tunnel == NULL) {
2968                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2969                 return -EINVAL;
2970         }
2971
2972         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2973                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2974                 return -EINVAL;
2975         }
2976
2977         dev = &rte_eth_devices[port_id];
2978         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2979         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2980                                                                 udp_tunnel));
2981 }
2982
2983 int
2984 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2985                                    struct rte_eth_udp_tunnel *udp_tunnel)
2986 {
2987         struct rte_eth_dev *dev;
2988
2989         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2990         dev = &rte_eth_devices[port_id];
2991
2992         if (udp_tunnel == NULL) {
2993                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2994                 return -EINVAL;
2995         }
2996
2997         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2998                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2999                 return -EINVAL;
3000         }
3001
3002         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3003         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3004                                                                 udp_tunnel));
3005 }
3006
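/*
 * Usage sketch (editorial example, not part of the library): tell the
 * NIC that UDP destination port 4789 carries VXLAN so the PMD can
 * recognise tunnelled traffic. Assumes the PMD implements the
 * udp_tunnel_port_add/del ops.
 *
 *      struct rte_eth_udp_tunnel tunnel = {
 *              .udp_port = 4789,
 *              .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *      };
 *
 *      if (rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel) == 0) {
 *              ... run traffic ...
 *              rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 *      }
 */
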
3007 int
3008 rte_eth_led_on(uint16_t port_id)
3009 {
3010         struct rte_eth_dev *dev;
3011
3012         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3013         dev = &rte_eth_devices[port_id];
3014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3015         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3016 }
3017
3018 int
3019 rte_eth_led_off(uint16_t port_id)
3020 {
3021         struct rte_eth_dev *dev;
3022
3023         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3024         dev = &rte_eth_devices[port_id];
3025         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3026         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3027 }
3028
3029 /*
3030  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3031  * an empty spot.
3032  */
3033 static int
3034 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3035 {
3036         struct rte_eth_dev_info dev_info;
3037         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3038         unsigned i;
3039
3040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3041         rte_eth_dev_info_get(port_id, &dev_info);
3042
3043         for (i = 0; i < dev_info.max_mac_addrs; i++)
3044                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
3045                         return i;
3046
3047         return -1;
3048 }
3049
3050 static const struct ether_addr null_mac_addr;
3051
3052 int
3053 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
3054                         uint32_t pool)
3055 {
3056         struct rte_eth_dev *dev;
3057         int index;
3058         uint64_t pool_mask;
3059         int ret;
3060
3061         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3062         dev = &rte_eth_devices[port_id];
3063         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3064
3065         if (is_zero_ether_addr(addr)) {
3066                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3067                         port_id);
3068                 return -EINVAL;
3069         }
3070         if (pool >= ETH_64_POOLS) {
3071                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3072                 return -EINVAL;
3073         }
3074
3075         index = get_mac_addr_index(port_id, addr);
3076         if (index < 0) {
3077                 index = get_mac_addr_index(port_id, &null_mac_addr);
3078                 if (index < 0) {
3079                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3080                                 port_id);
3081                         return -ENOSPC;
3082                 }
3083         } else {
3084                 pool_mask = dev->data->mac_pool_sel[index];
3085
3086                 /* Check if both MAC address and pool are already there, and do nothing */
3087                 if (pool_mask & (1ULL << pool))
3088                         return 0;
3089         }
3090
3091         /* Update NIC */
3092         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3093
3094         if (ret == 0) {
3095                 /* Update address in NIC data structure */
3096                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3097
3098                 /* Update pool bitmap in NIC data structure */
3099                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3100         }
3101
3102         return eth_err(port_id, ret);
3103 }
3104
3105 int
3106 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3107 {
3108         struct rte_eth_dev *dev;
3109         int index;
3110
3111         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3112         dev = &rte_eth_devices[port_id];
3113         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3114
3115         index = get_mac_addr_index(port_id, addr);
3116         if (index == 0) {
3117                 RTE_ETHDEV_LOG(ERR,
3118                         "Port %u: Cannot remove default MAC address\n",
3119                         port_id);
3120                 return -EADDRINUSE;
3121         } else if (index < 0)
3122                 return 0;  /* Do nothing if address wasn't found */
3123
3124         /* Update NIC */
3125         (*dev->dev_ops->mac_addr_remove)(dev, index);
3126
3127         /* Update address in NIC data structure */
3128         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3129
3130         /* reset pool bitmap */
3131         dev->data->mac_pool_sel[index] = 0;
3132
3133         return 0;
3134 }
3135
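/*
 * Usage sketch (editorial example, not part of the library): add a
 * locally administered secondary MAC address to pool 0 and remove it
 * again. Index 0 always holds the default address and cannot be
 * removed here; use rte_eth_dev_default_mac_addr_set() to replace it.
 *
 *      struct ether_addr mac = {
 *              .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *      };
 *
 *      if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) == 0) {
 *              ... receive on the extra address ...
 *              rte_eth_dev_mac_addr_remove(port_id, &mac);
 *      }
 */
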
3136 int
3137 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3138 {
3139         struct rte_eth_dev *dev;
3140         int ret;
3141
3142         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3143
3144         if (!is_valid_assigned_ether_addr(addr))
3145                 return -EINVAL;
3146
3147         dev = &rte_eth_devices[port_id];
3148         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3149
3150         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3151         if (ret < 0)
3152                 return ret;
3153
3154         /* Update default address in NIC data structure */
3155         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3156
3157         return 0;
3158 }
3159
3160
3161 /*
3162  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3163  * an empty spot.
3164  */
3165 static int
3166 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3167 {
3168         struct rte_eth_dev_info dev_info;
3169         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3170         unsigned i;
3171
3172         rte_eth_dev_info_get(port_id, &dev_info);
3173         if (!dev->data->hash_mac_addrs)
3174                 return -1;
3175
3176         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3177                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3178                         ETHER_ADDR_LEN) == 0)
3179                         return i;
3180
3181         return -1;
3182 }
3183
3184 int
3185 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3186                                 uint8_t on)
3187 {
3188         int index;
3189         int ret;
3190         struct rte_eth_dev *dev;
3191
3192         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3193
3194         dev = &rte_eth_devices[port_id];
3195         if (is_zero_ether_addr(addr)) {
3196                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3197                         port_id);
3198                 return -EINVAL;
3199         }
3200
3201         index = get_hash_mac_addr_index(port_id, addr);
3202         /* Check if it's already there, and do nothing */
3203         if ((index >= 0) && on)
3204                 return 0;
3205
3206         if (index < 0) {
3207                 if (!on) {
3208                         RTE_ETHDEV_LOG(ERR,
3209                                 "Port %u: the MAC address was not set in UTA\n",
3210                                 port_id);
3211                         return -EINVAL;
3212                 }
3213
3214                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3215                 if (index < 0) {
3216                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3217                                 port_id);
3218                         return -ENOSPC;
3219                 }
3220         }
3221
3222         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3223         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3224         if (ret == 0) {
3225                 /* Update address in NIC data structure */
3226                 if (on)
3227                         ether_addr_copy(addr,
3228                                         &dev->data->hash_mac_addrs[index]);
3229                 else
3230                         ether_addr_copy(&null_mac_addr,
3231                                         &dev->data->hash_mac_addrs[index]);
3232         }
3233
3234         return eth_err(port_id, ret);
3235 }
3236
3237 int
3238 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3239 {
3240         struct rte_eth_dev *dev;
3241
3242         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3243
3244         dev = &rte_eth_devices[port_id];
3245
3246         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3247         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3248                                                                        on));
3249 }
3250
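/*
 * Usage sketch (editorial example, not part of the library): accept one
 * extra unicast address through the hash filter (UTA) instead of
 * consuming a perfect-filter slot.
 *
 *      struct ether_addr peer = {
 *              .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 }
 *      };
 *
 *      rte_eth_dev_uc_hash_table_set(port_id, &peer, 1);
 *
 * rte_eth_dev_uc_all_hash_table_set(port_id, 1) would instead set every
 * bit of the table in a single call.
 */
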
3251 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3252                                         uint16_t tx_rate)
3253 {
3254         struct rte_eth_dev *dev;
3255         struct rte_eth_dev_info dev_info;
3256         struct rte_eth_link link;
3257
3258         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3259
3260         dev = &rte_eth_devices[port_id];
3261         rte_eth_dev_info_get(port_id, &dev_info);
3262         link = dev->data->dev_link;
3263
3264         if (queue_idx >= dev_info.max_tx_queues) {
3265                 RTE_ETHDEV_LOG(ERR,
3266                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3267                         port_id, queue_idx);
3268                 return -EINVAL;
3269         }
3270
3271         if (tx_rate > link.link_speed) {
3272                 RTE_ETHDEV_LOG(ERR,
3273                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3274                         tx_rate, link.link_speed);
3275                 return -EINVAL;
3276         }
3277
3278         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3279         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3280                                                         queue_idx, tx_rate));
3281 }
3282
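/*
 * Usage sketch (editorial example, not part of the library): cap TX
 * queue 0 at 1000 Mbps. The rate is given in Mbps and must not exceed
 * the negotiated link speed checked above, so the port should be
 * started and the link up before calling this.
 *
 *      int ret = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 *
 * A return of -ENOTSUP means the PMD does not implement the op.
 */
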
3283 int
3284 rte_eth_mirror_rule_set(uint16_t port_id,
3285                         struct rte_eth_mirror_conf *mirror_conf,
3286                         uint8_t rule_id, uint8_t on)
3287 {
3288         struct rte_eth_dev *dev;
3289
3290         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3291         if (mirror_conf->rule_type == 0) {
3292                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3293                 return -EINVAL;
3294         }
3295
3296         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3297                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3298                         ETH_64_POOLS - 1);
3299                 return -EINVAL;
3300         }
3301
3302         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3303              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3304             (mirror_conf->pool_mask == 0)) {
3305                 RTE_ETHDEV_LOG(ERR,
3306                         "Invalid mirror pool, pool mask cannot be 0\n");
3307                 return -EINVAL;
3308         }
3309
3310         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3311             mirror_conf->vlan.vlan_mask == 0) {
3312                 RTE_ETHDEV_LOG(ERR,
3313                         "Invalid vlan mask, vlan mask cannot be 0\n");
3314                 return -EINVAL;
3315         }
3316
3317         dev = &rte_eth_devices[port_id];
3318         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3319
3320         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3321                                                 mirror_conf, rule_id, on));
3322 }
3323
3324 int
3325 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3326 {
3327         struct rte_eth_dev *dev;
3328
3329         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3330
3331         dev = &rte_eth_devices[port_id];
3332         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3333
3334         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3335                                                                    rule_id));
3336 }
3337
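/*
 * Usage sketch (editorial example, not part of the library): mirror all
 * traffic tagged with VLAN 100 into pool 1 using rule slot 0. The
 * vlan_mask selects which vlan_id[] entries are valid; a zero mask is
 * rejected above.
 *
 *      struct rte_eth_mirror_conf conf;
 *
 *      memset(&conf, 0, sizeof(conf));
 *      conf.rule_type = ETH_MIRROR_VLAN;
 *      conf.dst_pool = 1;
 *      conf.vlan.vlan_mask = 1ULL << 0;
 *      conf.vlan.vlan_id[0] = 100;
 *
 *      if (rte_eth_mirror_rule_set(port_id, &conf, 0, 1) == 0)
 *              rte_eth_mirror_rule_reset(port_id, 0);
 */
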
3338 RTE_INIT(eth_dev_init_cb_lists)
3339 {
3340         int i;
3341
3342         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3343                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3344 }
3345
3346 int
3347 rte_eth_dev_callback_register(uint16_t port_id,
3348                         enum rte_eth_event_type event,
3349                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3350 {
3351         struct rte_eth_dev *dev;
3352         struct rte_eth_dev_callback *user_cb;
3353         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3354         uint16_t last_port;
3355
3356         if (!cb_fn)
3357                 return -EINVAL;
3358
3359         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3360                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
3361                 return -EINVAL;
3362         }
3363
3364         if (port_id == RTE_ETH_ALL) {
3365                 next_port = 0;
3366                 last_port = RTE_MAX_ETHPORTS - 1;
3367         } else {
3368                 next_port = last_port = port_id;
3369         }
3370
3371         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3372
3373         do {
3374                 dev = &rte_eth_devices[next_port];
3375
3376                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3377                         if (user_cb->cb_fn == cb_fn &&
3378                                 user_cb->cb_arg == cb_arg &&
3379                                 user_cb->event == event) {
3380                                 break;
3381                         }
3382                 }
3383
3384                 /* create a new callback. */
3385                 if (user_cb == NULL) {
3386                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3387                                 sizeof(struct rte_eth_dev_callback), 0);
3388                         if (user_cb != NULL) {
3389                                 user_cb->cb_fn = cb_fn;
3390                                 user_cb->cb_arg = cb_arg;
3391                                 user_cb->event = event;
3392                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3393                                                   user_cb, next);
3394                         } else {
3395                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3396                                 rte_eth_dev_callback_unregister(port_id, event,
3397                                                                 cb_fn, cb_arg);
3398                                 return -ENOMEM;
3399                         }
3400
3401                 }
3402         } while (++next_port <= last_port);
3403
3404         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3405         return 0;
3406 }
3407
3408 int
3409 rte_eth_dev_callback_unregister(uint16_t port_id,
3410                         enum rte_eth_event_type event,
3411                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3412 {
3413         int ret;
3414         struct rte_eth_dev *dev;
3415         struct rte_eth_dev_callback *cb, *next;
3416         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3417         uint16_t last_port;
3418
3419         if (!cb_fn)
3420                 return -EINVAL;
3421
3422         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3423                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
3424                 return -EINVAL;
3425         }
3426
3427         if (port_id == RTE_ETH_ALL) {
3428                 next_port = 0;
3429                 last_port = RTE_MAX_ETHPORTS - 1;
3430         } else {
3431                 next_port = last_port = port_id;
3432         }
3433
3434         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3435
3436         do {
3437                 dev = &rte_eth_devices[next_port];
3438                 ret = 0;
3439                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3440                      cb = next) {
3441
3442                         next = TAILQ_NEXT(cb, next);
3443
3444                         if (cb->cb_fn != cb_fn || cb->event != event ||
3445                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3446                                 continue;
3447
3448                         /*
3449                          * if this callback is not executing right now,
3450                          * then remove it.
3451                          */
3452                         if (cb->active == 0) {
3453                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3454                                 rte_free(cb);
3455                         } else {
3456                                 ret = -EAGAIN;
3457                         }
3458                 }
3459         } while (++next_port <= last_port);
3460
3461         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3462         return ret;
3463 }
3464
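/*
 * Usage sketch (editorial example, not part of the library): register a
 * link-status-change callback on every port. "on_link_change" is a
 * hypothetical application function; it runs from the interrupt thread,
 * so it must not block.
 *
 *      static int
 *      on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *                      void *cb_arg, void *ret_param)
 *      {
 *              RTE_SET_USED(event);
 *              RTE_SET_USED(cb_arg);
 *              RTE_SET_USED(ret_param);
 *              printf("port %u link state changed\n", port_id);
 *              return 0;
 *      }
 *
 *      rte_eth_dev_callback_register(RTE_ETH_ALL,
 *                      RTE_ETH_EVENT_INTR_LSC, on_link_change, NULL);
 *      ...
 *      rte_eth_dev_callback_unregister(RTE_ETH_ALL,
 *                      RTE_ETH_EVENT_INTR_LSC, on_link_change, NULL);
 */
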
3465 int
3466 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3467         enum rte_eth_event_type event, void *ret_param)
3468 {
3469         struct rte_eth_dev_callback *cb_lst;
3470         struct rte_eth_dev_callback dev_cb;
3471         int rc = 0;
3472
3473         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3474         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3475                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3476                         continue;
3477                 dev_cb = *cb_lst;
3478                 cb_lst->active = 1;
3479                 if (ret_param != NULL)
3480                         dev_cb.ret_param = ret_param;
3481
3482                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3483                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3484                                 dev_cb.cb_arg, dev_cb.ret_param);
3485                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3486                 cb_lst->active = 0;
3487         }
3488         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3489         return rc;
3490 }
3491
3492 void
3493 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3494 {
3495         if (dev == NULL)
3496                 return;
3497
3498         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3499
3500         dev->state = RTE_ETH_DEV_ATTACHED;
3501 }
3502
3503 int
3504 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3505 {
3506         uint32_t vec;
3507         struct rte_eth_dev *dev;
3508         struct rte_intr_handle *intr_handle;
3509         uint16_t qid;
3510         int rc;
3511
3512         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3513
3514         dev = &rte_eth_devices[port_id];
3515
3516         if (!dev->intr_handle) {
3517                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3518                 return -ENOTSUP;
3519         }
3520
3521         intr_handle = dev->intr_handle;
3522         if (!intr_handle->intr_vec) {
3523                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3524                 return -EPERM;
3525         }
3526
3527         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3528                 vec = intr_handle->intr_vec[qid];
3529                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3530                 if (rc && rc != -EEXIST) {
3531                         RTE_ETHDEV_LOG(ERR,
3532                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3533                                 port_id, qid, op, epfd, vec);
3534                 }
3535         }
3536
3537         return 0;
3538 }
3539
3540 int __rte_experimental
3541 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3542 {
3543         struct rte_intr_handle *intr_handle;
3544         struct rte_eth_dev *dev;
3545         unsigned int efd_idx;
3546         uint32_t vec;
3547         int fd;
3548
3549         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3550
3551         dev = &rte_eth_devices[port_id];
3552
3553         if (queue_id >= dev->data->nb_rx_queues) {
3554                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3555                 return -1;
3556         }
3557
3558         if (!dev->intr_handle) {
3559                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3560                 return -1;
3561         }
3562
3563         intr_handle = dev->intr_handle;
3564         if (!intr_handle->intr_vec) {
3565                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3566                 return -1;
3567         }
3568
3569         vec = intr_handle->intr_vec[queue_id];
3570         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3571                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3572         fd = intr_handle->efds[efd_idx];
3573
3574         return fd;
3575 }
3576
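/*
 * Usage sketch (editorial example, not part of the library): fetch the
 * event fd backing one RX queue interrupt and add it to an epoll set
 * the application already owns (plain <sys/epoll.h>). "app_epfd" is a
 * hypothetical descriptor from epoll_create1().
 *
 *      struct epoll_event ev = { .events = EPOLLIN };
 *      int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *
 *      if (fd >= 0) {
 *              ev.data.fd = fd;
 *              epoll_ctl(app_epfd, EPOLL_CTL_ADD, fd, &ev);
 *      }
 */
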
3577 const struct rte_memzone *
3578 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3579                          uint16_t queue_id, size_t size, unsigned align,
3580                          int socket_id)
3581 {
3582         char z_name[RTE_MEMZONE_NAMESIZE];
3583         const struct rte_memzone *mz;
3584         int rc;
3585
3586         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3587                       dev->data->port_id, queue_id, ring_name);
3588         if (rc >= RTE_MEMZONE_NAMESIZE) {
3589                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
3590                 rte_errno = ENAMETOOLONG;
3591                 return NULL;
3592         }
3593
3594         mz = rte_memzone_lookup(z_name);
3595         if (mz)
3596                 return mz;
3597
3598         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3599                         RTE_MEMZONE_IOVA_CONTIG, align);
3600 }
3601
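/*
 * Usage sketch (editorial example, not part of the library): how a PMD
 * RX queue setup routine typically obtains its descriptor ring memory.
 * "ring_size" is a hypothetical byte size computed by the driver.
 *
 *      const struct rte_memzone *mz;
 *
 *      mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *                      ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *      if (mz == NULL)
 *              return -ENOMEM;
 *
 * Because the zone is looked up first, re-configuring the same queue
 * reuses the memory instead of leaking a second zone.
 */
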
3602 int __rte_experimental
3603 rte_eth_dev_create(struct rte_device *device, const char *name,
3604         size_t priv_data_size,
3605         ethdev_bus_specific_init ethdev_bus_specific_init,
3606         void *bus_init_params,
3607         ethdev_init_t ethdev_init, void *init_params)
3608 {
3609         struct rte_eth_dev *ethdev;
3610         int retval;
3611
3612         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3613
3614         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3615                 ethdev = rte_eth_dev_allocate(name);
3616                 if (!ethdev)
3617                         return -ENODEV;
3618
3619                 if (priv_data_size) {
3620                         ethdev->data->dev_private = rte_zmalloc_socket(
3621                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3622                                 device->numa_node);
3623
3624                         if (!ethdev->data->dev_private) {
3625                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3626                                 retval = -ENOMEM;
3627                                 goto probe_failed;
3628                         }
3629                 }
3630         } else {
3631                 ethdev = rte_eth_dev_attach_secondary(name);
3632                 if (!ethdev) {
3633                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3634                                 "ethdev doesn't exist\n");
3635                         return -ENODEV;
3636                 }
3637         }
3638
3639         ethdev->device = device;
3640
3641         if (ethdev_bus_specific_init) {
3642                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3643                 if (retval) {
3644                         RTE_LOG(ERR, EAL,
3645                                 "ethdev bus specific initialisation failed\n");
3646                         goto probe_failed;
3647                 }
3648         }
3649
3650         retval = ethdev_init(ethdev, init_params);
3651         if (retval) {
3652                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3653                 goto probe_failed;
3654         }
3655
3656         rte_eth_dev_probing_finish(ethdev);
3657
3658         return retval;
3659
3660 probe_failed:
3661         rte_eth_dev_release_port(ethdev);
3662         return retval;
3663 }
3664
3665 int __rte_experimental
3666 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3667         ethdev_uninit_t ethdev_uninit)
3668 {
3669         int ret;
3670
3671         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3672         if (!ethdev)
3673                 return -ENODEV;
3674
3675         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3676
3677         ret = ethdev_uninit(ethdev);
3678         if (ret)
3679                 return ret;
3680
3681         return rte_eth_dev_release_port(ethdev);
3682 }
3683
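/*
 * Usage sketch (editorial example, not part of the library): how a bus
 * driver's probe hook would typically wrap rte_eth_dev_create(). All
 * "my_*" names are hypothetical; the helper allocates the port (and,
 * in the primary process, the private data) before calling back into
 * the driver.
 *
 *      static int
 *      my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *      {
 *              RTE_SET_USED(init_params);
 *              ethdev->dev_ops = &my_dev_ops;
 *              return 0;
 *      }
 *
 *      ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *                      sizeof(struct my_private), NULL, NULL,
 *                      my_ethdev_init, NULL);
 *
 * The matching remove path looks up the port by name and calls
 * rte_eth_dev_destroy(ethdev, my_ethdev_uninit).
 */
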
3684 int
3685 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3686                           int epfd, int op, void *data)
3687 {
3688         uint32_t vec;
3689         struct rte_eth_dev *dev;
3690         struct rte_intr_handle *intr_handle;
3691         int rc;
3692
3693         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3694
3695         dev = &rte_eth_devices[port_id];
3696         if (queue_id >= dev->data->nb_rx_queues) {
3697                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3698                 return -EINVAL;
3699         }
3700
3701         if (!dev->intr_handle) {
3702                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3703                 return -ENOTSUP;
3704         }
3705
3706         intr_handle = dev->intr_handle;
3707         if (!intr_handle->intr_vec) {
3708                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3709                 return -EPERM;
3710         }
3711
3712         vec = intr_handle->intr_vec[queue_id];
3713         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3714         if (rc && rc != -EEXIST) {
3715                 RTE_ETHDEV_LOG(ERR,
3716                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3717                         port_id, queue_id, op, epfd, vec);
3718                 return rc;
3719         }
3720
3721         return 0;
3722 }
3723
3724 int
3725 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3726                            uint16_t queue_id)
3727 {
3728         struct rte_eth_dev *dev;
3729
3730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3731
3732         dev = &rte_eth_devices[port_id];
3733
3734         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3735         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3736                                                                 queue_id));
3737 }
3738
3739 int
3740 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3741                             uint16_t queue_id)
3742 {
3743         struct rte_eth_dev *dev;
3744
3745         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3746
3747         dev = &rte_eth_devices[port_id];
3748
3749         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3750         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3751                                                                 queue_id));
3752 }
3753
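/*
 * Usage sketch (editorial example, not part of the library): the
 * interrupt-driven RX pattern used by applications such as
 * examples/l3fwd-power: map the queue vector into the per-thread epoll
 * set once, then arm, sleep, disarm and poll. "timeout_ms" is a
 * hypothetical timeout; -1 blocks indefinitely.
 *
 *      struct rte_epoll_event ev;
 *
 *      rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *                      RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *
 *      rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *      rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, timeout_ms);
 *      rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *      ... drain the queue with rte_eth_rx_burst() ...
 */
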
3754
3755 int
3756 rte_eth_dev_filter_supported(uint16_t port_id,
3757                              enum rte_filter_type filter_type)
3758 {
3759         struct rte_eth_dev *dev;
3760
3761         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3762
3763         dev = &rte_eth_devices[port_id];
3764         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3765         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3766                                 RTE_ETH_FILTER_NOP, NULL);
3767 }
3768
3769 int
3770 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3771                         enum rte_filter_op filter_op, void *arg)
3772 {
3773         struct rte_eth_dev *dev;
3774
3775         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3776
3777         dev = &rte_eth_devices[port_id];
3778         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3779         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3780                                                              filter_op, arg));
3781 }
3782
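/*
 * Usage sketch (editorial example, not part of the library): probe for
 * the legacy flow director filter before programming it through the
 * same entry point; new code should prefer the rte_flow API.
 * "fdir_filter" is a hypothetical struct rte_eth_fdir_filter.
 *
 *      if (rte_eth_dev_filter_supported(port_id,
 *                      RTE_ETH_FILTER_FDIR) == 0)
 *              rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                              RTE_ETH_FILTER_ADD, &fdir_filter);
 */
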
3783 const struct rte_eth_rxtx_callback *
3784 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3785                 rte_rx_callback_fn fn, void *user_param)
3786 {
3787 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3788         rte_errno = ENOTSUP;
3789         return NULL;
3790 #endif
3791         /* check input parameters */
3792         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3793                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3794                 rte_errno = EINVAL;
3795                 return NULL;
3796         }
3797         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3798
3799         if (cb == NULL) {
3800                 rte_errno = ENOMEM;
3801                 return NULL;
3802         }
3803
3804         cb->fn.rx = fn;
3805         cb->param = user_param;
3806
3807         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3808         /* Add the callbacks in fifo order. */
3809         struct rte_eth_rxtx_callback *tail =
3810                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3811
3812         if (!tail) {
3813                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3814
3815         } else {
3816                 while (tail->next)
3817                         tail = tail->next;
3818                 tail->next = cb;
3819         }
3820         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3821
3822         return cb;
3823 }
3824
3825 const struct rte_eth_rxtx_callback *
3826 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3827                 rte_rx_callback_fn fn, void *user_param)
3828 {
3829 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3830         rte_errno = ENOTSUP;
3831         return NULL;
3832 #endif
3833         /* check input parameters */
3834         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3835                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3836                 rte_errno = EINVAL;
3837                 return NULL;
3838         }
3839
3840         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3841
3842         if (cb == NULL) {
3843                 rte_errno = ENOMEM;
3844                 return NULL;
3845         }
3846
3847         cb->fn.rx = fn;
3848         cb->param = user_param;
3849
3850         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3851         /* Add the callback at the first position */
3852         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3853         rte_smp_wmb();
3854         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3855         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3856
3857         return cb;
3858 }
3859
3860 const struct rte_eth_rxtx_callback *
3861 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3862                 rte_tx_callback_fn fn, void *user_param)
3863 {
3864 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3865         rte_errno = ENOTSUP;
3866         return NULL;
3867 #endif
3868         /* check input parameters */
3869         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3870                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3871                 rte_errno = EINVAL;
3872                 return NULL;
3873         }
3874
3875         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3876
3877         if (cb == NULL) {
3878                 rte_errno = ENOMEM;
3879                 return NULL;
3880         }
3881
3882         cb->fn.tx = fn;
3883         cb->param = user_param;
3884
3885         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3886         /* Add the callbacks in fifo order. */
3887         struct rte_eth_rxtx_callback *tail =
3888                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3889
3890         if (!tail) {
3891                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3892
3893         } else {
3894                 while (tail->next)
3895                         tail = tail->next;
3896                 tail->next = cb;
3897         }
3898         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3899
3900         return cb;
3901 }
3902
3903 int
3904 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3905                 const struct rte_eth_rxtx_callback *user_cb)
3906 {
3907 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3908         return -ENOTSUP;
3909 #endif
3910         /* Check input parameters. */
3911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3912         if (user_cb == NULL ||
3913                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3914                 return -EINVAL;
3915
3916         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3917         struct rte_eth_rxtx_callback *cb;
3918         struct rte_eth_rxtx_callback **prev_cb;
3919         int ret = -EINVAL;
3920
3921         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3922         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3923         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3924                 cb = *prev_cb;
3925                 if (cb == user_cb) {
3926                         /* Remove the user cb from the callback list. */
3927                         *prev_cb = cb->next;
3928                         ret = 0;
3929                         break;
3930                 }
3931         }
3932         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3933
3934         return ret;
3935 }
3936
3937 int
3938 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3939                 const struct rte_eth_rxtx_callback *user_cb)
3940 {
3941 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3942         return -ENOTSUP;
3943 #endif
3944         /* Check input parameters. */
3945         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3946         if (user_cb == NULL ||
3947                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3948                 return -EINVAL;
3949
3950         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3951         int ret = -EINVAL;
3952         struct rte_eth_rxtx_callback *cb;
3953         struct rte_eth_rxtx_callback **prev_cb;
3954
3955         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3956         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3957         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3958                 cb = *prev_cb;
3959                 if (cb == user_cb) {
3960                         /* Remove the user cb from the callback list. */
3961                         *prev_cb = cb->next;
3962                         ret = 0;
3963                         break;
3964                 }
3965         }
3966         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3967
3968         return ret;
3969 }
3970
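/*
 * Usage sketch (editorial example, not part of the library): count
 * received packets on queue 0 with a post-RX callback, then detach it.
 * Removal only unlinks the callback; its memory may be freed only once
 * no rte_eth_rx_burst() call can still be executing it.
 *
 *      static uint64_t rx_count;
 *
 *      static uint16_t
 *      count_pkts(uint16_t port_id, uint16_t queue_id,
 *                      struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *                      uint16_t max_pkts, void *user_param)
 *      {
 *              uint64_t *counter = user_param;
 *
 *              RTE_SET_USED(port_id);
 *              RTE_SET_USED(queue_id);
 *              RTE_SET_USED(pkts);
 *              RTE_SET_USED(max_pkts);
 *              *counter += nb_pkts;
 *              return nb_pkts;
 *      }
 *
 *      const struct rte_eth_rxtx_callback *cb =
 *              rte_eth_add_rx_callback(port_id, 0, count_pkts, &rx_count);
 *      ...
 *      rte_eth_remove_rx_callback(port_id, 0, cb);
 */
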
3971 int
3972 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3973         struct rte_eth_rxq_info *qinfo)
3974 {
3975         struct rte_eth_dev *dev;
3976
3977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3978
3979         if (qinfo == NULL)
3980                 return -EINVAL;
3981
3982         dev = &rte_eth_devices[port_id];
3983         if (queue_id >= dev->data->nb_rx_queues) {
3984                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3985                 return -EINVAL;
3986         }
3987
3988         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3989
3990         memset(qinfo, 0, sizeof(*qinfo));
3991         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3992         return 0;
3993 }
3994
3995 int
3996 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3997         struct rte_eth_txq_info *qinfo)
3998 {
3999         struct rte_eth_dev *dev;
4000
4001         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4002
4003         if (qinfo == NULL)
4004                 return -EINVAL;
4005
4006         dev = &rte_eth_devices[port_id];
4007         if (queue_id >= dev->data->nb_tx_queues) {
4008                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4009                 return -EINVAL;
4010         }
4011
4012         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4013
4014         memset(qinfo, 0, sizeof(*qinfo));
4015         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4016
4017         return 0;
4018 }
4019
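/*
 * Usage sketch (editorial example, not part of the library): inspect a
 * configured RX queue, e.g. to log its actual ring size.
 *
 *      struct rte_eth_rxq_info qinfo;
 *
 *      if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *              printf("rxq 0: %u descriptors\n", qinfo.nb_desc);
 */
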
4020 int
4021 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4022                              struct ether_addr *mc_addr_set,
4023                              uint32_t nb_mc_addr)
4024 {
4025         struct rte_eth_dev *dev;
4026
4027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4028
4029         dev = &rte_eth_devices[port_id];
4030         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4031         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4032                                                 mc_addr_set, nb_mc_addr));
4033 }
4034
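/*
 * Usage sketch (editorial example, not part of the library): replace
 * the port's multicast filter with a single group address; passing
 * nb_mc_addr == 0 clears the list.
 *
 *      struct ether_addr mc = {
 *              .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
 *      };
 *
 *      rte_eth_dev_set_mc_addr_list(port_id, &mc, 1);
 */
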
4035 int
4036 rte_eth_timesync_enable(uint16_t port_id)
4037 {
4038         struct rte_eth_dev *dev;
4039
4040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4041         dev = &rte_eth_devices[port_id];
4042
4043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4044         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4045 }
4046
4047 int
4048 rte_eth_timesync_disable(uint16_t port_id)
4049 {
4050         struct rte_eth_dev *dev;
4051
4052         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4053         dev = &rte_eth_devices[port_id];
4054
4055         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4056         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4057 }
4058
4059 int
4060 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4061                                    uint32_t flags)
4062 {
4063         struct rte_eth_dev *dev;
4064
4065         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4066         dev = &rte_eth_devices[port_id];
4067
4068         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4069         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4070                                 (dev, timestamp, flags));
4071 }
4072
4073 int
4074 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4075                                    struct timespec *timestamp)
4076 {
4077         struct rte_eth_dev *dev;
4078
4079         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4080         dev = &rte_eth_devices[port_id];
4081
4082         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4083         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4084                                 (dev, timestamp));
4085 }
4086
4087 int
4088 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4089 {
4090         struct rte_eth_dev *dev;
4091
4092         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4093         dev = &rte_eth_devices[port_id];
4094
4095         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4096         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4097                                                                       delta));
4098 }
4099
4100 int
4101 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4102 {
4103         struct rte_eth_dev *dev;
4104
4105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4106         dev = &rte_eth_devices[port_id];
4107
4108         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4109         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4110                                                                 timestamp));
4111 }
4112
4113 int
4114 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4115 {
4116         struct rte_eth_dev *dev;
4117
4118         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4119         dev = &rte_eth_devices[port_id];
4120
4121         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4122         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4123                                                                 timestamp));
4124 }
4125
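/*
 * Usage sketch (editorial example, not part of the library): IEEE
 * 1588-style use of the timesync ops: start the device clock, read it,
 * and nudge it by a signed offset in nanoseconds.
 *
 *      struct timespec ts;
 *
 *      rte_eth_timesync_enable(port_id);
 *      rte_eth_timesync_read_time(port_id, &ts);
 *      rte_eth_timesync_adjust_time(port_id, -1000);
 *      ...
 *      rte_eth_timesync_disable(port_id);
 */
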
4126 int
4127 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4128 {
4129         struct rte_eth_dev *dev;
4130
4131         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4132
4133         dev = &rte_eth_devices[port_id];
4134         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4135         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4136 }
4137
4138 int
4139 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4140 {
4141         struct rte_eth_dev *dev;
4142
4143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4144
4145         dev = &rte_eth_devices[port_id];
4146         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4147         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4148 }
4149
4150 int
4151 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4152 {
4153         struct rte_eth_dev *dev;
4154
4155         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4156
4157         dev = &rte_eth_devices[port_id];
4158         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4159         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4160 }
4161
4162 int
4163 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4164 {
4165         struct rte_eth_dev *dev;
4166
4167         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4168
4169         dev = &rte_eth_devices[port_id];
4170         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4171         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4172 }
4173
4174 int __rte_experimental
4175 rte_eth_dev_get_module_info(uint16_t port_id,
4176                             struct rte_eth_dev_module_info *modinfo)
4177 {
4178         struct rte_eth_dev *dev;
4179
4180         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4181
4182         dev = &rte_eth_devices[port_id];
4183         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4184         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4185 }
4186
4187 int __rte_experimental
4188 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4189                               struct rte_dev_eeprom_info *info)
4190 {
4191         struct rte_eth_dev *dev;
4192
4193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4194
4195         dev = &rte_eth_devices[port_id];
4196         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4197         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4198 }
4199
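/*
 * Usage sketch (editorial example, not part of the library): query the
 * plugged transceiver's EEPROM size, then dump its contents into a
 * caller-provided buffer.
 *
 *      struct rte_eth_dev_module_info modinfo;
 *      struct rte_dev_eeprom_info eeprom;
 *      uint8_t buf[1024];
 *
 *      if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0 &&
 *                      modinfo.eeprom_len <= sizeof(buf)) {
 *              memset(&eeprom, 0, sizeof(eeprom));
 *              eeprom.length = modinfo.eeprom_len;
 *              eeprom.data = buf;
 *              rte_eth_dev_get_module_eeprom(port_id, &eeprom);
 *      }
 */
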
4200 int
4201 rte_eth_dev_get_dcb_info(uint16_t port_id,
4202                              struct rte_eth_dcb_info *dcb_info)
4203 {
4204         struct rte_eth_dev *dev;
4205
4206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4207
4208         dev = &rte_eth_devices[port_id];
4209         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4210
4211         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4212         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4213 }
4214
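/*
 * Usage sketch (editorial example, not part of the library):
 *
 *      struct rte_eth_dcb_info dcb_info;
 *
 *      if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0)
 *              printf("%u traffic classes\n", dcb_info.nb_tcs);
 */
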
4215 int
4216 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4217                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4218 {
4219         struct rte_eth_dev *dev;
4220
4221         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4222         if (l2_tunnel == NULL) {
4223                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4224                 return -EINVAL;
4225         }
4226
4227         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4228                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4229                 return -EINVAL;
4230         }
4231
4232         dev = &rte_eth_devices[port_id];
4233         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4234                                 -ENOTSUP);
4235         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4236                                                                 l2_tunnel));
4237 }
4238
4239 int
4240 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4241                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4242                                   uint32_t mask,
4243                                   uint8_t en)
4244 {
4245         struct rte_eth_dev *dev;
4246
4247         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4248
4249         if (l2_tunnel == NULL) {
4250                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4251                 return -EINVAL;
4252         }
4253
4254         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4255                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4256                 return -EINVAL;
4257         }
4258
4259         if (mask == 0) {
4260                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4261                 return -EINVAL;
4262         }
4263
4264         dev = &rte_eth_devices[port_id];
4265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4266                                 -ENOTSUP);
4267         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4268                                                         l2_tunnel, mask, en));
4269 }
4270
4271 static void
4272 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4273                            const struct rte_eth_desc_lim *desc_lim)
4274 {
4275         if (desc_lim->nb_align != 0)
4276                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4277
4278         if (desc_lim->nb_max != 0)
4279                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4280
4281         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4282 }
4283
4284 int
4285 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4286                                  uint16_t *nb_rx_desc,
4287                                  uint16_t *nb_tx_desc)
4288 {
4289         struct rte_eth_dev *dev;
4290         struct rte_eth_dev_info dev_info;
4291
4292         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4293
4294         dev = &rte_eth_devices[port_id];
4295         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4296
4297         rte_eth_dev_info_get(port_id, &dev_info);
4298
4299         if (nb_rx_desc != NULL)
4300                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4301
4302         if (nb_tx_desc != NULL)
4303                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4304
4305         return 0;
4306 }
4307
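/*
 * Usage sketch (editorial example, not part of the library): clamp the
 * application's preferred ring sizes to the PMD's limits before queue
 * setup, as testpmd does. "mbuf_pool" is a hypothetical mempool from
 * rte_pktmbuf_pool_create().
 *
 *      uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *      rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *      rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *                      NULL, mbuf_pool);
 *      rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(),
 *                      NULL);
 */
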
4308 int
4309 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4310 {
4311         struct rte_eth_dev *dev;
4312
4313         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4314
4315         if (pool == NULL)
4316                 return -EINVAL;
4317
4318         dev = &rte_eth_devices[port_id];
4319
4320         if (*dev->dev_ops->pool_ops_supported == NULL)
4321                 return 1; /* all pools are supported */
4322
4323         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4324 }
4325
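/*
 * Usage sketch (editorial example, not part of the library): check
 * whether the port can use mempools backed by the "ring_mp_mc" handler
 * before creating the pool; a negative return means it cannot.
 *
 *      if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") < 0)
 *              ... pick another handler ...
 */
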
4326 /**
4327  * A set of values to describe the possible states of a switch domain.
4328  */
4329 enum rte_eth_switch_domain_state {
4330         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4331         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4332 };
4333
4334 /**
4335  * Array of switch domains available for allocation. Array is sized to
4336  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4337  * ethdev ports in a single process.
4338  */
4339 static struct rte_eth_dev_switch {
4340         enum rte_eth_switch_domain_state state;
4341 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4342
4343 int __rte_experimental
4344 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4345 {
4346         unsigned int i;
4347
4348         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4349
4350         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4351                 i < RTE_MAX_ETHPORTS; i++) {
4352                 if (rte_eth_switch_domains[i].state ==
4353                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4354                         rte_eth_switch_domains[i].state =
4355                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4356                         *domain_id = i;
4357                         return 0;
4358                 }
4359         }
4360
4361         return -ENOSPC;
4362 }
4363
4364 int __rte_experimental
4365 rte_eth_switch_domain_free(uint16_t domain_id)
4366 {
4367         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4368                 domain_id >= RTE_MAX_ETHPORTS)
4369                 return -EINVAL;
4370
4371         if (rte_eth_switch_domains[domain_id].state !=
4372                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4373                 return -EINVAL;
4374
4375         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4376
4377         return 0;
4378 }
4379
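/*
 * Usage sketch (editorial example, not part of the library): a PMD that
 * spawns port representors allocates one switch domain shared by the
 * physical port and all of its representors, and frees it on teardown.
 *
 *      uint16_t domain_id;
 *
 *      if (rte_eth_switch_domain_alloc(&domain_id) == 0) {
 *              ... publish domain_id via each port's switch_info ...
 *              rte_eth_switch_domain_free(domain_id);
 *      }
 */
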
4380 static int
4381 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4382 {
4383         int state;
4384         struct rte_kvargs_pair *pair;
4385         char *letter;
4386
4387         arglist->str = strdup(str_in);
4388         if (arglist->str == NULL)
4389                 return -ENOMEM;
4390
4391         letter = arglist->str;
4392         state = 0;
4393         arglist->count = 0;
4394         pair = &arglist->pairs[0];
4395         while (1) {
4396                 switch (state) {
4397                 case 0: /* Initial */
4398                         if (*letter == '=')
4399                                 return -EINVAL;
4400                         else if (*letter == '\0')
4401                                 return 0;
4402
4403                         state = 1;
4404                         pair->key = letter;
4405                         /* fall-thru */
4406
4407                 case 1: /* Parsing key */
4408                         if (*letter == '=') {
4409                                 *letter = '\0';
4410                                 pair->value = letter + 1;
4411                                 state = 2;
4412                         } else if (*letter == ',' || *letter == '\0')
4413                                 return -EINVAL;
4414                         break;
4415
4416
4417                 case 2: /* Parsing value */
4418                         if (*letter == '[')
4419                                 state = 3;
4420                         else if (*letter == ',') {
4421                                 *letter = '\0';
4422                                 arglist->count++;
4423                                 pair = &arglist->pairs[arglist->count];
4424                                 state = 0;
4425                         } else if (*letter == '\0') {
4426                                 letter--;
4427                                 arglist->count++;
4428                                 pair = &arglist->pairs[arglist->count];
4429                                 state = 0;
4430                         }
4431                         break;
4432
4433                 case 3: /* Parsing list */
4434                         if (*letter == ']')
4435                                 state = 2;
4436                         else if (*letter == '\0')
4437                                 return -EINVAL;
4438                         break;
4439                 }
4440                 letter++;
4441         }
4442 }
4443
4444 int __rte_experimental
4445 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4446 {
4447         struct rte_kvargs args;
4448         struct rte_kvargs_pair *pair;
4449         unsigned int i;
4450         int result = 0;
4451
4452         memset(eth_da, 0, sizeof(*eth_da));
4453
4454         result = rte_eth_devargs_tokenise(&args, dargs);
4455         if (result < 0)
4456                 goto parse_cleanup;
4457
4458         for (i = 0; i < args.count; i++) {
4459                 pair = &args.pairs[i];
4460                 if (strcmp("representor", pair->key) == 0) {
4461                         result = rte_eth_devargs_parse_list(pair->value,
4462                                 rte_eth_devargs_parse_representor_ports,
4463                                 eth_da);
4464                         if (result < 0)
4465                                 goto parse_cleanup;
4466                 }
4467         }
4468
4469 parse_cleanup:
4470         if (args.str)
4471                 free(args.str);
4472
4473         return result;
4474 }
4475
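/*
 * Usage sketch (editorial example, not part of the library): parse the
 * "representor" device argument a PMD receives at probe time.
 *
 *      struct rte_eth_devargs eth_da;
 *      uint16_t i;
 *
 *      if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) == 0)
 *              for (i = 0; i < eth_da.nb_representor_ports; i++)
 *                      ... create a port for
 *                      eth_da.representor_ports[i] ...
 */
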
4476 RTE_INIT(ethdev_init_log)
4477 {
4478         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
4479         if (rte_eth_dev_logtype >= 0)
4480                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
4481 }