ethdev: remove error return on RSS hash check
lib/librte_ethdev/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_kvargs.h>
38
39 #include "rte_ether.h"
40 #include "rte_ethdev.h"
41 #include "rte_ethdev_driver.h"
42 #include "ethdev_profile.h"
43
44 static int ethdev_logtype;
45
46 #define ethdev_log(level, fmt, ...) \
47         rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
48
49 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
50 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
51 static uint8_t eth_dev_last_created_port;
52
53 /* spinlock for eth device callbacks */
54 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
55
56 /* spinlock for add/remove rx callbacks */
57 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
58
59 /* spinlock for add/remove tx callbacks */
60 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
61
62 /* spinlock for shared data allocation */
63 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
64
65 /* store statistics names and their offsets in the stats structure */
66 struct rte_eth_xstats_name_off {
67         char name[RTE_ETH_XSTATS_NAME_SIZE];
68         unsigned offset;
69 };
70
71 /* Shared memory between primary and secondary processes. */
72 static struct {
73         uint64_t next_owner_id;
74         rte_spinlock_t ownership_lock;
75         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
76 } *rte_eth_dev_shared_data;
77
78 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
79         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
80         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
81         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
82         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
83         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
84         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
85         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
86         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
87                 rx_nombuf)},
88 };
89
90 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
91
92 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
93         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
94         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
95         {"errors", offsetof(struct rte_eth_stats, q_errors)},
96 };
97
98 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
99                 sizeof(rte_rxq_stats_strings[0]))
100
101 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
102         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
103         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
104 };
105 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
106                 sizeof(rte_txq_stats_strings[0]))
107
108 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
109         { DEV_RX_OFFLOAD_##_name, #_name }
110
111 static const struct {
112         uint64_t offload;
113         const char *name;
114 } rte_rx_offload_names[] = {
115         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
116         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
118         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
119         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
120         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
121         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
122         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
123         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
124         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
125         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
126         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
127         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
128         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
129         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
130         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
131 };
132
133 #undef RTE_RX_OFFLOAD_BIT2STR
134
135 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
136         { DEV_TX_OFFLOAD_##_name, #_name }
137
138 static const struct {
139         uint64_t offload;
140         const char *name;
141 } rte_tx_offload_names[] = {
142         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
143         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
147         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
150         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
151         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
155         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
156         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
157         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
158         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
159         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
160 };
161
162 #undef RTE_TX_OFFLOAD_BIT2STR
163
164 /**
165  * The user application callback description.
166  *
167  * It contains callback address to be registered by user application,
168  * the pointer to the parameters for callback, and the event type.
169  */
170 struct rte_eth_dev_callback {
171         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
172         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
173         void *cb_arg;                           /**< Parameter for callback */
174         void *ret_param;                        /**< Return parameter */
175         enum rte_eth_event_type event;          /**< Interrupt event type */
176         uint32_t active;                        /**< Callback is executing */
177 };
178
179 enum {
180         STAT_QMAP_TX = 0,
181         STAT_QMAP_RX
182 };
183
184 uint16_t
185 rte_eth_find_next(uint16_t port_id)
186 {
187         while (port_id < RTE_MAX_ETHPORTS &&
188                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
189                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
190                 port_id++;
191
192         if (port_id >= RTE_MAX_ETHPORTS)
193                 return RTE_MAX_ETHPORTS;
194
195         return port_id;
196 }
197
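/*
 * Illustrative usage sketch (hypothetical helper, not upstream code):
 * walk all valid ports with rte_eth_find_next(); this is essentially
 * the pattern behind the RTE_ETH_FOREACH_DEV() macro.
 */
static __rte_unused void
example_iterate_ports(void)
{
        uint16_t pid;

        for (pid = rte_eth_find_next(0);
             pid < RTE_MAX_ETHPORTS;
             pid = rte_eth_find_next(pid + 1))
                printf("port %u is valid\n", pid);
}
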
198 static void
199 rte_eth_dev_shared_data_prepare(void)
200 {
201         const unsigned flags = 0;
202         const struct rte_memzone *mz;
203
204         rte_spinlock_lock(&rte_eth_shared_data_lock);
205
206         if (rte_eth_dev_shared_data == NULL) {
207                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
208                         /* Allocate port data and ownership shared memory. */
209                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
210                                         sizeof(*rte_eth_dev_shared_data),
211                                         rte_socket_id(), flags);
212                 } else
213                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
214                 if (mz == NULL)
215                         rte_panic("Cannot allocate ethdev shared data\n");
216
217                 rte_eth_dev_shared_data = mz->addr;
218                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
219                         rte_eth_dev_shared_data->next_owner_id =
220                                         RTE_ETH_DEV_NO_OWNER + 1;
221                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
222                         memset(rte_eth_dev_shared_data->data, 0,
223                                sizeof(rte_eth_dev_shared_data->data));
224                 }
225         }
226
227         rte_spinlock_unlock(&rte_eth_shared_data_lock);
228 }
229
230 struct rte_eth_dev *
231 rte_eth_dev_allocated(const char *name)
232 {
233         unsigned i;
234
235         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
236                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
237                     strcmp(rte_eth_devices[i].data->name, name) == 0)
238                         return &rte_eth_devices[i];
239         }
240         return NULL;
241 }
242
243 static uint16_t
244 rte_eth_dev_find_free_port(void)
245 {
246         unsigned i;
247
248         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
249                 /* Using shared name field to find a free port. */
250                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
251                         RTE_ASSERT(rte_eth_devices[i].state ==
252                                    RTE_ETH_DEV_UNUSED);
253                         return i;
254                 }
255         }
256         return RTE_MAX_ETHPORTS;
257 }
258
259 static struct rte_eth_dev *
260 eth_dev_get(uint16_t port_id)
261 {
262         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
263
264         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
265         eth_dev->state = RTE_ETH_DEV_ATTACHED;
266
267         eth_dev_last_created_port = port_id;
268
269         return eth_dev;
270 }
271
272 struct rte_eth_dev *
273 rte_eth_dev_allocate(const char *name)
274 {
275         uint16_t port_id;
276         struct rte_eth_dev *eth_dev = NULL;
277
278         rte_eth_dev_shared_data_prepare();
279
280         /* Synchronize port creation between primary and secondary threads. */
281         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
282
283         port_id = rte_eth_dev_find_free_port();
284         if (port_id == RTE_MAX_ETHPORTS) {
285                 ethdev_log(ERR, "Reached maximum number of Ethernet ports");
286                 goto unlock;
287         }
288
289         if (rte_eth_dev_allocated(name) != NULL) {
290                 ethdev_log(ERR,
291                         "Ethernet Device with name %s already allocated!",
292                         name);
293                 goto unlock;
294         }
295
296         eth_dev = eth_dev_get(port_id);
297         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
298         eth_dev->data->port_id = port_id;
299         eth_dev->data->mtu = ETHER_MTU;
300
301 unlock:
302         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
303
304         if (eth_dev != NULL)
305                 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
306
307         return eth_dev;
308 }
309
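/*
 * Illustrative sketch of the usual PMD probe flow around
 * rte_eth_dev_allocate(); "net_dummy0" is a made-up device name.
 */
static __rte_unused int
example_probe(void)
{
        struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_dummy0");

        if (eth_dev == NULL)
                return -ENOMEM; /* no free port, or name already taken */
        /* a real PMD would now fill in dev_ops and the burst functions */
        return eth_dev->data->port_id;
}
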
310 /*
311  * Attach to a port already registered by the primary process, so that
312  * the same device gets the same port id in both the primary and
313  * secondary processes.
314  */
315 struct rte_eth_dev *
316 rte_eth_dev_attach_secondary(const char *name)
317 {
318         uint16_t i;
319         struct rte_eth_dev *eth_dev = NULL;
320
321         rte_eth_dev_shared_data_prepare();
322
323         /* Synchronize port attachment to primary port creation and release. */
324         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
325
326         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
327                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
328                         break;
329         }
330         if (i == RTE_MAX_ETHPORTS) {
331                 RTE_PMD_DEBUG_TRACE(
332                         "device %s is not driven by the primary process\n",
333                         name);
334         } else {
335                 eth_dev = eth_dev_get(i);
336                 RTE_ASSERT(eth_dev->data->port_id == i);
337         }
338
339         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
340         return eth_dev;
341 }
342
343 int
344 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
345 {
346         if (eth_dev == NULL)
347                 return -EINVAL;
348
349         rte_eth_dev_shared_data_prepare();
350
351         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
352
353         eth_dev->state = RTE_ETH_DEV_UNUSED;
354
355         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
356
357         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
358
359         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
360
361         return 0;
362 }
363
364 int
365 rte_eth_dev_is_valid_port(uint16_t port_id)
366 {
367         if (port_id >= RTE_MAX_ETHPORTS ||
368             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
369                 return 0;
370         else
371                 return 1;
372 }
373
374 static int
375 rte_eth_is_valid_owner_id(uint64_t owner_id)
376 {
377         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
378             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
379                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016"PRIX64".\n", owner_id);
380                 return 0;
381         }
382         return 1;
383 }
384
385 uint64_t
386 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
387 {
388         while (port_id < RTE_MAX_ETHPORTS &&
389                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
390                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
391                rte_eth_devices[port_id].data->owner.id != owner_id))
392                 port_id++;
393
394         if (port_id >= RTE_MAX_ETHPORTS)
395                 return RTE_MAX_ETHPORTS;
396
397         return port_id;
398 }
399
400 int __rte_experimental
401 rte_eth_dev_owner_new(uint64_t *owner_id)
402 {
403         rte_eth_dev_shared_data_prepare();
404
405         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
406
407         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
408
409         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
410         return 0;
411 }
412
413 static int
414 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
415                        const struct rte_eth_dev_owner *new_owner)
416 {
417         struct rte_eth_dev_owner *port_owner;
418         int sret;
419
420         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
421
422         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
423             !rte_eth_is_valid_owner_id(old_owner_id))
424                 return -EINVAL;
425
426         port_owner = &rte_eth_devices[port_id].data->owner;
427         if (port_owner->id != old_owner_id) {
428                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
429                                     " by %s_%016"PRIX64".\n", port_id,
430                                     port_owner->name, port_owner->id);
431                 return -EPERM;
432         }
433
434         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
435                         new_owner->name);
436         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
437                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
438                                     port_id);
439
440         port_owner->id = new_owner->id;
441
442         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016"PRIX64".\n", port_id,
443                             new_owner->name, new_owner->id);
444
445         return 0;
446 }
447
448 int __rte_experimental
449 rte_eth_dev_owner_set(const uint16_t port_id,
450                       const struct rte_eth_dev_owner *owner)
451 {
452         int ret;
453
454         rte_eth_dev_shared_data_prepare();
455
456         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
457
458         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
459
460         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
461         return ret;
462 }
463
464 int __rte_experimental
465 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
466 {
467         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
468                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
469         int ret;
470
471         rte_eth_dev_shared_data_prepare();
472
473         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
474
475         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
476
477         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
478         return ret;
479 }
480
481 void __rte_experimental
482 rte_eth_dev_owner_delete(const uint64_t owner_id)
483 {
484         uint16_t port_id;
485
486         rte_eth_dev_shared_data_prepare();
487
488         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
489
490         if (rte_eth_is_valid_owner_id(owner_id)) {
491                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
492                         memset(&rte_eth_devices[port_id].data->owner, 0,
493                                sizeof(struct rte_eth_dev_owner));
494                 RTE_PMD_DEBUG_TRACE("All port owners owned by %016"PRIX64
495                                     " identifier have been removed.\n", owner_id);
496         }
497
498         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
499 }
500
501 int __rte_experimental
502 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
503 {
504         int ret = 0;
505
506         rte_eth_dev_shared_data_prepare();
507
508         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
509
510         if (!rte_eth_dev_is_valid_port(port_id)) {
511                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
512                 ret = -ENODEV;
513         } else {
514                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
515                            sizeof(*owner));
516         }
517
518         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
519         return ret;
520 }
521
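/*
 * Illustrative sketch (hypothetical helper): take exclusive ownership
 * of a port with the experimental owner API, then release it.
 */
static __rte_unused int
example_own_port(uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .name = "example" };
        int ret;

        ret = rte_eth_dev_owner_new(&owner.id);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_owner_set(port_id, &owner);
        if (ret != 0)
                return ret;
        /* ... the port is now reserved for this owner ... */
        return rte_eth_dev_owner_unset(port_id, owner.id);
}
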
522 int
523 rte_eth_dev_socket_id(uint16_t port_id)
524 {
525         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
526         return rte_eth_devices[port_id].data->numa_node;
527 }
528
529 void *
530 rte_eth_dev_get_sec_ctx(uint16_t port_id)
531 {
532         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
533         return rte_eth_devices[port_id].security_ctx;
534 }
535
536 uint16_t
537 rte_eth_dev_count(void)
538 {
539         return rte_eth_dev_count_avail();
540 }
541
542 uint16_t
543 rte_eth_dev_count_avail(void)
544 {
545         uint16_t p;
546         uint16_t count;
547
548         count = 0;
549
550         RTE_ETH_FOREACH_DEV(p)
551                 count++;
552
553         return count;
554 }
555
556 uint16_t __rte_experimental
557 rte_eth_dev_count_total(void)
558 {
559         uint16_t port, count = 0;
560
561         for (port = 0; port < RTE_MAX_ETHPORTS; port++)
562                 if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
563                         count++;
564
565         return count;
566 }
567
568 int
569 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
570 {
571         char *tmp;
572
573         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
574
575         if (name == NULL) {
576                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
577                 return -EINVAL;
578         }
579
580         /* shouldn't check 'rte_eth_devices[i].data',
581          * because it might be overwritten by a VDEV PMD */
582         tmp = rte_eth_dev_shared_data->data[port_id].name;
583         strcpy(name, tmp);
584         return 0;
585 }
586
587 int
588 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
589 {
590         uint32_t pid;
591
592         if (name == NULL) {
593                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
594                 return -EINVAL;
595         }
596
597         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
598                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
599                     !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
600                         *port_id = pid;
601                         return 0;
602                 }
603         }
604
605         return -ENODEV;
606 }
607
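/*
 * Illustrative sketch (hypothetical helper): round-trip between a port
 * id and its device name; RTE_ETH_NAME_MAX_LEN bounds the name that
 * rte_eth_dev_get_name_by_port() writes.
 */
static __rte_unused int
example_name_roundtrip(uint16_t port_id)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        uint16_t found;
        int ret;

        ret = rte_eth_dev_get_name_by_port(port_id, name);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_get_port_by_name(name, &found);
        if (ret == 0)
                RTE_ASSERT(found == port_id);
        return ret;
}
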
608 static int
609 eth_err(uint16_t port_id, int ret)
610 {
611         if (ret == 0)
612                 return 0;
613         if (rte_eth_dev_is_removed(port_id))
614                 return -EIO;
615         return ret;
616 }
617
618 /* attach the new device, then store its port id */
619 int
620 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
621 {
622         int current = rte_eth_dev_count_total();
623         struct rte_devargs da;
624         int ret = -1;
625
626         memset(&da, 0, sizeof(da));
627
628         if ((devargs == NULL) || (port_id == NULL)) {
629                 ret = -EINVAL;
630                 goto err;
631         }
632
633         /* parse devargs */
634         if (rte_devargs_parse(&da, "%s", devargs))
635                 goto err;
636
637         ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
638         if (ret < 0)
639                 goto err;
640
641         /* no point looking at the port count if no port exists */
642         if (!rte_eth_dev_count_total()) {
643                 ethdev_log(ERR, "No port found for device (%s)", da.name);
644                 ret = -1;
645                 goto err;
646         }
647
648         /* if nothing happened, there is a bug here, since some driver told us
649          * it did attach a device, but did not create a port.
650          * FIXME: race condition in case of plug-out of another device
651          */
652         if (current == rte_eth_dev_count_total()) {
653                 ret = -1;
654                 goto err;
655         }
656
657         *port_id = eth_dev_last_created_port;
658         ret = 0;
659
660 err:
661         free(da.args);
662         return ret;
663 }
664
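/*
 * Illustrative sketch: hot-plug the null PMD by devargs string and get
 * the new port id back; any bus/device string accepted by
 * rte_devargs_parse() works the same way.
 */
static __rte_unused int
example_attach_null_vdev(uint16_t *port_id)
{
        return rte_eth_dev_attach("net_null0", port_id);
}
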
665 /* detach the device; the name output parameter is no longer used */
666 int
667 rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
668 {
669         struct rte_device *dev;
670         struct rte_bus *bus;
671         uint32_t dev_flags;
672         int ret = -1;
673
674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
675
676         dev_flags = rte_eth_devices[port_id].data->dev_flags;
677         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
678                 ethdev_log(ERR,
679                         "Port %" PRIu16 " is bonded, cannot detach", port_id);
680                 return -ENOTSUP;
681         }
682
683         dev = rte_eth_devices[port_id].device;
684         if (dev == NULL)
685                 return -EINVAL;
686
687         bus = rte_bus_find_by_device(dev);
688         if (bus == NULL)
689                 return -ENOENT;
690
691         ret = rte_eal_hotplug_remove(bus->name, dev->name);
692         if (ret < 0)
693                 return ret;
694
695         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
696         return 0;
697 }
698
699 static int
700 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
701 {
702         uint16_t old_nb_queues = dev->data->nb_rx_queues;
703         void **rxq;
704         unsigned i;
705
706         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
707                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
708                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
709                                 RTE_CACHE_LINE_SIZE);
710                 if (dev->data->rx_queues == NULL) {
711                         dev->data->nb_rx_queues = 0;
712                         return -(ENOMEM);
713                 }
714         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
715                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
716
717                 rxq = dev->data->rx_queues;
718
719                 for (i = nb_queues; i < old_nb_queues; i++)
720                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
721                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
722                                 RTE_CACHE_LINE_SIZE);
723                 if (rxq == NULL)
724                         return -(ENOMEM);
725                 if (nb_queues > old_nb_queues) {
726                         uint16_t new_qs = nb_queues - old_nb_queues;
727
728                         memset(rxq + old_nb_queues, 0,
729                                 sizeof(rxq[0]) * new_qs);
730                 }
731
732                 dev->data->rx_queues = rxq;
733
734         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
735                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
736
737                 rxq = dev->data->rx_queues;
738
739                 for (i = nb_queues; i < old_nb_queues; i++)
740                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
741
742                 rte_free(dev->data->rx_queues);
743                 dev->data->rx_queues = NULL;
744         }
745         dev->data->nb_rx_queues = nb_queues;
746         return 0;
747 }
748
749 int
750 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
751 {
752         struct rte_eth_dev *dev;
753
754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
755
756         dev = &rte_eth_devices[port_id];
757         if (!dev->data->dev_started) {
758                 RTE_PMD_DEBUG_TRACE(
759                     "port %d must be started before starting any queue\n", port_id);
760                 return -EINVAL;
761         }
762
763         if (rx_queue_id >= dev->data->nb_rx_queues) {
764                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
765                 return -EINVAL;
766         }
767
768         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
769
770         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
771                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
772                         " already started\n",
773                         rx_queue_id, port_id);
774                 return 0;
775         }
776
777         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
778                                                              rx_queue_id));
779
780 }
781
782 int
783 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
784 {
785         struct rte_eth_dev *dev;
786
787         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
788
789         dev = &rte_eth_devices[port_id];
790         if (rx_queue_id >= dev->data->nb_rx_queues) {
791                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
792                 return -EINVAL;
793         }
794
795         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
796
797         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
798                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
799                         " already stopped\n",
800                         rx_queue_id, port_id);
801                 return 0;
802         }
803
804         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
805
806 }
807
808 int
809 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
810 {
811         struct rte_eth_dev *dev;
812
813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
814
815         dev = &rte_eth_devices[port_id];
816         if (!dev->data->dev_started) {
817                 RTE_PMD_DEBUG_TRACE(
818                     "port %d must be started before starting any queue\n", port_id);
819                 return -EINVAL;
820         }
821
822         if (tx_queue_id >= dev->data->nb_tx_queues) {
823                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
824                 return -EINVAL;
825         }
826
827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
828
829         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
830         RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
831                         " already started\n",
832                         tx_queue_id, port_id);
833                 return 0;
834         }
835
836         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
837                                                              tx_queue_id));
838
839 }
840
841 int
842 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
843 {
844         struct rte_eth_dev *dev;
845
846         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
847
848         dev = &rte_eth_devices[port_id];
849         if (tx_queue_id >= dev->data->nb_tx_queues) {
850                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
851                 return -EINVAL;
852         }
853
854         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
855
856         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
857         RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
858                         " already stopped\n",
859                         tx_queue_id, port_id);
860                 return 0;
861         }
862
863         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
864
865 }
866
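/*
 * Illustrative sketch (hypothetical helper): queues set up with the
 * deferred_start flag stay stopped across rte_eth_dev_start() and are
 * brought up individually afterwards.
 */
static __rte_unused int
example_start_queue_pair(uint16_t port_id, uint16_t queue_id)
{
        int ret;

        ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
        if (ret != 0)
                return ret; /* -EIO if the device was hot-unplugged */
        return rte_eth_dev_tx_queue_start(port_id, queue_id);
}
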
867 static int
868 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
869 {
870         uint16_t old_nb_queues = dev->data->nb_tx_queues;
871         void **txq;
872         unsigned i;
873
874         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
875                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
876                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
877                                                    RTE_CACHE_LINE_SIZE);
878                 if (dev->data->tx_queues == NULL) {
879                         dev->data->nb_tx_queues = 0;
880                         return -(ENOMEM);
881                 }
882         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
883                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
884
885                 txq = dev->data->tx_queues;
886
887                 for (i = nb_queues; i < old_nb_queues; i++)
888                         (*dev->dev_ops->tx_queue_release)(txq[i]);
889                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
890                                   RTE_CACHE_LINE_SIZE);
891                 if (txq == NULL)
892                         return -ENOMEM;
893                 if (nb_queues > old_nb_queues) {
894                         uint16_t new_qs = nb_queues - old_nb_queues;
895
896                         memset(txq + old_nb_queues, 0,
897                                sizeof(txq[0]) * new_qs);
898                 }
899
900                 dev->data->tx_queues = txq;
901
902         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
903                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
904
905                 txq = dev->data->tx_queues;
906
907                 for (i = nb_queues; i < old_nb_queues; i++)
908                         (*dev->dev_ops->tx_queue_release)(txq[i]);
909
910                 rte_free(dev->data->tx_queues);
911                 dev->data->tx_queues = NULL;
912         }
913         dev->data->nb_tx_queues = nb_queues;
914         return 0;
915 }
916
917 uint32_t
918 rte_eth_speed_bitflag(uint32_t speed, int duplex)
919 {
920         switch (speed) {
921         case ETH_SPEED_NUM_10M:
922                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
923         case ETH_SPEED_NUM_100M:
924                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
925         case ETH_SPEED_NUM_1G:
926                 return ETH_LINK_SPEED_1G;
927         case ETH_SPEED_NUM_2_5G:
928                 return ETH_LINK_SPEED_2_5G;
929         case ETH_SPEED_NUM_5G:
930                 return ETH_LINK_SPEED_5G;
931         case ETH_SPEED_NUM_10G:
932                 return ETH_LINK_SPEED_10G;
933         case ETH_SPEED_NUM_20G:
934                 return ETH_LINK_SPEED_20G;
935         case ETH_SPEED_NUM_25G:
936                 return ETH_LINK_SPEED_25G;
937         case ETH_SPEED_NUM_40G:
938                 return ETH_LINK_SPEED_40G;
939         case ETH_SPEED_NUM_50G:
940                 return ETH_LINK_SPEED_50G;
941         case ETH_SPEED_NUM_56G:
942                 return ETH_LINK_SPEED_56G;
943         case ETH_SPEED_NUM_100G:
944                 return ETH_LINK_SPEED_100G;
945         default:
946                 return 0;
947         }
948 }
949
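/*
 * Illustrative sketch: build a rte_eth_conf.link_speeds mask that
 * advertises only 1G and 10G full duplex.
 */
static __rte_unused uint32_t
example_link_speeds(void)
{
        return rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, ETH_LINK_FULL_DUPLEX) |
               rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
}
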
950 /**
951  * Convert from the rxmode bit-field API to the Rx offload flags API.
952  */
953 static void
954 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
955                                     uint64_t *rx_offloads)
956 {
957         uint64_t offloads = 0;
958
959         if (rxmode->header_split == 1)
960                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
961         if (rxmode->hw_ip_checksum == 1)
962                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
963         if (rxmode->hw_vlan_filter == 1)
964                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
965         if (rxmode->hw_vlan_strip == 1)
966                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
967         if (rxmode->hw_vlan_extend == 1)
968                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
969         if (rxmode->jumbo_frame == 1)
970                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
971         if (rxmode->hw_strip_crc == 1)
972                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
973         if (rxmode->enable_scatter == 1)
974                 offloads |= DEV_RX_OFFLOAD_SCATTER;
975         if (rxmode->enable_lro == 1)
976                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
977         if (rxmode->hw_timestamp == 1)
978                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
979         if (rxmode->security == 1)
980                 offloads |= DEV_RX_OFFLOAD_SECURITY;
981
982         *rx_offloads = offloads;
983 }
984
985 const char * __rte_experimental
986 rte_eth_dev_rx_offload_name(uint64_t offload)
987 {
988         const char *name = "UNKNOWN";
989         unsigned int i;
990
991         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
992                 if (offload == rte_rx_offload_names[i].offload) {
993                         name = rte_rx_offload_names[i].name;
994                         break;
995                 }
996         }
997
998         return name;
999 }
1000
1001 const char * __rte_experimental
1002 rte_eth_dev_tx_offload_name(uint64_t offload)
1003 {
1004         const char *name = "UNKNOWN";
1005         unsigned int i;
1006
1007         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1008                 if (offload == rte_tx_offload_names[i].offload) {
1009                         name = rte_tx_offload_names[i].name;
1010                         break;
1011                 }
1012         }
1013
1014         return name;
1015 }
1016
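/*
 * Illustrative sketch (hypothetical helper): the name lookup takes one
 * offload bit at a time, so a mask is printed bit by bit.
 */
static __rte_unused void
example_print_rx_offloads(uint64_t offloads)
{
        while (offloads != 0) {
                uint64_t bit = offloads & ~(offloads - 1); /* lowest set bit */

                printf("%s ", rte_eth_dev_rx_offload_name(bit));
                offloads &= ~bit;
        }
        printf("\n");
}
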
1017 int
1018 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1019                       const struct rte_eth_conf *dev_conf)
1020 {
1021         struct rte_eth_dev *dev;
1022         struct rte_eth_dev_info dev_info;
1023         struct rte_eth_conf local_conf = *dev_conf;
1024         int diag;
1025
1026         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1027
1028         dev = &rte_eth_devices[port_id];
1029
1030         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1031         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1032
1033         /* If number of queues specified by application for both Rx and Tx is
1034          * zero, use driver preferred values. This cannot be done individually
1035          * as it is valid for either Tx or Rx (but not both) to be zero.
1036          * If driver does not provide any preferred valued, fall back on
1037          * EAL defaults.
1038          */
1039         if (nb_rx_q == 0 && nb_tx_q == 0) {
1040                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1041                 if (nb_rx_q == 0)
1042                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1043                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1044                 if (nb_tx_q == 0)
1045                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1046         }
1047
1048         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1049                 RTE_PMD_DEBUG_TRACE(
1050                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
1051                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1052                 return -EINVAL;
1053         }
1054
1055         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1056                 RTE_PMD_DEBUG_TRACE(
1057                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
1058                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1059                 return -EINVAL;
1060         }
1061
1062         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1063         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1064
1065         if (dev->data->dev_started) {
1066                 RTE_PMD_DEBUG_TRACE(
1067                     "port %d must be stopped to allow configuration\n", port_id);
1068                 return -EBUSY;
1069         }
1070
1071         /*
1072          * Convert between the two offloads APIs so that PMDs need to
1073          * support only one of them.
1074          */
1075         if (dev_conf->rxmode.ignore_offload_bitfield == 0)
1076                 rte_eth_convert_rx_offload_bitfield(
1077                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1078
1079         /* Copy the dev_conf parameter into the dev structure */
1080         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1081
1082         /*
1083          * Check that the numbers of RX and TX queues are not greater
1084          * than the maximum number of RX and TX queues supported by the
1085          * configured device.
1086          */
1087         if (nb_rx_q > dev_info.max_rx_queues) {
1088                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1089                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1090                 return -EINVAL;
1091         }
1092
1093         if (nb_tx_q > dev_info.max_tx_queues) {
1094                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1095                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1096                 return -EINVAL;
1097         }
1098
1099         /* Check that the device supports requested interrupts */
1100         if ((dev_conf->intr_conf.lsc == 1) &&
1101                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1102                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1103                                         dev->device->driver->name);
1104                         return -EINVAL;
1105         }
1106         if ((dev_conf->intr_conf.rmv == 1) &&
1107             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1108                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1109                                     dev->device->driver->name);
1110                 return -EINVAL;
1111         }
1112
1113         /*
1114          * If jumbo frames are enabled, check that the maximum RX packet
1115          * length is supported by the configured device.
1116          */
1117         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1118                 if (dev_conf->rxmode.max_rx_pkt_len >
1119                     dev_info.max_rx_pktlen) {
1120                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1121                                 " > max valid value %u\n",
1122                                 port_id,
1123                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1124                                 (unsigned)dev_info.max_rx_pktlen);
1125                         return -EINVAL;
1126                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1127                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1128                                 " < min valid value %u\n",
1129                                 port_id,
1130                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1131                                 (unsigned)ETHER_MIN_LEN);
1132                         return -EINVAL;
1133                 }
1134         } else {
1135                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1136                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1137                         /* Use default value */
1138                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1139                                                         ETHER_MAX_LEN;
1140         }
1141
1142         /* Warn if the device does not support the requested RSS hash functions. */
1143         if ((dev_info.flow_type_rss_offloads |
1144              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1145             dev_info.flow_type_rss_offloads) {
1146                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
1147                                     "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1148                                     port_id,
1149                                     dev_conf->rx_adv_conf.rss_conf.rss_hf,
1150                                     dev_info.flow_type_rss_offloads);
1151         }
1152
1153         /*
1154          * Setup new number of RX/TX queues and reconfigure device.
1155          */
1156         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1157         if (diag != 0) {
1158                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1159                                 port_id, diag);
1160                 return diag;
1161         }
1162
1163         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1164         if (diag != 0) {
1165                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1166                                 port_id, diag);
1167                 rte_eth_dev_rx_queue_config(dev, 0);
1168                 return diag;
1169         }
1170
1171         diag = (*dev->dev_ops->dev_configure)(dev);
1172         if (diag != 0) {
1173                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1174                                 port_id, diag);
1175                 rte_eth_dev_rx_queue_config(dev, 0);
1176                 rte_eth_dev_tx_queue_config(dev, 0);
1177                 return eth_err(port_id, diag);
1178         }
1179
1180         /* Initialize Rx profiling if enabled at compilation time. */
1181         diag = __rte_eth_profile_rx_init(port_id, dev);
1182         if (diag != 0) {
1183                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1184                                 port_id, diag);
1185                 rte_eth_dev_rx_queue_config(dev, 0);
1186                 rte_eth_dev_tx_queue_config(dev, 0);
1187                 return eth_err(port_id, diag);
1188         }
1189
1190         return 0;
1191 }
1192
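/*
 * Illustrative sketch of a minimal configuration: one Rx and one Tx
 * queue, the offload-flag API selected via ignore_offload_bitfield,
 * everything else left at its default.
 */
static __rte_unused int
example_configure(uint16_t port_id)
{
        struct rte_eth_conf conf = {
                .rxmode = {
                        .ignore_offload_bitfield = 1,
                        .max_rx_pkt_len = ETHER_MAX_LEN,
                },
        };

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
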
1193 void
1194 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1195 {
1196         if (dev->data->dev_started) {
1197                 RTE_PMD_DEBUG_TRACE(
1198                         "port %d must be stopped to allow reset\n",
1199                         dev->data->port_id);
1200                 return;
1201         }
1202
1203         rte_eth_dev_rx_queue_config(dev, 0);
1204         rte_eth_dev_tx_queue_config(dev, 0);
1205
1206         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1207 }
1208
1209 static void
1210 rte_eth_dev_config_restore(uint16_t port_id)
1211 {
1212         struct rte_eth_dev *dev;
1213         struct rte_eth_dev_info dev_info;
1214         struct ether_addr *addr;
1215         uint16_t i;
1216         uint32_t pool = 0;
1217         uint64_t pool_mask;
1218
1219         dev = &rte_eth_devices[port_id];
1220
1221         rte_eth_dev_info_get(port_id, &dev_info);
1222
1223         /* replay MAC address configuration including default MAC */
1224         addr = &dev->data->mac_addrs[0];
1225         if (*dev->dev_ops->mac_addr_set != NULL)
1226                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1227         else if (*dev->dev_ops->mac_addr_add != NULL)
1228                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1229
1230         if (*dev->dev_ops->mac_addr_add != NULL) {
1231                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1232                         addr = &dev->data->mac_addrs[i];
1233
1234                         /* skip zero address */
1235                         if (is_zero_ether_addr(addr))
1236                                 continue;
1237
1238                         pool = 0;
1239                         pool_mask = dev->data->mac_pool_sel[i];
1240
1241                         do {
1242                                 if (pool_mask & 1ULL)
1243                                         (*dev->dev_ops->mac_addr_add)(dev,
1244                                                 addr, i, pool);
1245                                 pool_mask >>= 1;
1246                                 pool++;
1247                         } while (pool_mask);
1248                 }
1249         }
1250
1251         /* replay promiscuous configuration */
1252         if (rte_eth_promiscuous_get(port_id) == 1)
1253                 rte_eth_promiscuous_enable(port_id);
1254         else if (rte_eth_promiscuous_get(port_id) == 0)
1255                 rte_eth_promiscuous_disable(port_id);
1256
1257         /* replay all multicast configuration */
1258         if (rte_eth_allmulticast_get(port_id) == 1)
1259                 rte_eth_allmulticast_enable(port_id);
1260         else if (rte_eth_allmulticast_get(port_id) == 0)
1261                 rte_eth_allmulticast_disable(port_id);
1262 }
1263
1264 int
1265 rte_eth_dev_start(uint16_t port_id)
1266 {
1267         struct rte_eth_dev *dev;
1268         int diag;
1269
1270         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1271
1272         dev = &rte_eth_devices[port_id];
1273
1274         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1275
1276         if (dev->data->dev_started != 0) {
1277                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1278                         " already started\n",
1279                         port_id);
1280                 return 0;
1281         }
1282
1283         diag = (*dev->dev_ops->dev_start)(dev);
1284         if (diag == 0)
1285                 dev->data->dev_started = 1;
1286         else
1287                 return eth_err(port_id, diag);
1288
1289         rte_eth_dev_config_restore(port_id);
1290
1291         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1292                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1293                 (*dev->dev_ops->link_update)(dev, 0);
1294         }
1295         return 0;
1296 }
1297
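/*
 * Illustrative sketch: bring a configured port up and down again; the
 * Rx/Tx burst loop would run between the two calls.
 */
static __rte_unused int
example_start_stop(uint16_t port_id)
{
        int ret = rte_eth_dev_start(port_id);

        if (ret != 0)
                return ret; /* -EIO here means the device is gone */
        /* ... datapath runs here ... */
        rte_eth_dev_stop(port_id);
        return 0;
}
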
1298 void
1299 rte_eth_dev_stop(uint16_t port_id)
1300 {
1301         struct rte_eth_dev *dev;
1302
1303         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1304         dev = &rte_eth_devices[port_id];
1305
1306         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1307
1308         if (dev->data->dev_started == 0) {
1309                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1310                         " already stopped\n",
1311                         port_id);
1312                 return;
1313         }
1314
1315         dev->data->dev_started = 0;
1316         (*dev->dev_ops->dev_stop)(dev);
1317 }
1318
1319 int
1320 rte_eth_dev_set_link_up(uint16_t port_id)
1321 {
1322         struct rte_eth_dev *dev;
1323
1324         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1325
1326         dev = &rte_eth_devices[port_id];
1327
1328         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1329         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1330 }
1331
1332 int
1333 rte_eth_dev_set_link_down(uint16_t port_id)
1334 {
1335         struct rte_eth_dev *dev;
1336
1337         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1338
1339         dev = &rte_eth_devices[port_id];
1340
1341         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1342         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1343 }
1344
1345 void
1346 rte_eth_dev_close(uint16_t port_id)
1347 {
1348         struct rte_eth_dev *dev;
1349
1350         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1351         dev = &rte_eth_devices[port_id];
1352
1353         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1354         dev->data->dev_started = 0;
1355         (*dev->dev_ops->dev_close)(dev);
1356
1357         dev->data->nb_rx_queues = 0;
1358         rte_free(dev->data->rx_queues);
1359         dev->data->rx_queues = NULL;
1360         dev->data->nb_tx_queues = 0;
1361         rte_free(dev->data->tx_queues);
1362         dev->data->tx_queues = NULL;
1363 }
1364
1365 int
1366 rte_eth_dev_reset(uint16_t port_id)
1367 {
1368         struct rte_eth_dev *dev;
1369         int ret;
1370
1371         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1372         dev = &rte_eth_devices[port_id];
1373
1374         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1375
1376         rte_eth_dev_stop(port_id);
1377         ret = dev->dev_ops->dev_reset(dev);
1378
1379         return eth_err(port_id, ret);
1380 }
1381
1382 int __rte_experimental
1383 rte_eth_dev_is_removed(uint16_t port_id)
1384 {
1385         struct rte_eth_dev *dev;
1386         int ret;
1387
1388         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1389
1390         dev = &rte_eth_devices[port_id];
1391
1392         if (dev->state == RTE_ETH_DEV_REMOVED)
1393                 return 1;
1394
1395         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1396
1397         ret = dev->dev_ops->is_removed(dev);
1398         if (ret != 0)
1399                 /* Device is physically removed. */
1400                 dev->state = RTE_ETH_DEV_REMOVED;
1401
1402         return ret;
1403 }
1404
1405 int
1406 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1407                        uint16_t nb_rx_desc, unsigned int socket_id,
1408                        const struct rte_eth_rxconf *rx_conf,
1409                        struct rte_mempool *mp)
1410 {
1411         int ret;
1412         uint32_t mbp_buf_size;
1413         struct rte_eth_dev *dev;
1414         struct rte_eth_dev_info dev_info;
1415         struct rte_eth_rxconf local_conf;
1416         void **rxq;
1417
1418         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1419
1420         dev = &rte_eth_devices[port_id];
1421         if (rx_queue_id >= dev->data->nb_rx_queues) {
1422                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1423                 return -EINVAL;
1424         }
1425
1426         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1427         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1428
1429         /*
1430          * Check the size of the mbuf data buffer.
1431          * This value must be provided in the private data of the memory pool.
1432          * First check that the memory pool has valid private data.
1433          */
1434         rte_eth_dev_info_get(port_id, &dev_info);
1435         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1436                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1437                                 mp->name, (int) mp->private_data_size,
1438                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1439                 return -ENOSPC;
1440         }
1441         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1442
1443         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1444                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1445                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1446                                 "=%d)\n",
1447                                 mp->name,
1448                                 (int)mbp_buf_size,
1449                                 (int)(RTE_PKTMBUF_HEADROOM +
1450                                       dev_info.min_rx_bufsize),
1451                                 (int)RTE_PKTMBUF_HEADROOM,
1452                                 (int)dev_info.min_rx_bufsize);
1453                 return -EINVAL;
1454         }
1455
1456         /* Use default specified by driver, if nb_rx_desc is zero */
1457         if (nb_rx_desc == 0) {
1458                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1459                 /* If driver default is also zero, fall back on EAL default */
1460                 if (nb_rx_desc == 0)
1461                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1462         }
1463
1464         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1465                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1466                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1467
1468                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1469                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1470                         nb_rx_desc,
1471                         dev_info.rx_desc_lim.nb_max,
1472                         dev_info.rx_desc_lim.nb_min,
1473                         dev_info.rx_desc_lim.nb_align);
1474                 return -EINVAL;
1475         }
1476
1477         if (dev->data->dev_started &&
1478                 !(dev_info.dev_capa &
1479                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1480                 return -EBUSY;
1481
1482         if (dev->data->rx_queue_state[rx_queue_id] !=
1483                 RTE_ETH_QUEUE_STATE_STOPPED)
1484                 return -EBUSY;
1485
1486         rxq = dev->data->rx_queues;
1487         if (rxq[rx_queue_id]) {
1488                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1489                                         -ENOTSUP);
1490                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1491                 rxq[rx_queue_id] = NULL;
1492         }
1493
1494         if (rx_conf == NULL)
1495                 rx_conf = &dev_info.default_rxconf;
1496
1497         local_conf = *rx_conf;
1498         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1499                 /*
1500                  * Reflect port offloads into queue offloads so that
1501                  * none of them are discarded.
1502                  */
1503                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1504                                                     &local_conf.offloads);
1505         }
1506
1507         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1508                                               socket_id, &local_conf, mp);
1509         if (!ret) {
1510                 if (!dev->data->min_rx_buf_size ||
1511                     dev->data->min_rx_buf_size > mbp_buf_size)
1512                         dev->data->min_rx_buf_size = mbp_buf_size;
1513         }
1514
1515         return eth_err(port_id, ret);
1516 }
1517
1518 /**
1519  * Convert values from the legacy txq_flags API into Tx offload flags.
1520  */
1521 static void
1522 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1523 {
1524         uint64_t offloads = 0;
1525
1526         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1527                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1528         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1529                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1530         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1531                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1532         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1533                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1534         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1535                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1536         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1537             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1538                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1539
1540         *tx_offloads = offloads;
1541 }
1542
1543 int
1544 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1545                        uint16_t nb_tx_desc, unsigned int socket_id,
1546                        const struct rte_eth_txconf *tx_conf)
1547 {
1548         struct rte_eth_dev *dev;
1549         struct rte_eth_dev_info dev_info;
1550         struct rte_eth_txconf local_conf;
1551         void **txq;
1552
1553         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1554
1555         dev = &rte_eth_devices[port_id];
1556         if (tx_queue_id >= dev->data->nb_tx_queues) {
1557                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1558                 return -EINVAL;
1559         }
1560
1561         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1562         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1563
1564         rte_eth_dev_info_get(port_id, &dev_info);
1565
1566         /* Use default specified by driver, if nb_tx_desc is zero */
1567         if (nb_tx_desc == 0) {
1568                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1569                 /* If driver default is also zero, fall back on EAL default */
1570                 if (nb_tx_desc == 0)
1571                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1572         }
1573         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1574             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1575             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1576                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1577                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1578                                 nb_tx_desc,
1579                                 dev_info.tx_desc_lim.nb_max,
1580                                 dev_info.tx_desc_lim.nb_min,
1581                                 dev_info.tx_desc_lim.nb_align);
1582                 return -EINVAL;
1583         }
1584
1585         if (dev->data->dev_started &&
1586                 !(dev_info.dev_capa &
1587                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1588                 return -EBUSY;
1589
1590         if (dev->data->tx_queue_state[tx_queue_id] !=
1591                 RTE_ETH_QUEUE_STATE_STOPPED)
1592                 return -EBUSY;
1593
1594         txq = dev->data->tx_queues;
1595         if (txq[tx_queue_id]) {
1596                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1597                                         -ENOTSUP);
1598                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1599                 txq[tx_queue_id] = NULL;
1600         }
1601
1602         if (tx_conf == NULL)
1603                 tx_conf = &dev_info.default_txconf;
1604
1605         /*
1606          * Convert between the txq_flags and offloads APIs so that
1607          * PMDs need to support only one of them.
1608          */
1609         local_conf = *tx_conf;
1610         if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
1611                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1612                                           &local_conf.offloads);
1613         }
1614
1615         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1616                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1617 }
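
/*
 * Usage sketch (not part of the library; the example_* helper is
 * hypothetical): set up one TX queue per worker using the defaults
 * chosen above. Passing nb_tx_desc == 0 selects the driver's
 * default_txportconf ring size (or RTE_ETH_DEV_FALLBACK_TX_RINGSIZE),
 * and a NULL tx_conf selects dev_info.default_txconf.
 */
static int
example_setup_tx_queues(uint16_t port_id, uint16_t nb_queues,
                        unsigned int socket_id)
{
        uint16_t q;
        int ret;

        for (q = 0; q < nb_queues; q++) {
                ret = rte_eth_tx_queue_setup(port_id, q, 0, socket_id, NULL);
                if (ret < 0)
                        return ret; /* already a negative errno value */
        }
        return 0;
}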
1618
1619 void
1620 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1621                 void *userdata __rte_unused)
1622 {
1623         unsigned i;
1624
1625         for (i = 0; i < unsent; i++)
1626                 rte_pktmbuf_free(pkts[i]);
1627 }
1628
1629 void
1630 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1631                 void *userdata)
1632 {
1633         uint64_t *count = userdata;
1634         unsigned i;
1635
1636         for (i = 0; i < unsent; i++)
1637                 rte_pktmbuf_free(pkts[i]);
1638
1639         *count += unsent;
1640 }
1641
1642 int
1643 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1644                 buffer_tx_error_fn cbfn, void *userdata)
1645 {
1646         buffer->error_callback = cbfn;
1647         buffer->error_userdata = userdata;
1648         return 0;
1649 }
1650
1651 int
1652 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1653 {
1654         int ret = 0;
1655
1656         if (buffer == NULL)
1657                 return -EINVAL;
1658
1659         buffer->size = size;
1660         if (buffer->error_callback == NULL) {
1661                 ret = rte_eth_tx_buffer_set_err_callback(
1662                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1663         }
1664
1665         return ret;
1666 }
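
/*
 * Usage sketch (hypothetical helper, not part of the library): allocate
 * and initialize a TX buffer that counts unsent packets instead of
 * silently dropping them. RTE_ETH_TX_BUFFER_SIZE() accounts for the
 * buffer header plus `size` mbuf pointers; rte_zmalloc_socket() leaves
 * error_callback NULL, so the init above would default to the drop
 * callback if we did not override it.
 */
static struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(uint16_t size, int socket_id, uint64_t *drop_count)
{
        struct rte_eth_dev_tx_buffer *buffer;

        buffer = rte_zmalloc_socket("tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(size), 0, socket_id);
        if (buffer == NULL)
                return NULL;

        rte_eth_tx_buffer_init(buffer, size);
        rte_eth_tx_buffer_set_err_callback(buffer,
                        rte_eth_tx_buffer_count_callback, drop_count);
        return buffer;
}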
1667
1668 int
1669 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1670 {
1671         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1672         int ret;
1673
1674         /* Validate input data. Bail if not valid or not supported. */
1675         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1676         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
        if (queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;
1677
1678         /* Call driver to free pending mbufs. */
1679         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1680                                                free_cnt);
1681         return eth_err(port_id, ret);
1682 }
1683
1684 void
1685 rte_eth_promiscuous_enable(uint16_t port_id)
1686 {
1687         struct rte_eth_dev *dev;
1688
1689         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1690         dev = &rte_eth_devices[port_id];
1691
1692         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1693         (*dev->dev_ops->promiscuous_enable)(dev);
1694         dev->data->promiscuous = 1;
1695 }
1696
1697 void
1698 rte_eth_promiscuous_disable(uint16_t port_id)
1699 {
1700         struct rte_eth_dev *dev;
1701
1702         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1703         dev = &rte_eth_devices[port_id];
1704
1705         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1706         dev->data->promiscuous = 0;
1707         (*dev->dev_ops->promiscuous_disable)(dev);
1708 }
1709
1710 int
1711 rte_eth_promiscuous_get(uint16_t port_id)
1712 {
1713         struct rte_eth_dev *dev;
1714
1715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1716
1717         dev = &rte_eth_devices[port_id];
1718         return dev->data->promiscuous;
1719 }
1720
1721 void
1722 rte_eth_allmulticast_enable(uint16_t port_id)
1723 {
1724         struct rte_eth_dev *dev;
1725
1726         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1727         dev = &rte_eth_devices[port_id];
1728
1729         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1730         (*dev->dev_ops->allmulticast_enable)(dev);
1731         dev->data->all_multicast = 1;
1732 }
1733
1734 void
1735 rte_eth_allmulticast_disable(uint16_t port_id)
1736 {
1737         struct rte_eth_dev *dev;
1738
1739         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1740         dev = &rte_eth_devices[port_id];
1741
1742         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1743         dev->data->all_multicast = 0;
1744         (*dev->dev_ops->allmulticast_disable)(dev);
1745 }
1746
1747 int
1748 rte_eth_allmulticast_get(uint16_t port_id)
1749 {
1750         struct rte_eth_dev *dev;
1751
1752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1753
1754         dev = &rte_eth_devices[port_id];
1755         return dev->data->all_multicast;
1756 }
1757
1758 void
1759 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1760 {
1761         struct rte_eth_dev *dev;
1762
1763         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1764         dev = &rte_eth_devices[port_id];
1765
1766         if (dev->data->dev_conf.intr_conf.lsc &&
1767             dev->data->dev_started)
1768                 rte_eth_linkstatus_get(dev, eth_link);
1769         else {
1770                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1771                 (*dev->dev_ops->link_update)(dev, 1);
1772                 *eth_link = dev->data->dev_link;
1773         }
1774 }
1775
1776 void
1777 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1778 {
1779         struct rte_eth_dev *dev;
1780
1781         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1782         dev = &rte_eth_devices[port_id];
1783
1784         if (dev->data->dev_conf.intr_conf.lsc &&
1785             dev->data->dev_started)
1786                 rte_eth_linkstatus_get(dev, eth_link);
1787         else {
1788                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1789                 (*dev->dev_ops->link_update)(dev, 0);
1790                 *eth_link = dev->data->dev_link;
1791         }
1792 }
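
/*
 * Usage sketch (hypothetical helper): poll the non-blocking variant
 * until the port reports link up, for at most ~9 seconds. This mirrors
 * the check_all_ports_link_status() loops found in the DPDK example
 * applications.
 */
static int
example_wait_for_link_up(uint16_t port_id)
{
        struct rte_eth_link link;
        int i;

        for (i = 0; i < 90; i++) {
                memset(&link, 0, sizeof(link));
                rte_eth_link_get_nowait(port_id, &link);
                if (link.link_status == ETH_LINK_UP)
                        return 0;
                rte_delay_ms(100); /* from rte_cycles.h */
        }
        return -1; /* link still down */
}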
1793
1794 int
1795 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1796 {
1797         struct rte_eth_dev *dev;
1798
1799         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1800
1801         dev = &rte_eth_devices[port_id];
1802         memset(stats, 0, sizeof(*stats));
1803
1804         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1805         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1806         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1807 }
1808
1809 int
1810 rte_eth_stats_reset(uint16_t port_id)
1811 {
1812         struct rte_eth_dev *dev;
1813
1814         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1815         dev = &rte_eth_devices[port_id];
1816
1817         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1818         (*dev->dev_ops->stats_reset)(dev);
1819         dev->data->rx_mbuf_alloc_failed = 0;
1820
1821         return 0;
1822 }
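
/*
 * Usage sketch (hypothetical helper): dump the generic counters and
 * clear them. rte_eth_stats_get() zeroes the caller's struct before
 * invoking the driver, so fields a PMD does not fill read back as 0.
 */
static void
example_dump_and_reset_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) != 0)
                return;

        printf("port %u: rx %" PRIu64 " tx %" PRIu64
               " missed %" PRIu64 " rx_nombuf %" PRIu64 "\n",
               port_id, stats.ipackets, stats.opackets,
               stats.imissed, stats.rx_nombuf);

        rte_eth_stats_reset(port_id);
}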
1823
1824 static inline int
1825 get_xstats_basic_count(struct rte_eth_dev *dev)
1826 {
1827         uint16_t nb_rxqs, nb_txqs;
1828         int count;
1829
1830         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1831         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1832
1833         count = RTE_NB_STATS;
1834         count += nb_rxqs * RTE_NB_RXQ_STATS;
1835         count += nb_txqs * RTE_NB_TXQ_STATS;
1836
1837         return count;
1838 }
1839
1840 static int
1841 get_xstats_count(uint16_t port_id)
1842 {
1843         struct rte_eth_dev *dev;
1844         int count;
1845
1846         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1847         dev = &rte_eth_devices[port_id];
1848         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1849                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1850                                 NULL, 0);
1851                 if (count < 0)
1852                         return eth_err(port_id, count);
1853         }
1854         if (dev->dev_ops->xstats_get_names != NULL) {
1855                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1856                 if (count < 0)
1857                         return eth_err(port_id, count);
1858         } else
1859                 count = 0;
1860
1862         count += get_xstats_basic_count(dev);
1863
1864         return count;
1865 }
1866
1867 int
1868 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1869                 uint64_t *id)
1870 {
1871         int cnt_xstats, idx_xstat;
1872
1873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1874
1875         if (!id) {
1876                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1877                 return -ENOMEM;
1878         }
1879
1880         if (!xstat_name) {
1881                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1882                 return -ENOMEM;
1883         }
1884
1885         /* Get count */
1886         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1887         if (cnt_xstats < 0) {
1888                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1889                 return -ENODEV;
1890         }
1891
1892         /* Get id-name lookup table */
1893         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1894
1895         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1896                         port_id, xstats_names, cnt_xstats, NULL)) {
1897                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1898                 return -1;
1899         }
1900
1901         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1902                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1903                         *id = idx_xstat;
1904                         return 0;
1905                 }
1906         }
1907
1908         return -EINVAL;
1909 }
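
/*
 * Usage sketch (hypothetical helper): resolve one extended statistic by
 * name (e.g. "rx_good_packets", one of the generic names taken from
 * rte_stats_strings[]) and read its current value through the by-id API
 * defined below.
 */
static int
example_read_xstat_by_name(uint16_t port_id, const char *name,
                           uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
        if (ret != 0)
                return ret;

        /* returns the number of entries filled, 1 on success here */
        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        return ret == 1 ? 0 : -1;
}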
1910
1911 /* retrieve basic stats names */
1912 static int
1913 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1914         struct rte_eth_xstat_name *xstats_names)
1915 {
1916         int cnt_used_entries = 0;
1917         uint32_t idx, id_queue;
1918         uint16_t num_q;
1919
1920         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1921                 snprintf(xstats_names[cnt_used_entries].name,
1922                         sizeof(xstats_names[0].name),
1923                         "%s", rte_stats_strings[idx].name);
1924                 cnt_used_entries++;
1925         }
1926         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1927         for (id_queue = 0; id_queue < num_q; id_queue++) {
1928                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1929                         snprintf(xstats_names[cnt_used_entries].name,
1930                                 sizeof(xstats_names[0].name),
1931                                 "rx_q%u%s",
1932                                 id_queue, rte_rxq_stats_strings[idx].name);
1933                         cnt_used_entries++;
1934                 }
1935         }
1937         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1938         for (id_queue = 0; id_queue < num_q; id_queue++) {
1939                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1940                         snprintf(xstats_names[cnt_used_entries].name,
1941                                 sizeof(xstats_names[0].name),
1942                                 "tx_q%u%s",
1943                                 id_queue, rte_txq_stats_strings[idx].name);
1944                         cnt_used_entries++;
1945                 }
1946         }
1947         return cnt_used_entries;
1948 }
1949
1950 /* retrieve ethdev extended statistics names */
1951 int
1952 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1953         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1954         uint64_t *ids)
1955 {
1956         struct rte_eth_xstat_name *xstats_names_copy;
1957         unsigned int no_basic_stat_requested = 1;
1958         unsigned int no_ext_stat_requested = 1;
1959         unsigned int expected_entries;
1960         unsigned int basic_count;
1961         struct rte_eth_dev *dev;
1962         unsigned int i;
1963         int ret;
1964
1965         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1966         dev = &rte_eth_devices[port_id];
1967
1968         basic_count = get_xstats_basic_count(dev);
1969         ret = get_xstats_count(port_id);
1970         if (ret < 0)
1971                 return ret;
1972         expected_entries = (unsigned int)ret;
1973
1974         /* Return max number of stats if no ids given */
1975         if (!ids) {
1976                 if (xstats_names == NULL || size < expected_entries)
1977                         return expected_entries;
1980         }
1981
1982         if (ids && !xstats_names)
1983                 return -EINVAL;
1984
1985         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1986                 uint64_t ids_copy[size];
1987
1988                 for (i = 0; i < size; i++) {
1989                         if (ids[i] < basic_count) {
1990                                 no_basic_stat_requested = 0;
1991                                 break;
1992                         }
1993
1994                         /*
1995                          * Convert ids to xstats ids that PMD knows.
1996                          * ids known by user are basic + extended stats.
1997                          */
1998                         ids_copy[i] = ids[i] - basic_count;
1999                 }
2000
2001                 if (no_basic_stat_requested)
2002                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2003                                         xstats_names, ids_copy, size);
2004         }
2005
2006         /* Retrieve all stats */
2007         if (!ids) {
2008                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2009                                 expected_entries);
2010                 if (num_stats < 0 || num_stats > (int)expected_entries)
2011                         return num_stats;
2012                 else
2013                         return expected_entries;
2014         }
2015
2016         xstats_names_copy = calloc(expected_entries,
2017                 sizeof(struct rte_eth_xstat_name));
2018
2019         if (!xstats_names_copy) {
2020                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2021                 return -ENOMEM;
2022         }
2023
2024         if (ids) {
2025                 for (i = 0; i < size; i++) {
2026                         if (ids[i] >= basic_count) {
2027                                 no_ext_stat_requested = 0;
2028                                 break;
2029                         }
2030                 }
2031         }
2032
2033         /* Fill xstats_names_copy structure */
2034         if (ids && no_ext_stat_requested) {
2035                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2036         } else {
2037                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2038                         expected_entries);
2039                 if (ret < 0) {
2040                         free(xstats_names_copy);
2041                         return ret;
2042                 }
2043         }
2044
2045         /* Filter stats */
2046         for (i = 0; i < size; i++) {
2047                 if (ids[i] >= expected_entries) {
2048                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2049                         free(xstats_names_copy);
2050                         return -1;
2051                 }
2052                 xstats_names[i] = xstats_names_copy[ids[i]];
2053         }
2054
2055         free(xstats_names_copy);
2056         return size;
2057 }
2058
2059 int
2060 rte_eth_xstats_get_names(uint16_t port_id,
2061         struct rte_eth_xstat_name *xstats_names,
2062         unsigned int size)
2063 {
2064         struct rte_eth_dev *dev;
2065         int cnt_used_entries;
2066         int cnt_expected_entries;
2067         int cnt_driver_entries;
2068
2069         cnt_expected_entries = get_xstats_count(port_id);
2070         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2071                         (int)size < cnt_expected_entries)
2072                 return cnt_expected_entries;
2073
2074         /* port_id checked in get_xstats_count() */
2075         dev = &rte_eth_devices[port_id];
2076
2077         cnt_used_entries = rte_eth_basic_stats_get_names(
2078                 dev, xstats_names);
2079
2080         if (dev->dev_ops->xstats_get_names != NULL) {
2081                 /* If there are any driver-specific xstats, append them
2082                  * to end of list.
2083                  */
2084                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2085                         dev,
2086                         xstats_names + cnt_used_entries,
2087                         size - cnt_used_entries);
2088                 if (cnt_driver_entries < 0)
2089                         return eth_err(port_id, cnt_driver_entries);
2090                 cnt_used_entries += cnt_driver_entries;
2091         }
2092
2093         return cnt_used_entries;
2094 }
2095
2097 static int
2098 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2099 {
2100         struct rte_eth_dev *dev;
2101         struct rte_eth_stats eth_stats;
2102         unsigned int count = 0, i, q;
2103         uint64_t val, *stats_ptr;
2104         uint16_t nb_rxqs, nb_txqs;
2105         int ret;
2106
2107         ret = rte_eth_stats_get(port_id, &eth_stats);
2108         if (ret < 0)
2109                 return ret;
2110
2111         dev = &rte_eth_devices[port_id];
2112
2113         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2114         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2115
2116         /* global stats */
2117         for (i = 0; i < RTE_NB_STATS; i++) {
2118                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2119                                         rte_stats_strings[i].offset);
2120                 val = *stats_ptr;
2121                 xstats[count++].value = val;
2122         }
2123
2124         /* per-rxq stats */
2125         for (q = 0; q < nb_rxqs; q++) {
2126                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2127                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2128                                         rte_rxq_stats_strings[i].offset +
2129                                         q * sizeof(uint64_t));
2130                         val = *stats_ptr;
2131                         xstats[count++].value = val;
2132                 }
2133         }
2134
2135         /* per-txq stats */
2136         for (q = 0; q < nb_txqs; q++) {
2137                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2138                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2139                                         rte_txq_stats_strings[i].offset +
2140                                         q * sizeof(uint64_t));
2141                         val = *stats_ptr;
2142                         xstats[count++].value = val;
2143                 }
2144         }
2145         return count;
2146 }
2147
2148 /* retrieve ethdev extended statistics */
2149 int
2150 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2151                          uint64_t *values, unsigned int size)
2152 {
2153         unsigned int no_basic_stat_requested = 1;
2154         unsigned int no_ext_stat_requested = 1;
2155         unsigned int num_xstats_filled;
2156         unsigned int basic_count;
2157         uint16_t expected_entries;
2158         struct rte_eth_dev *dev;
2159         unsigned int i;
2160         int ret;
2161
2162         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2163         ret = get_xstats_count(port_id);
2164         if (ret < 0)
2165                 return ret;
2166         expected_entries = (uint16_t)ret;
2167         struct rte_eth_xstat xstats[expected_entries];
2168         dev = &rte_eth_devices[port_id];
2169         basic_count = get_xstats_basic_count(dev);
2170
2171         /* Return max number of stats if no ids given */
2172         if (!ids) {
2173                 if (values == NULL || size < expected_entries)
2174                         return expected_entries;
2177         }
2178
2179         if (ids && !values)
2180                 return -EINVAL;
2181
2182         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2184                 uint64_t ids_copy[size];
2185
2186                 for (i = 0; i < size; i++) {
2187                         if (ids[i] < basic_count) {
2188                                 no_basic_stat_requested = 0;
2189                                 break;
2190                         }
2191
2192                         /*
2193                          * Convert ids to xstats ids that PMD knows.
2194                          * ids known by user are basic + extended stats.
2195                          */
2196                         ids_copy[i] = ids[i] - basic_count;
2197                 }
2198
2199                 if (no_basic_stat_requested)
2200                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2201                                         values, size);
2202         }
2203
2204         if (ids) {
2205                 for (i = 0; i < size; i++) {
2206                         if (ids[i] >= basic_count) {
2207                                 no_ext_stat_requested = 0;
2208                                 break;
2209                         }
2210                 }
2211         }
2212
2213         /* Fill the xstats structure */
2214         if (ids && no_ext_stat_requested)
2215                 ret = rte_eth_basic_stats_get(port_id, xstats);
2216         else
2217                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2218
2219         if (ret < 0)
2220                 return ret;
2221         num_xstats_filled = (unsigned int)ret;
2222
2223         /* Return all stats */
2224         if (!ids) {
2225                 for (i = 0; i < num_xstats_filled; i++)
2226                         values[i] = xstats[i].value;
2227                 return expected_entries;
2228         }
2229
2230         /* Filter stats */
2231         for (i = 0; i < size; i++) {
2232                 if (ids[i] >= expected_entries) {
2233                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2234                         return -1;
2235                 }
2236                 values[i] = xstats[ids[i]].value;
2237         }
2238         return size;
2239 }
2240
2241 int
2242 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2243         unsigned int n)
2244 {
2245         struct rte_eth_dev *dev;
2246         unsigned int count = 0, i;
2247         signed int xcount = 0;
2248         uint16_t nb_rxqs, nb_txqs;
2249         int ret;
2250
2251         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2252
2253         dev = &rte_eth_devices[port_id];
2254
2255         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2256         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2257
2258         /* Return generic statistics */
2259         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2260                 (nb_txqs * RTE_NB_TXQ_STATS);
2261
2262         /* implemented by the driver */
2263         if (dev->dev_ops->xstats_get != NULL) {
2264                 /* Retrieve the xstats from the driver at the end of the
2265                  * xstats struct.
2266                  */
2267                 xcount = (*dev->dev_ops->xstats_get)(dev,
2268                                      xstats ? xstats + count : NULL,
2269                                      (n > count) ? n - count : 0);
2270
2271                 if (xcount < 0)
2272                         return eth_err(port_id, xcount);
2273         }
2274
2275         if (n < count + xcount || xstats == NULL)
2276                 return count + xcount;
2277
2278         /* now fill the xstats structure */
2279         ret = rte_eth_basic_stats_get(port_id, xstats);
2280         if (ret < 0)
2281                 return ret;
2282         count = ret;
2283
2284         for (i = 0; i < count; i++)
2285                 xstats[i].id = i;
2286         /* add an offset to driver-specific stats */
2287         for ( ; i < count + xcount; i++)
2288                 xstats[i].id += count;
2289
2290         return count + xcount;
2291 }
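
/*
 * Usage sketch (hypothetical helper): the usual two-call pattern for
 * this API. Call with a NULL array to learn the required count, then
 * allocate and fetch names and values; each xstat's id indexes into
 * the names array.
 */
static void
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names = NULL;
        struct rte_eth_xstat *xstats = NULL;
        int cnt, i;

        cnt = rte_eth_xstats_get(port_id, NULL, 0);
        if (cnt <= 0)
                return;

        names = malloc(sizeof(*names) * cnt);
        xstats = malloc(sizeof(*xstats) * cnt);
        if (names == NULL || xstats == NULL)
                goto out;

        if (rte_eth_xstats_get_names(port_id, names, cnt) != cnt ||
            rte_eth_xstats_get(port_id, xstats, cnt) != cnt)
                goto out;

        for (i = 0; i < cnt; i++)
                printf("%s: %" PRIu64 "\n",
                       names[xstats[i].id].name, xstats[i].value);
out:
        free(names);
        free(xstats);
}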
2292
2293 /* reset ethdev extended statistics */
2294 void
2295 rte_eth_xstats_reset(uint16_t port_id)
2296 {
2297         struct rte_eth_dev *dev;
2298
2299         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2300         dev = &rte_eth_devices[port_id];
2301
2302         /* implemented by the driver */
2303         if (dev->dev_ops->xstats_reset != NULL) {
2304                 (*dev->dev_ops->xstats_reset)(dev);
2305                 return;
2306         }
2307
2308         /* fallback to default */
2309         rte_eth_stats_reset(port_id);
2310 }
2311
2312 static int
2313 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2314                 uint8_t is_rx)
2315 {
2316         struct rte_eth_dev *dev;
2317
2318         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2319
2320         dev = &rte_eth_devices[port_id];
2321
2322         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2323         return (*dev->dev_ops->queue_stats_mapping_set)
2324                         (dev, queue_id, stat_idx, is_rx);
2325 }
2326
2327
2328 int
2329 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2330                 uint8_t stat_idx)
2331 {
2332         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2333                                                 stat_idx, STAT_QMAP_TX));
2334 }
2335
2336
2337 int
2338 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2339                 uint8_t stat_idx)
2340 {
2341         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2342                                                 stat_idx, STAT_QMAP_RX));
2343 }
2344
2345 int
2346 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2347 {
2348         struct rte_eth_dev *dev;
2349
2350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2351         dev = &rte_eth_devices[port_id];
2352
2353         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2354         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2355                                                         fw_version, fw_size));
2356 }
2357
2358 void
2359 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2360 {
2361         struct rte_eth_dev *dev;
2362         const struct rte_eth_desc_lim lim = {
2363                 .nb_max = UINT16_MAX,
2364                 .nb_min = 0,
2365                 .nb_align = 1,
2366         };
2367
2368         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2369         dev = &rte_eth_devices[port_id];
2370
2371         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2372         dev_info->rx_desc_lim = lim;
2373         dev_info->tx_desc_lim = lim;
2374         dev_info->device = dev->device;
2375
2376         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2377         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2378         dev_info->driver_name = dev->device->driver->name;
2379         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2380         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2381
2382         dev_info->dev_flags = &dev->data->dev_flags;
2383 }
2384
2385 int
2386 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2387                                  uint32_t *ptypes, int num)
2388 {
2389         int i, j;
2390         struct rte_eth_dev *dev;
2391         const uint32_t *all_ptypes;
2392
2393         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2394         dev = &rte_eth_devices[port_id];
2395         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2396         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2397
2398         if (!all_ptypes)
2399                 return 0;
2400
2401         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2402                 if (all_ptypes[i] & ptype_mask) {
2403                         if (j < num)
2404                                 ptypes[j] = all_ptypes[i];
2405                         j++;
2406                 }
2407
2408         return j;
2409 }
2410
2411 void
2412 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2413 {
2414         struct rte_eth_dev *dev;
2415
2416         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2417         dev = &rte_eth_devices[port_id];
2418         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2419 }
2420
2421
2422 int
2423 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2424 {
2425         struct rte_eth_dev *dev;
2426
2427         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2428
2429         dev = &rte_eth_devices[port_id];
2430         *mtu = dev->data->mtu;
2431         return 0;
2432 }
2433
2434 int
2435 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2436 {
2437         int ret;
2438         struct rte_eth_dev *dev;
2439
2440         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2441         dev = &rte_eth_devices[port_id];
2442         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2443
2444         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2445         if (!ret)
2446                 dev->data->mtu = mtu;
2447
2448         return eth_err(port_id, ret);
2449 }
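
/*
 * Usage sketch (hypothetical helper): raise the MTU for jumbo frames.
 * Whether 9000 is accepted is PMD specific; on failure mtu_set()
 * returns a negative errno and, as shown above, the cached
 * dev->data->mtu is left untouched.
 */
static int
example_enable_jumbo_mtu(uint16_t port_id)
{
        uint16_t mtu;
        int ret;

        ret = rte_eth_dev_get_mtu(port_id, &mtu);
        if (ret != 0)
                return ret;
        if (mtu >= 9000)
                return 0; /* already large enough */
        return rte_eth_dev_set_mtu(port_id, 9000);
}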
2450
2451 int
2452 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2453 {
2454         struct rte_eth_dev *dev;
2455         int ret;
2456
2457         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2458         dev = &rte_eth_devices[port_id];
2459         if (!(dev->data->dev_conf.rxmode.offloads &
2460               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2461                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2462                 return -ENOSYS;
2463         }
2464
2465         if (vlan_id > 4095) {
2466                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2467                                 port_id, (unsigned) vlan_id);
2468                 return -EINVAL;
2469         }
2470         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2471
2472         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2473         if (ret == 0) {
2474                 struct rte_vlan_filter_conf *vfc;
2475                 int vidx;
2476                 int vbit;
2477
2478                 vfc = &dev->data->vlan_filter_conf;
2479                 vidx = vlan_id / 64;
2480                 vbit = vlan_id % 64;
2481
2482                 if (on)
2483                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2484                 else
2485                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2486         }
2487
2488         return eth_err(port_id, ret);
2489 }
2490
2491 int
2492 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2493                                     int on)
2494 {
2495         struct rte_eth_dev *dev;
2496
2497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2498         dev = &rte_eth_devices[port_id];
2499         if (rx_queue_id >= dev->data->nb_rx_queues) {
2500                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%u\n", rx_queue_id);
2501                 return -EINVAL;
2502         }
2503
2504         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2505         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2506
2507         return 0;
2508 }
2509
2510 int
2511 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2512                                 enum rte_vlan_type vlan_type,
2513                                 uint16_t tpid)
2514 {
2515         struct rte_eth_dev *dev;
2516
2517         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2518         dev = &rte_eth_devices[port_id];
2519         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2520
2521         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2522                                                                tpid));
2523 }
2524
2525 int
2526 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2527 {
2528         struct rte_eth_dev *dev;
2529         int ret = 0;
2530         int mask = 0;
2531         int cur, org = 0;
2532         uint64_t orig_offloads;
2533
2534         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2535         dev = &rte_eth_devices[port_id];
2536
2537         /* save original values in case of failure */
2538         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2539
2540         /* check which options changed by application */
2541         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2542         org = !!(dev->data->dev_conf.rxmode.offloads &
2543                  DEV_RX_OFFLOAD_VLAN_STRIP);
2544         if (cur != org) {
2545                 if (cur)
2546                         dev->data->dev_conf.rxmode.offloads |=
2547                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2548                 else
2549                         dev->data->dev_conf.rxmode.offloads &=
2550                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2551                 mask |= ETH_VLAN_STRIP_MASK;
2552         }
2553
2554         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2555         org = !!(dev->data->dev_conf.rxmode.offloads &
2556                  DEV_RX_OFFLOAD_VLAN_FILTER);
2557         if (cur != org) {
2558                 if (cur)
2559                         dev->data->dev_conf.rxmode.offloads |=
2560                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2561                 else
2562                         dev->data->dev_conf.rxmode.offloads &=
2563                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2564                 mask |= ETH_VLAN_FILTER_MASK;
2565         }
2566
2567         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2568         org = !!(dev->data->dev_conf.rxmode.offloads &
2569                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2570         if (cur != org) {
2571                 if (cur)
2572                         dev->data->dev_conf.rxmode.offloads |=
2573                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2574                 else
2575                         dev->data->dev_conf.rxmode.offloads &=
2576                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2577                 mask |= ETH_VLAN_EXTEND_MASK;
2578         }
2579
2580         /* no change */
2581         if (mask == 0)
2582                 return ret;
2583
2584         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2585         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2586         if (ret) {
2587                 /* hit an error, restore original values */
2588                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2589         }
2590
2591         return eth_err(port_id, ret);
2592 }
2593
2594 int
2595 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2596 {
2597         struct rte_eth_dev *dev;
2598         int ret = 0;
2599
2600         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2601         dev = &rte_eth_devices[port_id];
2602
2603         if (dev->data->dev_conf.rxmode.offloads &
2604             DEV_RX_OFFLOAD_VLAN_STRIP)
2605                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2606
2607         if (dev->data->dev_conf.rxmode.offloads &
2608             DEV_RX_OFFLOAD_VLAN_FILTER)
2609                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2610
2611         if (dev->data->dev_conf.rxmode.offloads &
2612             DEV_RX_OFFLOAD_VLAN_EXTEND)
2613                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2614
2615         return ret;
2616 }
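
/*
 * Usage sketch (hypothetical helper): enable VLAN stripping without
 * losing the other VLAN offloads. The read-modify-write matters:
 * rte_eth_dev_set_vlan_offload() treats its argument as the complete
 * desired state, so bits left out of the mask are switched off.
 */
static int
example_enable_vlan_strip(uint16_t port_id)
{
        int mask;

        mask = rte_eth_dev_get_vlan_offload(port_id);
        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                        mask | ETH_VLAN_STRIP_OFFLOAD);
}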
2617
2618 int
2619 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2620 {
2621         struct rte_eth_dev *dev;
2622
2623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2624         dev = &rte_eth_devices[port_id];
2625         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2626
2627         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2628 }
2629
2630 int
2631 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2632 {
2633         struct rte_eth_dev *dev;
2634
2635         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2636         dev = &rte_eth_devices[port_id];
2637         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2638         memset(fc_conf, 0, sizeof(*fc_conf));
2639         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2640 }
2641
2642 int
2643 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2644 {
2645         struct rte_eth_dev *dev;
2646
2647         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2648         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2649                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2650                 return -EINVAL;
2651         }
2652
2653         dev = &rte_eth_devices[port_id];
2654         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2655         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2656 }
2657
2658 int
2659 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2660                                    struct rte_eth_pfc_conf *pfc_conf)
2661 {
2662         struct rte_eth_dev *dev;
2663
2664         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2665         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2666                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2667                 return -EINVAL;
2668         }
2669
2670         dev = &rte_eth_devices[port_id];
2671         /* High water, low water validation are device specific */
2672         if (*dev->dev_ops->priority_flow_ctrl_set)
2673                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2674                                         (dev, pfc_conf));
2675         return -ENOTSUP;
2676 }
2677
2678 static int
2679 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2680                         uint16_t reta_size)
2681 {
2682         uint16_t i, num;
2683
2684         if (!reta_conf)
2685                 return -EINVAL;
2686
2687         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2688         for (i = 0; i < num; i++) {
2689                 if (reta_conf[i].mask)
2690                         return 0;
2691         }
2692
2693         return -EINVAL;
2694 }
2695
2696 static int
2697 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2698                          uint16_t reta_size,
2699                          uint16_t max_rxq)
2700 {
2701         uint16_t i, idx, shift;
2702
2703         if (!reta_conf)
2704                 return -EINVAL;
2705
2706         if (max_rxq == 0) {
2707                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2708                 return -EINVAL;
2709         }
2710
2711         for (i = 0; i < reta_size; i++) {
2712                 idx = i / RTE_RETA_GROUP_SIZE;
2713                 shift = i % RTE_RETA_GROUP_SIZE;
2714                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2715                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2716                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2717                                 "the maximum rxq index: %u\n", idx, shift,
2718                                 reta_conf[idx].reta[shift], max_rxq);
2719                         return -EINVAL;
2720                 }
2721         }
2722
2723         return 0;
2724 }
2725
2726 int
2727 rte_eth_dev_rss_reta_update(uint16_t port_id,
2728                             struct rte_eth_rss_reta_entry64 *reta_conf,
2729                             uint16_t reta_size)
2730 {
2731         struct rte_eth_dev *dev;
2732         int ret;
2733
2734         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2735         /* Check mask bits */
2736         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2737         if (ret < 0)
2738                 return ret;
2739
2740         dev = &rte_eth_devices[port_id];
2741
2742         /* Check entry value */
2743         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2744                                 dev->data->nb_rx_queues);
2745         if (ret < 0)
2746                 return ret;
2747
2748         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2749         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2750                                                              reta_size));
2751 }
2752
2753 int
2754 rte_eth_dev_rss_reta_query(uint16_t port_id,
2755                            struct rte_eth_rss_reta_entry64 *reta_conf,
2756                            uint16_t reta_size)
2757 {
2758         struct rte_eth_dev *dev;
2759         int ret;
2760
2761         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2762
2763         /* Check mask bits */
2764         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2765         if (ret < 0)
2766                 return ret;
2767
2768         dev = &rte_eth_devices[port_id];
2769         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2770         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2771                                                             reta_size));
2772 }
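
/*
 * Usage sketch (hypothetical helper): rewrite the whole redirection
 * table, spreading entries round-robin over nb_queues RX queues. It
 * assumes reta_size (from dev_info.reta_size) is a multiple of
 * RTE_RETA_GROUP_SIZE, which holds for the usual 64/128/512 sizes.
 * Setting every mask bit marks each entry as valid and satisfies the
 * rte_eth_check_reta_mask() test above.
 */
static int
example_reta_round_robin(uint16_t port_id, uint16_t reta_size,
                         uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
                                                  RTE_RETA_GROUP_SIZE];
        uint16_t i, idx, shift;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= UINT64_C(1) << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}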
2773
2774 int
2775 rte_eth_dev_rss_hash_update(uint16_t port_id,
2776                             struct rte_eth_rss_conf *rss_conf)
2777 {
2778         struct rte_eth_dev *dev;
2779         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2780
2781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2782         dev = &rte_eth_devices[port_id];
2783         rte_eth_dev_info_get(port_id, &dev_info);
2784         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2785             dev_info.flow_type_rss_offloads) {
2786                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
2787                                     "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2788                                     port_id,
2789                                     rss_conf->rss_hf,
2790                                     dev_info.flow_type_rss_offloads);
2791         }
2792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2793         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2794                                                                  rss_conf));
2795 }
2796
2797 int
2798 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2799                               struct rte_eth_rss_conf *rss_conf)
2800 {
2801         struct rte_eth_dev *dev;
2802
2803         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2804         dev = &rte_eth_devices[port_id];
2805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2806         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2807                                                                    rss_conf));
2808 }
2809
2810 int
2811 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2812                                 struct rte_eth_udp_tunnel *udp_tunnel)
2813 {
2814         struct rte_eth_dev *dev;
2815
2816         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2817         if (udp_tunnel == NULL) {
2818                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2819                 return -EINVAL;
2820         }
2821
2822         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2823                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2824                 return -EINVAL;
2825         }
2826
2827         dev = &rte_eth_devices[port_id];
2828         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2829         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2830                                                                 udp_tunnel));
2831 }
2832
2833 int
2834 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2835                                    struct rte_eth_udp_tunnel *udp_tunnel)
2836 {
2837         struct rte_eth_dev *dev;
2838
2839         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2840         dev = &rte_eth_devices[port_id];
2841
2842         if (udp_tunnel == NULL) {
2843                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2844                 return -EINVAL;
2845         }
2846
2847         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2848                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2849                 return -EINVAL;
2850         }
2851
2852         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2853         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2854                                                                 udp_tunnel));
2855 }
2856
2857 int
2858 rte_eth_led_on(uint16_t port_id)
2859 {
2860         struct rte_eth_dev *dev;
2861
2862         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2863         dev = &rte_eth_devices[port_id];
2864         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2865         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2866 }
2867
2868 int
2869 rte_eth_led_off(uint16_t port_id)
2870 {
2871         struct rte_eth_dev *dev;
2872
2873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2874         dev = &rte_eth_devices[port_id];
2875         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2876         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2877 }
2878
2879 /*
2880  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2881  * an empty spot.
2882  */
2883 static int
2884 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2885 {
2886         struct rte_eth_dev_info dev_info;
2887         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2888         unsigned i;
2889
2890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2891         rte_eth_dev_info_get(port_id, &dev_info);
2892
2893         for (i = 0; i < dev_info.max_mac_addrs; i++)
2894                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2895                         return i;
2896
2897         return -1;
2898 }
2899
2900 static const struct ether_addr null_mac_addr;
2901
2902 int
2903 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2904                         uint32_t pool)
2905 {
2906         struct rte_eth_dev *dev;
2907         int index;
2908         uint64_t pool_mask;
2909         int ret;
2910
2911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2912         dev = &rte_eth_devices[port_id];
2913         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2914
2915         if (is_zero_ether_addr(addr)) {
2916                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2917                         port_id);
2918                 return -EINVAL;
2919         }
2920         if (pool >= ETH_64_POOLS) {
2921                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2922                 return -EINVAL;
2923         }
2924
2925         index = get_mac_addr_index(port_id, addr);
2926         if (index < 0) {
2927                 index = get_mac_addr_index(port_id, &null_mac_addr);
2928                 if (index < 0) {
2929                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2930                                 port_id);
2931                         return -ENOSPC;
2932                 }
2933         } else {
2934                 pool_mask = dev->data->mac_pool_sel[index];
2935
2936                 /* If both the MAC address and pool are already set, do nothing */
2937                 if (pool_mask & (1ULL << pool))
2938                         return 0;
2939         }
2940
2941         /* Update NIC */
2942         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2943
2944         if (ret == 0) {
2945                 /* Update address in NIC data structure */
2946                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2947
2948                 /* Update pool bitmap in NIC data structure */
2949                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2950         }
2951
2952         return eth_err(port_id, ret);
2953 }
2954
2955 int
2956 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2957 {
2958         struct rte_eth_dev *dev;
2959         int index;
2960
2961         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2962         dev = &rte_eth_devices[port_id];
2963         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2964
2965         index = get_mac_addr_index(port_id, addr);
2966         if (index == 0) {
2967                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2968                 return -EADDRINUSE;
2969         } else if (index < 0)
2970                 return 0;  /* Do nothing if address wasn't found */
2971
2972         /* Update NIC */
2973         (*dev->dev_ops->mac_addr_remove)(dev, index);
2974
2975         /* Update address in NIC data structure */
2976         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2977
2978         /* reset pool bitmap */
2979         dev->data->mac_pool_sel[index] = 0;
2980
2981         return 0;
2982 }
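
/*
 * Usage sketch (hypothetical helper, example address made up): add a
 * locally administered secondary MAC to pool 0 and remove it again.
 * Re-adding an address already present in the same pool is a no-op
 * that returns 0, as the pool_mask check above shows.
 */
static int
example_add_and_remove_mac(uint16_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };
        int ret;

        ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
        if (ret != 0)
                return ret;
        return rte_eth_dev_mac_addr_remove(port_id, &addr);
}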
2983
2984 int
2985 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2986 {
2987         struct rte_eth_dev *dev;
2988         int ret;
2989
2990         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2991
2992         if (!is_valid_assigned_ether_addr(addr))
2993                 return -EINVAL;
2994
2995         dev = &rte_eth_devices[port_id];
2996         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2997
2998         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
2999         if (ret < 0)
3000                 return ret;
3001
3002         /* Update default address in NIC data structure */
3003         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3004
3005         return 0;
3006 }
3007
3008
3009 /*
3010  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3011  * an empty spot.
3012  */
3013 static int
3014 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3015 {
3016         struct rte_eth_dev_info dev_info;
3017         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3018         unsigned i;
3019
3020         rte_eth_dev_info_get(port_id, &dev_info);
3021         if (!dev->data->hash_mac_addrs)
3022                 return -1;
3023
3024         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3025                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3026                         ETHER_ADDR_LEN) == 0)
3027                         return i;
3028
3029         return -1;
3030 }
3031
3032 int
3033 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3034                                 uint8_t on)
3035 {
3036         int index;
3037         int ret;
3038         struct rte_eth_dev *dev;
3039
3040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3041
3042         dev = &rte_eth_devices[port_id];
3043         if (is_zero_ether_addr(addr)) {
3044                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3045                         port_id);
3046                 return -EINVAL;
3047         }
3048
3049         index = get_hash_mac_addr_index(port_id, addr);
3050         /* Check if it's already there, and do nothing */
3051         if ((index >= 0) && on)
3052                 return 0;
3053
3054         if (index < 0) {
3055                 if (!on) {
3056                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3057                                 "set in UTA\n", port_id);
3058                         return -EINVAL;
3059                 }
3060
3061                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3062                 if (index < 0) {
3063                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3064                                         port_id);
3065                         return -ENOSPC;
3066                 }
3067         }
3068
3069         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3070         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3071         if (ret == 0) {
3072                 /* Update address in NIC data structure */
3073                 if (on)
3074                         ether_addr_copy(addr,
3075                                         &dev->data->hash_mac_addrs[index]);
3076                 else
3077                         ether_addr_copy(&null_mac_addr,
3078                                         &dev->data->hash_mac_addrs[index]);
3079         }
3080
3081         return eth_err(port_id, ret);
3082 }
3083
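/*
 * Sketch of toggling unicast hash filtering for one address, assuming the
 * PMD implements uc_hash_table_set (otherwise -ENOTSUP). Per the logic
 * above, enabling an address twice is a no-op and disabling an address
 * that was never set returns -EINVAL.
 *
 *	struct ether_addr peer = {
 *		.addr_bytes = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee }
 *	};
 *
 *	rte_eth_dev_uc_hash_table_set(port_id, &peer, 1);
 *
 * and later, to drop the same address from the table again:
 *
 *	rte_eth_dev_uc_hash_table_set(port_id, &peer, 0);
 */
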
3084 int
3085 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3086 {
3087         struct rte_eth_dev *dev;
3088
3089         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3090
3091         dev = &rte_eth_devices[port_id];
3092
3093         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3094         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3095                                                                        on));
3096 }
3097
3098 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3099                                         uint16_t tx_rate)
3100 {
3101         struct rte_eth_dev *dev;
3102         struct rte_eth_dev_info dev_info;
3103         struct rte_eth_link link;
3104
3105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3106
3107         dev = &rte_eth_devices[port_id];
3108         rte_eth_dev_info_get(port_id, &dev_info);
3109         link = dev->data->dev_link;
3110
3111         if (queue_idx >= dev_info.max_tx_queues) {
3112                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3113                                 "invalid queue id=%d\n", port_id, queue_idx);
3114                 return -EINVAL;
3115         }
3116
3117         if (tx_rate > link.link_speed) {
3118                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
3119                                 "bigger than link speed %d\n",
3120                         tx_rate, link.link_speed);
3121                 return -EINVAL;
3122         }
3123
3124         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3125         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3126                                                         queue_idx, tx_rate));
3127 }
3128
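/*
 * Sketch of capping a TX queue, assuming a started port whose PMD
 * implements set_queue_rate_limit. The rate is expressed in Mbps, the
 * same unit as the link.link_speed it is validated against above.
 *
 *	int rc = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 *
 * limits TX queue 0 of "port_id" to 1 Gbps.
 */
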
3129 int
3130 rte_eth_mirror_rule_set(uint16_t port_id,
3131                         struct rte_eth_mirror_conf *mirror_conf,
3132                         uint8_t rule_id, uint8_t on)
3133 {
3134         struct rte_eth_dev *dev;
3135
3136         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3137         if (mirror_conf->rule_type == 0) {
3138                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
3139                 return -EINVAL;
3140         }
3141
3142         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3143                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3144                                 ETH_64_POOLS - 1);
3145                 return -EINVAL;
3146         }
3147
3148         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3149              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3150             (mirror_conf->pool_mask == 0)) {
3151                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
3152                 return -EINVAL;
3153         }
3154
3155         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3156             mirror_conf->vlan.vlan_mask == 0) {
3157                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
3158                 return -EINVAL;
3159         }
3160
3161         dev = &rte_eth_devices[port_id];
3162         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3163
3164         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3165                                                 mirror_conf, rule_id, on));
3166 }
3167
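/*
 * Sketch of a VLAN mirror rule, assuming a VF-capable PMD implementing
 * mirror_rule_set; the rule_id namespace is device specific. The fields
 * used are exactly the ones validated above.
 *
 *	struct rte_eth_mirror_conf conf = {
 *		.rule_type = ETH_MIRROR_VLAN,
 *		.dst_pool = 0,
 *		.vlan = {
 *			.vlan_mask = 1ULL << 0,
 *			.vlan_id = { 100 },
 *		},
 *	};
 *
 *	int rc = rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 *
 * mirrors VLAN 100 traffic into pool 0; passing on == 0 with the same
 * rule_id would disable the rule again.
 */
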
3168 int
3169 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3170 {
3171         struct rte_eth_dev *dev;
3172
3173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3174
3175         dev = &rte_eth_devices[port_id];
3176         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3177
3178         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3179                                                                    rule_id));
3180 }
3181
3182 RTE_INIT(eth_dev_init_cb_lists)
3183 {
3184         int i;
3185
3186         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3187                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3188 }
3189
3190 int
3191 rte_eth_dev_callback_register(uint16_t port_id,
3192                         enum rte_eth_event_type event,
3193                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3194 {
3195         struct rte_eth_dev *dev;
3196         struct rte_eth_dev_callback *user_cb;
3197         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3198         uint16_t last_port;
3199
3200         if (!cb_fn)
3201                 return -EINVAL;
3202
3203         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3204                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3205                 return -EINVAL;
3206         }
3207
3208         if (port_id == RTE_ETH_ALL) {
3209                 next_port = 0;
3210                 last_port = RTE_MAX_ETHPORTS - 1;
3211         } else {
3212                 next_port = last_port = port_id;
3213         }
3214
3215         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3216
3217         do {
3218                 dev = &rte_eth_devices[next_port];
3219
3220                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3221                         if (user_cb->cb_fn == cb_fn &&
3222                                 user_cb->cb_arg == cb_arg &&
3223                                 user_cb->event == event) {
3224                                 break;
3225                         }
3226                 }
3227
3228                 /* No existing callback matched: create a new one. */
3229                 if (user_cb == NULL) {
3230                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3231                                 sizeof(struct rte_eth_dev_callback), 0);
3232                         if (user_cb != NULL) {
3233                                 user_cb->cb_fn = cb_fn;
3234                                 user_cb->cb_arg = cb_arg;
3235                                 user_cb->event = event;
3236                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3237                                                   user_cb, next);
3238                         } else {
3239                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3240                                 rte_eth_dev_callback_unregister(port_id, event,
3241                                                                 cb_fn, cb_arg);
3242                                 return -ENOMEM;
3243                         }
3244
3245                 }
3246         } while (++next_port <= last_port);
3247
3248         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3249         return 0;
3250 }
3251
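/*
 * Sketch of registering one link-status callback on every port slot,
 * current and future, via the RTE_ETH_ALL handling above. The callback
 * type is rte_eth_dev_cb_fn; it should be unregistered with
 * rte_eth_dev_callback_unregister() before the application tears down.
 *
 *	static int
 *	lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *	       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, lsc_cb, NULL);
 */
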
3252 int
3253 rte_eth_dev_callback_unregister(uint16_t port_id,
3254                         enum rte_eth_event_type event,
3255                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3256 {
3257         int ret;
3258         struct rte_eth_dev *dev;
3259         struct rte_eth_dev_callback *cb, *next;
3260         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3261         uint16_t last_port;
3262
3263         if (!cb_fn)
3264                 return -EINVAL;
3265
3266         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3267                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3268                 return -EINVAL;
3269         }
3270
3271         if (port_id == RTE_ETH_ALL) {
3272                 next_port = 0;
3273                 last_port = RTE_MAX_ETHPORTS - 1;
3274         } else {
3275                 next_port = last_port = port_id;
3276         }
3277
3278         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3279
3280         do {
3281                 dev = &rte_eth_devices[next_port];
3282                 ret = 0;
3283                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3284                      cb = next) {
3285
3286                         next = TAILQ_NEXT(cb, next);
3287
3288                         if (cb->cb_fn != cb_fn || cb->event != event ||
3289                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3290                                 continue;
3291
3292                         /*
3293                          * if this callback is not executing right now,
3294                          * then remove it.
3295                          */
3296                         if (cb->active == 0) {
3297                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3298                                 rte_free(cb);
3299                         } else {
3300                                 ret = -EAGAIN;
3301                         }
3302                 }
3303         } while (++next_port <= last_port);
3304
3305         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3306         return ret;
3307 }
3308
3309 int
3310 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3311         enum rte_eth_event_type event, void *ret_param)
3312 {
3313         struct rte_eth_dev_callback *cb_lst;
3314         struct rte_eth_dev_callback dev_cb;
3315         int rc = 0;
3316
3317         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3318         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3319                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3320                         continue;
3321                 dev_cb = *cb_lst;
3322                 cb_lst->active = 1;
3323                 if (ret_param != NULL)
3324                         dev_cb.ret_param = ret_param;
3325
3326                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3327                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3328                                 dev_cb.cb_arg, dev_cb.ret_param);
3329                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3330                 cb_lst->active = 0;
3331         }
3332         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3333         return rc;
3334 }
3335
3336 int
3337 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3338 {
3339         uint32_t vec;
3340         struct rte_eth_dev *dev;
3341         struct rte_intr_handle *intr_handle;
3342         uint16_t qid;
3343         int rc;
3344
3345         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3346
3347         dev = &rte_eth_devices[port_id];
3348
3349         if (!dev->intr_handle) {
3350                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3351                 return -ENOTSUP;
3352         }
3353
3354         intr_handle = dev->intr_handle;
3355         if (!intr_handle->intr_vec) {
3356                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3357                 return -EPERM;
3358         }
3359
3360         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3361                 vec = intr_handle->intr_vec[qid];
3362                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3363                 if (rc && rc != -EEXIST) {
3364                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3365                                         " op %d epfd %d vec %u\n",
3366                                         port_id, qid, op, epfd, vec);
3367                 }
3368         }
3369
3370         return 0;
3371 }
3372
3373 const struct rte_memzone *
3374 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3375                          uint16_t queue_id, size_t size, unsigned align,
3376                          int socket_id)
3377 {
3378         char z_name[RTE_MEMZONE_NAMESIZE];
3379         const struct rte_memzone *mz;
3380
3381         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3382                  dev->device->driver->name, ring_name,
3383                  dev->data->port_id, queue_id);
3384
3385         mz = rte_memzone_lookup(z_name);
3386         if (mz)
3387                 return mz;
3388
3389         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3390                         RTE_MEMZONE_IOVA_CONTIG, align);
3391 }
3392
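/*
 * Typical driver-side use of rte_eth_dma_zone_reserve() from a queue setup
 * handler (a sketch; "ring_size" and "socket_id" are assumed driver
 * locals). The generated name keys the zone to driver, ring, port and
 * queue, so a later reconfiguration finds and reuses the existing zone.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */
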
3393 int __rte_experimental
3394 rte_eth_dev_create(struct rte_device *device, const char *name,
3395         size_t priv_data_size,
3396         ethdev_bus_specific_init ethdev_bus_specific_init,
3397         void *bus_init_params,
3398         ethdev_init_t ethdev_init, void *init_params)
3399 {
3400         struct rte_eth_dev *ethdev;
3401         int retval;
3402
3403         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3404
3405         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3406                 ethdev = rte_eth_dev_allocate(name);
3407                 if (!ethdev) {
3408                         retval = -ENODEV;
3409                         goto probe_failed;
3410                 }
3411
3412                 if (priv_data_size) {
3413                         ethdev->data->dev_private = rte_zmalloc_socket(
3414                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3415                                 device->numa_node);
3416
3417                         if (!ethdev->data->dev_private) {
3418                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3419                                 retval = -ENOMEM;
3420                                 goto probe_failed;
3421                         }
3422                 }
3423         } else {
3424                 ethdev = rte_eth_dev_attach_secondary(name);
3425                 if (!ethdev) {
3426                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3427                                 "ethdev doesn't exist\n");
3428                         retval = -ENODEV;
3429                         goto probe_failed;
3430                 }
3431         }
3432
3433         ethdev->device = device;
3434
3435         if (ethdev_bus_specific_init) {
3436                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3437                 if (retval) {
3438                         RTE_LOG(ERR, EAL,
3439                                 "ethdev bus specific initialisation failed\n");
3440                         goto probe_failed;
3441                 }
3442         }
3443
3444         retval = ethdev_init(ethdev, init_params);
3445         if (retval) {
3446                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3447                 goto probe_failed;
3448         }
3449
3450         return retval;
3451 probe_failed:
3452         /* free private data if primary; ethdev is NULL on early failure */
3453         if (rte_eal_process_type() == RTE_PROC_PRIMARY && ethdev != NULL)
3454                 rte_free(ethdev->data->dev_private);
3455
3456         rte_eth_dev_release_port(ethdev);
3457
3458         return retval;
3459 }
3460
3461 int __rte_experimental
3462 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3463         ethdev_uninit_t ethdev_uninit)
3464 {
3465         int ret;
3466
3467         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3468         if (!ethdev)
3469                 return -ENODEV;
3470
3471         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3472
3473         /* the macro above already rejected a NULL ethdev_uninit */
3474         ret = ethdev_uninit(ethdev);
3475         if (ret)
3476                 return ret;
3477
3478         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3479                 rte_free(ethdev->data->dev_private);
3480
3481         ethdev->data->dev_private = NULL;
3482
3483         return rte_eth_dev_release_port(ethdev);
3484 }
3485
3486 int
3487 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3488                           int epfd, int op, void *data)
3489 {
3490         uint32_t vec;
3491         struct rte_eth_dev *dev;
3492         struct rte_intr_handle *intr_handle;
3493         int rc;
3494
3495         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3496
3497         dev = &rte_eth_devices[port_id];
3498         if (queue_id >= dev->data->nb_rx_queues) {
3499                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3500                 return -EINVAL;
3501         }
3502
3503         if (!dev->intr_handle) {
3504                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3505                 return -ENOTSUP;
3506         }
3507
3508         intr_handle = dev->intr_handle;
3509         if (!intr_handle->intr_vec) {
3510                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3511                 return -EPERM;
3512         }
3513
3514         vec = intr_handle->intr_vec[queue_id];
3515         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3516         if (rc && rc != -EEXIST) {
3517                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3518                                 " op %d epfd %d vec %u\n",
3519                                 port_id, queue_id, op, epfd, vec);
3520                 return rc;
3521         }
3522
3523         return 0;
3524 }
3525
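/*
 * Sketch of wiring one RX queue interrupt into the per-thread epoll fd and
 * blocking on it, assuming the port was configured with intr_conf.rxq = 1.
 * RTE_EPOLL_PER_THREAD selects the calling lcore's epoll instance.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */
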
3526 int
3527 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3528                            uint16_t queue_id)
3529 {
3530         struct rte_eth_dev *dev;
3531
3532         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3533
3534         dev = &rte_eth_devices[port_id];
3535
3536         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3537         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3538                                                                 queue_id));
3539 }
3540
3541 int
3542 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3543                             uint16_t queue_id)
3544 {
3545         struct rte_eth_dev *dev;
3546
3547         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3548
3549         dev = &rte_eth_devices[port_id];
3550
3551         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3552         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3553                                                                 queue_id));
3554 }
3555
3557 int
3558 rte_eth_dev_filter_supported(uint16_t port_id,
3559                              enum rte_filter_type filter_type)
3560 {
3561         struct rte_eth_dev *dev;
3562
3563         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3564
3565         dev = &rte_eth_devices[port_id];
3566         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3567         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3568                                 RTE_ETH_FILTER_NOP, NULL);
3569 }
3570
3571 int
3572 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3573                         enum rte_filter_op filter_op, void *arg)
3574 {
3575         struct rte_eth_dev *dev;
3576
3577         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3578
3579         dev = &rte_eth_devices[port_id];
3580         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3581         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3582                                                              filter_op, arg));
3583 }
3584
3585 const struct rte_eth_rxtx_callback *
3586 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3587                 rte_rx_callback_fn fn, void *user_param)
3588 {
3589 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3590         rte_errno = ENOTSUP;
3591         return NULL;
3592 #endif
3593         /* check input parameters */
3594         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3595                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3596                 rte_errno = EINVAL;
3597                 return NULL;
3598         }
3599         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3600
3601         if (cb == NULL) {
3602                 rte_errno = ENOMEM;
3603                 return NULL;
3604         }
3605
3606         cb->fn.rx = fn;
3607         cb->param = user_param;
3608
3609         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3610         /* Add the callback at the tail, preserving FIFO order. */
3611         struct rte_eth_rxtx_callback *tail =
3612                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3613
3614         if (!tail) {
3615                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3616
3617         } else {
3618                 while (tail->next)
3619                         tail = tail->next;
3620                 tail->next = cb;
3621         }
3622         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3623
3624         return cb;
3625 }
3626
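/*
 * Sketch of a post-RX callback counting received packets (requires
 * RTE_ETHDEV_RXTX_CALLBACKS). The returned handle is later passed to
 * rte_eth_remove_rx_callback(); its memory may only be freed once no
 * lcore can still be executing the callback.
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count);
 */
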
3627 const struct rte_eth_rxtx_callback *
3628 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3629                 rte_rx_callback_fn fn, void *user_param)
3630 {
3631 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3632         rte_errno = ENOTSUP;
3633         return NULL;
3634 #endif
3635         /* check input parameters */
3636         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3637                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3638                 rte_errno = EINVAL;
3639                 return NULL;
3640         }
3641
3642         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3643
3644         if (cb == NULL) {
3645                 rte_errno = ENOMEM;
3646                 return NULL;
3647         }
3648
3649         cb->fn.rx = fn;
3650         cb->param = user_param;
3651
3652         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3653         /* Add the callback at the first position */
3654         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3655         rte_smp_wmb();
3656         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3657         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3658
3659         return cb;
3660 }
3661
3662 const struct rte_eth_rxtx_callback *
3663 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3664                 rte_tx_callback_fn fn, void *user_param)
3665 {
3666 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3667         rte_errno = ENOTSUP;
3668         return NULL;
3669 #endif
3670         /* check input parameters */
3671         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3672                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3673                 rte_errno = EINVAL;
3674                 return NULL;
3675         }
3676
3677         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3678
3679         if (cb == NULL) {
3680                 rte_errno = ENOMEM;
3681                 return NULL;
3682         }
3683
3684         cb->fn.tx = fn;
3685         cb->param = user_param;
3686
3687         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3688         /* Add the callback at the tail, preserving FIFO order. */
3689         struct rte_eth_rxtx_callback *tail =
3690                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3691
3692         if (!tail) {
3693                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3694
3695         } else {
3696                 while (tail->next)
3697                         tail = tail->next;
3698                 tail->next = cb;
3699         }
3700         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3701
3702         return cb;
3703 }
3704
3705 int
3706 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3707                 const struct rte_eth_rxtx_callback *user_cb)
3708 {
3709 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3710         return -ENOTSUP;
3711 #endif
3712         /* Check input parameters. */
3713         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3714         if (user_cb == NULL ||
3715                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3716                 return -EINVAL;
3717
3718         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3719         struct rte_eth_rxtx_callback *cb;
3720         struct rte_eth_rxtx_callback **prev_cb;
3721         int ret = -EINVAL;
3722
3723         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3724         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3725         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3726                 cb = *prev_cb;
3727                 if (cb == user_cb) {
3728                         /* Remove the user cb from the callback list. */
3729                         *prev_cb = cb->next;
3730                         ret = 0;
3731                         break;
3732                 }
3733         }
3734         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3735
3736         return ret;
3737 }
3738
3739 int
3740 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3741                 const struct rte_eth_rxtx_callback *user_cb)
3742 {
3743 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3744         return -ENOTSUP;
3745 #endif
3746         /* Check input parameters. */
3747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3748         if (user_cb == NULL ||
3749                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3750                 return -EINVAL;
3751
3752         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3753         int ret = -EINVAL;
3754         struct rte_eth_rxtx_callback *cb;
3755         struct rte_eth_rxtx_callback **prev_cb;
3756
3757         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3758         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3759         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3760                 cb = *prev_cb;
3761                 if (cb == user_cb) {
3762                         /* Remove the user cb from the callback list. */
3763                         *prev_cb = cb->next;
3764                         ret = 0;
3765                         break;
3766                 }
3767         }
3768         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3769
3770         return ret;
3771 }
3772
3773 int
3774 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3775         struct rte_eth_rxq_info *qinfo)
3776 {
3777         struct rte_eth_dev *dev;
3778
3779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3780
3781         if (qinfo == NULL)
3782                 return -EINVAL;
3783
3784         dev = &rte_eth_devices[port_id];
3785         if (queue_id >= dev->data->nb_rx_queues) {
3786                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3787                 return -EINVAL;
3788         }
3789
3790         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3791
3792         memset(qinfo, 0, sizeof(*qinfo));
3793         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3794         return 0;
3795 }
3796
3797 int
3798 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3799         struct rte_eth_txq_info *qinfo)
3800 {
3801         struct rte_eth_dev *dev;
3802
3803         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3804
3805         if (qinfo == NULL)
3806                 return -EINVAL;
3807
3808         dev = &rte_eth_devices[port_id];
3809         if (queue_id >= dev->data->nb_tx_queues) {
3810                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3811                 return -EINVAL;
3812         }
3813
3814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3815
3816         memset(qinfo, 0, sizeof(*qinfo));
3817         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3818         return 0;
3819 }
3820
3821 int
3822 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3823                              struct ether_addr *mc_addr_set,
3824                              uint32_t nb_mc_addr)
3825 {
3826         struct rte_eth_dev *dev;
3827
3828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3829
3830         dev = &rte_eth_devices[port_id];
3831         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3832         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3833                                                 mc_addr_set, nb_mc_addr));
3834 }
3835
3836 int
3837 rte_eth_timesync_enable(uint16_t port_id)
3838 {
3839         struct rte_eth_dev *dev;
3840
3841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3842         dev = &rte_eth_devices[port_id];
3843
3844         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3845         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3846 }
3847
3848 int
3849 rte_eth_timesync_disable(uint16_t port_id)
3850 {
3851         struct rte_eth_dev *dev;
3852
3853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3854         dev = &rte_eth_devices[port_id];
3855
3856         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3857         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3858 }
3859
3860 int
3861 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3862                                    uint32_t flags)
3863 {
3864         struct rte_eth_dev *dev;
3865
3866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3867         dev = &rte_eth_devices[port_id];
3868
3869         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3870         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3871                                 (dev, timestamp, flags));
3872 }
3873
3874 int
3875 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3876                                    struct timespec *timestamp)
3877 {
3878         struct rte_eth_dev *dev;
3879
3880         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3881         dev = &rte_eth_devices[port_id];
3882
3883         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3884         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3885                                 (dev, timestamp));
3886 }
3887
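/*
 * Sketch of the PTP flow these timesync hooks serve, assuming a PMD with
 * IEEE 1588 support: enable timestamping, send a PTP frame with the
 * PKT_TX_IEEE1588_TMST flag set in ol_flags, then poll until the NIC has
 * latched the TX timestamp (drivers typically return a negative value
 * until then).
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *
 * after transmitting the timestamped frame:
 *
 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
 *		rte_pause();
 */
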
3888 int
3889 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3890 {
3891         struct rte_eth_dev *dev;
3892
3893         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3894         dev = &rte_eth_devices[port_id];
3895
3896         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3897         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3898                                                                       delta));
3899 }
3900
3901 int
3902 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3903 {
3904         struct rte_eth_dev *dev;
3905
3906         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3907         dev = &rte_eth_devices[port_id];
3908
3909         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3910         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3911                                                                 timestamp));
3912 }
3913
3914 int
3915 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3916 {
3917         struct rte_eth_dev *dev;
3918
3919         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3920         dev = &rte_eth_devices[port_id];
3921
3922         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3923         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3924                                                                 timestamp));
3925 }
3926
3927 int
3928 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3929 {
3930         struct rte_eth_dev *dev;
3931
3932         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3933
3934         dev = &rte_eth_devices[port_id];
3935         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3936         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3937 }
3938
3939 int
3940 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3941 {
3942         struct rte_eth_dev *dev;
3943
3944         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3945
3946         dev = &rte_eth_devices[port_id];
3947         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3948         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3949 }
3950
3951 int
3952 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3953 {
3954         struct rte_eth_dev *dev;
3955
3956         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3957
3958         dev = &rte_eth_devices[port_id];
3959         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3960         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3961 }
3962
3963 int
3964 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3965 {
3966         struct rte_eth_dev *dev;
3967
3968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3969
3970         dev = &rte_eth_devices[port_id];
3971         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3972         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3973 }
3974
3975 int __rte_experimental
3976 rte_eth_dev_get_module_info(uint16_t port_id,
3977                             struct rte_eth_dev_module_info *modinfo)
3978 {
3979         struct rte_eth_dev *dev;
3980
3981         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3982
3983         dev = &rte_eth_devices[port_id];
3984         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
3985         return (*dev->dev_ops->get_module_info)(dev, modinfo);
3986 }
3987
3988 int __rte_experimental
3989 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3990                               struct rte_dev_eeprom_info *info)
3991 {
3992         struct rte_eth_dev *dev;
3993
3994         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3995
3996         dev = &rte_eth_devices[port_id];
3997         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
3998         return (*dev->dev_ops->get_module_eeprom)(dev, info);
3999 }
4000
4001 int
4002 rte_eth_dev_get_dcb_info(uint16_t port_id,
4003                              struct rte_eth_dcb_info *dcb_info)
4004 {
4005         struct rte_eth_dev *dev;
4006
4007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4008
4009         dev = &rte_eth_devices[port_id];
4010         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4011
4012         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4013         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4014 }
4015
4016 int
4017 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4018                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4019 {
4020         struct rte_eth_dev *dev;
4021
4022         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4023         if (l2_tunnel == NULL) {
4024                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4025                 return -EINVAL;
4026         }
4027
4028         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4029                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
4030                 return -EINVAL;
4031         }
4032
4033         dev = &rte_eth_devices[port_id];
4034         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4035                                 -ENOTSUP);
4036         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4037                                                                 l2_tunnel));
4038 }
4039
4040 int
4041 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4042                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4043                                   uint32_t mask,
4044                                   uint8_t en)
4045 {
4046         struct rte_eth_dev *dev;
4047
4048         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4049
4050         if (l2_tunnel == NULL) {
4051                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4052                 return -EINVAL;
4053         }
4054
4055         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4056                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4057                 return -EINVAL;
4058         }
4059
4060         if (mask == 0) {
4061                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4062                 return -EINVAL;
4063         }
4064
4065         dev = &rte_eth_devices[port_id];
4066         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4067                                 -ENOTSUP);
4068         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4069                                                         l2_tunnel, mask, en));
4070 }
4071
4072 static void
4073 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4074                            const struct rte_eth_desc_lim *desc_lim)
4075 {
4076         if (desc_lim->nb_align != 0)
4077                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4078
4079         if (desc_lim->nb_max != 0)
4080                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4081
4082         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4083 }
4084
4085 int
4086 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4087                                  uint16_t *nb_rx_desc,
4088                                  uint16_t *nb_tx_desc)
4089 {
4090         struct rte_eth_dev *dev;
4091         struct rte_eth_dev_info dev_info;
4092
4093         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4094
4095         dev = &rte_eth_devices[port_id];
4096         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4097
4098         rte_eth_dev_info_get(port_id, &dev_info);
4099
4100         if (nb_rx_desc != NULL)
4101                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4102
4103         if (nb_tx_desc != NULL)
4104                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4105
4106         return 0;
4107 }
4108
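/*
 * Typical use: clamp requested ring sizes to the PMD's limits before queue
 * setup, so the setup calls do not fail on out-of-range descriptor counts.
 * A sketch, assuming "mb_pool" is an existing mempool:
 *
 *	uint16_t nb_rxd = 1024;
 *	uint16_t nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *			rte_eth_dev_socket_id(port_id), NULL);
 */
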
4109 int
4110 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4111 {
4112         struct rte_eth_dev *dev;
4113
4114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4115
4116         if (pool == NULL)
4117                 return -EINVAL;
4118
4119         dev = &rte_eth_devices[port_id];
4120
4121         if (*dev->dev_ops->pool_ops_supported == NULL)
4122                 return 1; /* all pools are supported */
4123
4124         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4125 }
4126
4127 /**
4128  * A set of values to describe the possible states of a switch domain.
4129  */
4130 enum rte_eth_switch_domain_state {
4131         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4132         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4133 };
4134
4135 /**
4136  * Array of switch domains available for allocation. Array is sized to
4137  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4138  * ethdev ports in a single process.
4139  */
4140 struct rte_eth_dev_switch {
4141         enum rte_eth_switch_domain_state state;
4142 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4143
4144 int __rte_experimental
4145 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4146 {
4147         unsigned int i;
4148
4149         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4150
4151         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4152                 i < RTE_MAX_ETHPORTS; i++) {
4153                 if (rte_eth_switch_domains[i].state ==
4154                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4155                         rte_eth_switch_domains[i].state =
4156                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4157                         *domain_id = i;
4158                         return 0;
4159                 }
4160         }
4161
4162         return -ENOSPC;
4163 }
4164
4165 int __rte_experimental
4166 rte_eth_switch_domain_free(uint16_t domain_id)
4167 {
4168         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4169                 domain_id >= RTE_MAX_ETHPORTS)
4170                 return -EINVAL;
4171
4172         if (rte_eth_switch_domains[domain_id].state !=
4173                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4174                 return -EINVAL;
4175
4176         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4177
4178         return 0;
4179 }
4180
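/*
 * Sketch of the intended pairing, e.g. in a PMD probing several
 * representor ports that share one switch domain (the domain id would be
 * reported through dev_info.switch_info.domain_id of each port):
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *
 * and on driver remove:
 *
 *	rte_eth_switch_domain_free(domain_id);
 */
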
4181 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4182
4183 static int
4184 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4185 {
4186         int state;
4187         struct rte_kvargs_pair *pair;
4188         char *letter;
4189
4190         arglist->str = strdup(str_in);
4191         if (arglist->str == NULL)
4192                 return -ENOMEM;
4193
4194         letter = arglist->str;
4195         state = 0;
4196         arglist->count = 0;
4197         pair = &arglist->pairs[0];
4198         while (1) {
4199                 switch (state) {
4200                 case 0: /* Initial */
4201                         if (*letter == '=')
4202                                 return -EINVAL;
4203                         else if (*letter == '\0')
4204                                 return 0;
4205
4206                         state = 1;
4207                         pair->key = letter;
4208                         /* fall-thru */
4209
4210                 case 1: /* Parsing key */
4211                         if (*letter == '=') {
4212                                 *letter = '\0';
4213                                 pair->value = letter + 1;
4214                                 state = 2;
4215                         } else if (*letter == ',' || *letter == '\0')
4216                                 return -EINVAL;
4217                         break;
4218
4220                 case 2: /* Parsing value */
4221                         if (*letter == '[')
4222                                 state = 3;
4223                         else if (*letter == ',') {
4224                                 *letter = '\0';
4225                                 arglist->count++;
4226                                 pair = &arglist->pairs[arglist->count];
4227                                 state = 0;
4228                         } else if (*letter == '\0') {
4229                                 letter--;
4230                                 arglist->count++;
4231                                 pair = &arglist->pairs[arglist->count];
4232                                 state = 0;
4233                         }
4234                         break;
4235
4236                 case 3: /* Parsing list */
4237                         if (*letter == ']')
4238                                 state = 2;
4239                         else if (*letter == '\0')
4240                                 return -EINVAL;
4241                         break;
4242                 }
4243                 letter++;
4244         }
4245 }
4246
4247 static int
4248 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4249         void *data)
4250 {
4251         char *str_start;
4252         int state;
4253         int result;
4254
4255         if (*str != '[')
4256                 /* Single element, not a list */
4257                 return callback(str, data);
4258
4259         /* Sanity check, then strip the brackets */
4260         str_start = &str[strlen(str) - 1];
4261         if (*str_start != ']') {
4262                 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'\n", str);
4263                 return -EINVAL;
4264         }
4265         str++;
4266         *str_start = '\0';
4267
4268         /* Process list elements */
4269         state = 0;
4270         while (1) {
4271                 if (state == 0) {
4272                         if (*str == '\0')
4273                                 break;
4274                         if (*str != ',') {
4275                                 str_start = str;
4276                                 state = 1;
4277                         }
4278                 } else if (state == 1) {
4279                         if (*str == ',' || *str == '\0') {
4280                                 if (str > str_start) {
4281                                         /* Non-empty string fragment */
4282                                         *str = '\0';
4283                                         result = callback(str_start, data);
4284                                         if (result < 0)
4285                                                 return result;
4286                                 }
4287                                 state = 0;
4288                         }
4289                 }
4290                 str++;
4291         }
4292         return 0;
4293 }
4294
4295 static int
4296 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4297         const uint16_t max_list)
4298 {
4299         uint16_t lo, hi, val;
4300         int result;
4301
4302         result = sscanf(str, "%hu-%hu", &lo, &hi);
4303         if (result == 1) {
4304                 if (*len_list >= max_list)
4305                         return -ENOMEM;
4306                 list[(*len_list)++] = lo;
4307         } else if (result == 2) {
4308                 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4309                         return -EINVAL;
4310                 for (val = lo; val <= hi; val++) {
4311                         if (*len_list >= max_list)
4312                                 return -ENOMEM;
4313                         list[(*len_list)++] = val;
4314                 }
4315         } else
4316                 return -EINVAL;
4317         return 0;
4318 }
4319
4321 static int
4322 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4323 {
4324         struct rte_eth_devargs *eth_da = data;
4325
4326         return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4327                 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
4328 }
4329
4330 int __rte_experimental
4331 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4332 {
4333         struct rte_kvargs args;
4334         struct rte_kvargs_pair *pair;
4335         unsigned int i;
4336         int result = 0;
4337
4338         memset(eth_da, 0, sizeof(*eth_da));
4339
4340         result = rte_eth_devargs_tokenise(&args, dargs);
4341         if (result < 0)
4342                 goto parse_cleanup;
4343
4344         for (i = 0; i < args.count; i++) {
4345                 pair = &args.pairs[i];
4346                 if (strcmp("representor", pair->key) == 0) {
4347                         result = rte_eth_devargs_parse_list(pair->value,
4348                                 rte_eth_devargs_parse_representor_ports,
4349                                 eth_da);
4350                         if (result < 0)
4351                                 goto parse_cleanup;
4352                 }
4353         }
4354
4355 parse_cleanup:
4356         if (args.str)
4357                 free(args.str);
4358
4359         return result;
4360 }
4361
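/*
 * Sketch of consuming the parsed representor list, e.g. for the devargs
 * string "representor=[0-3]" (the bracketed range is expanded into
 * individual ids by the helpers above):
 *
 *	struct rte_eth_devargs da;
 *	uint16_t i;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &da) == 0)
 *		for (i = 0; i < da.nb_representor_ports; i++)
 *			printf("representor %u\n", da.representor_ports[i]);
 */
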
4362 RTE_INIT(ethdev_init_log);
4363 static void
4364 ethdev_init_log(void)
4365 {
4366         ethdev_logtype = rte_log_register("lib.ethdev");
4367         if (ethdev_logtype >= 0)
4368                 rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
4369 }