ethdev: rename folder to library name
[dpdk.git] lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"

static int ethdev_logtype;

#define ethdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint8_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /	\
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /	\
		sizeof(rte_txq_stats_strings[0]))
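
/*
 * A minimal sketch of how these name/offset tables are meant to be used:
 * the generic xstats code reads each basic counter out of struct
 * rte_eth_stats by offset instead of hard-coding one field per name,
 * roughly like this:
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *	uint64_t val;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	for (i = 0; i < RTE_NB_STATS; i++) {
 *		val = *(uint64_t *)(((char *)&stats) +
 *				rte_stats_strings[i].offset);
 *		printf("%s: %" PRIu64 "\n", rte_stats_strings[i].name, val);
 *	}
 */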

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
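
/*
 * A minimal usage sketch: applications normally iterate through the
 * RTE_ETH_FOREACH_DEV() macro rather than calling the iterator directly,
 * but the loop below shows the underlying pattern:
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is attached\n", pid);
 */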

static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;

	eth_dev_last_created_port = port_id;

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		ethdev_log(ERR, "Reached maximum number of Ethernet ports");
		goto unlock;
	}

	if (rte_eth_dev_allocated(name) != NULL) {
		ethdev_log(ERR,
			"Ethernet Device with name %s already allocated!",
			name);
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	if (eth_dev != NULL)
		_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);

	return eth_dev;
}
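
/*
 * A minimal sketch of the intended caller, a PMD probe routine; the
 * driver name and ops structure here are hypothetical:
 *
 *	static int my_pmd_probe(struct rte_vdev_device *vdev)
 *	{
 *		struct rte_eth_dev *eth_dev;
 *
 *		eth_dev = rte_eth_dev_allocate(rte_vdev_device_name(vdev));
 *		if (eth_dev == NULL)
 *			return -ENOMEM;
 *		eth_dev->dev_ops = &my_pmd_ops;
 *		return 0;
 *	}
 */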

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}
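
/*
 * A minimal sketch of how a PMD probe path typically branches on the
 * process type (error handling reduced to a single return):
 *
 *	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	else
 *		eth_dev = rte_eth_dev_allocate(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 */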

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;

	memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id) {
		RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016" PRIx64 ".\n",
				    owner_id);
		return 0;
	}
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
	       rte_eth_devices[port_id].data->owner.id != owner_id))
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev_owner *port_owner;
	int sret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id))
		return -EINVAL;

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
				    " by %s_%016" PRIx64 ".\n", port_id,
				    port_owner->name, port_owner->id);
		return -EPERM;
	}

	sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
			new_owner->name);
	if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
		RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
				    port_id);

	port_owner->id = new_owner->id;

	RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016" PRIx64 ".\n", port_id,
			    new_owner->name, new_owner->id);

	return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
			memset(&rte_eth_devices[port_id].data->owner, 0,
			       sizeof(struct rte_eth_dev_owner));
		RTE_PMD_DEBUG_TRACE("All port owners owned by %016" PRIx64
				    " identifier have been removed.\n",
				    owner_id);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
			   sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
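
/*
 * A minimal sketch of the ownership API from an application's point of
 * view (error handling omitted):
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint16_t pid;
 *
 *	rte_eth_dev_owner_new(&owner.id);
 *	rte_eth_dev_owner_set(port_id, &owner);
 *	RTE_ETH_FOREACH_DEV_OWNED_BY(pid, owner.id)
 *		printf("port %u is owned by %s\n", pid, owner.name);
 *	rte_eth_dev_owner_unset(port_id, owner.id);
 */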

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
	return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	for (port = 0; port < RTE_MAX_ETHPORTS; port++)
		if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
			count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by a VDEV PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
		if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
		    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}
	}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
	int current = rte_eth_dev_count_total();
	struct rte_devargs da;
	int ret = -1;

	memset(&da, 0, sizeof(da));

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs */
	if (rte_devargs_parse(&da, "%s", devargs))
		goto err;

	ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count_total()) {
		ethdev_log(ERR, "No port found for device (%s)", da.name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 * FIXME: race condition in case of plug-out of another device
	 */
	if (current == rte_eth_dev_count_total()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(da.args);
	return ret;
}
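
/*
 * A minimal usage sketch; the devargs string is a hypothetical example
 * for the tap vdev:
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_attach("net_tap0,iface=tap0", &port_id) == 0)
 *		printf("attached as port %u\n", port_id);
 */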

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
	struct rte_device *dev;
	struct rte_bus *bus;
	uint32_t dev_flags;
	int ret = -1;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		ethdev_log(ERR,
			"Port %" PRIu16 " is bonded, cannot detach", port_id);
		return -ENOTSUP;
	}

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL)
		return -EINVAL;

	bus = rte_bus_find_by_device(dev);
	if (bus == NULL)
		return -ENOENT;

	ret = rte_eal_hotplug_remove(bus->name, dev->name);
	if (ret < 0)
		return ret;

	rte_eth_dev_release_port(&rte_eth_devices[port_id]);
	return 0;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be started before starting any queue\n",
		    port_id);
		return -EINVAL;
	}

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be started before starting any queue\n",
		    port_id);
		return -EINVAL;
	}

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
							     tx_queue_id));

}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
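
/*
 * A minimal usage sketch: building the link_speeds mask of struct
 * rte_eth_conf from numeric speeds, e.g. to advertise 1G and 10G
 * full duplex only:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, 1) |
 *			   rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, 1);
 */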

/**
 * A conversion function from rxmode bitfield API.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
				    uint64_t *rx_offloads)
{
	uint64_t offloads = 0;

	if (rxmode->header_split == 1)
		offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
	if (rxmode->hw_ip_checksum == 1)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	if (rxmode->hw_vlan_filter == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	if (rxmode->hw_vlan_strip == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rxmode->hw_vlan_extend == 1)
		offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	if (rxmode->jumbo_frame == 1)
		offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (rxmode->hw_strip_crc == 1)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (rxmode->enable_scatter == 1)
		offloads |= DEV_RX_OFFLOAD_SCATTER;
	if (rxmode->enable_lro == 1)
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	if (rxmode->hw_timestamp == 1)
		offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (rxmode->security == 1)
		offloads |= DEV_RX_OFFLOAD_SECURITY;

	*rx_offloads = offloads;
}

const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}
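
/*
 * A minimal usage sketch: both lookup functions take a single-bit
 * offload value, so a capability mask is printed one bit at a time:
 *
 *	uint64_t caps = dev_info.rx_offload_capa;
 *	unsigned int bit;
 *
 *	for (bit = 0; bit < 64; bit++)
 *		if (caps & (UINT64_C(1) << bit))
 *			printf("%s\n", rte_eth_dev_rx_offload_name(
 *					UINT64_C(1) << bit));
 */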

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_conf = *dev_conf;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use driver preferred values. This cannot be done
	 * individually as it is valid for either Tx or Rx (but not both) to
	 * be zero. If the driver does not provide any preferred values,
	 * fall back on EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/*
	 * Convert between the two offloads APIs so that PMDs only need to
	 * support one of them.
	 */
	if (dev_conf->rxmode.ignore_offload_bitfield == 0)
		rte_eth_convert_rx_offload_bitfield(
				&dev_conf->rxmode, &local_conf.rxmode.offloads);

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
			RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					dev->device->driver->name);
			return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
				    dev->device->driver->name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
				    "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
				    port_id,
				    dev_conf->rx_adv_conf.rss_conf.rss_hf,
				    dev_info.flow_type_rss_offloads);
		return -EINVAL;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_profile_rx_init(port_id, dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return eth_err(port_id, diag);
	}

	return 0;
}
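
/*
 * A minimal sketch of the canonical port bring-up sequence around
 * rte_eth_dev_configure() (error handling and mempool creation omitted;
 * mbuf_pool is assumed to exist):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */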

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

void
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

void
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	rte_eth_dev_stop(port_id);
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->rx_queue_state[rx_queue_id] !=
		RTE_ETH_QUEUE_STATE_STOPPED)
		return -EBUSY;

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;
	if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
		/*
		 * Reflect port offloads to queue offloads so that they are
		 * not discarded.
		 */
1504                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1505                                                     &local_conf.offloads);
1506         }
1507
1508         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1509                                               socket_id, &local_conf, mp);
1510         if (!ret) {
1511                 if (!dev->data->min_rx_buf_size ||
1512                     dev->data->min_rx_buf_size > mbp_buf_size)
1513                         dev->data->min_rx_buf_size = mbp_buf_size;
1514         }
1515
1516         return eth_err(port_id, ret);
1517 }
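/*
 * Illustrative usage sketch (editor's note, not part of the original source):
 * a minimal RX queue setup against the checks above.  "mbuf_pool" and
 * "port_id" are assumed to exist in the caller.  Passing nb_rx_desc == 0
 * picks the driver default ring size, and a NULL rx_conf selects
 * dev_info.default_rxconf, as implemented above.
 *
 *      if (rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(),
 *                                 NULL, mbuf_pool) != 0)
 *              rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */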
1518
1519 /**
1520  * A conversion function from txq_flags API.
1521  */
1522 static void
1523 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1524 {
1525         uint64_t offloads = 0;
1526
1527         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1528                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1529         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1530                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1531         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1532                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1533         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1534                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1535         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1536                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1537         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1538             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1539                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1540
1541         *tx_offloads = offloads;
1542 }
1543
1544 int
1545 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1546                        uint16_t nb_tx_desc, unsigned int socket_id,
1547                        const struct rte_eth_txconf *tx_conf)
1548 {
1549         struct rte_eth_dev *dev;
1550         struct rte_eth_dev_info dev_info;
1551         struct rte_eth_txconf local_conf;
1552         void **txq;
1553
1554         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1555
1556         dev = &rte_eth_devices[port_id];
1557         if (tx_queue_id >= dev->data->nb_tx_queues) {
1558                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1559                 return -EINVAL;
1560         }
1561
1562         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1563         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1564
1565         rte_eth_dev_info_get(port_id, &dev_info);
1566
1567         /* Use default specified by driver, if nb_tx_desc is zero */
1568         if (nb_tx_desc == 0) {
1569                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1570                 /* If driver default is zero, fall back on EAL default */
1571                 if (nb_tx_desc == 0)
1572                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1573         }
1574         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1575             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1576             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1577                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1578                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1579                                 nb_tx_desc,
1580                                 dev_info.tx_desc_lim.nb_max,
1581                                 dev_info.tx_desc_lim.nb_min,
1582                                 dev_info.tx_desc_lim.nb_align);
1583                 return -EINVAL;
1584         }
1585
1586         if (dev->data->dev_started &&
1587                 !(dev_info.dev_capa &
1588                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1589                 return -EBUSY;
1590
1591         if (dev->data->tx_queue_state[tx_queue_id] !=
1592                 RTE_ETH_QUEUE_STATE_STOPPED)
1593                 return -EBUSY;
1594
1595         txq = dev->data->tx_queues;
1596         if (txq[tx_queue_id]) {
1597                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1598                                         -ENOTSUP);
1599                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1600                 txq[tx_queue_id] = NULL;
1601         }
1602
1603         if (tx_conf == NULL)
1604                 tx_conf = &dev_info.default_txconf;
1605
1606         /*
1607          * Convert from the legacy txq_flags API to the offloads API,
1608          * so that PMDs only need to support one of them.
1609          */
1610         local_conf = *tx_conf;
1611         if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
1612                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1613                                           &local_conf.offloads);
1614         }
1615
1616         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1617                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1618 }
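/*
 * Illustrative usage sketch (editor's note, not part of the original source):
 * a caller typically sets ETH_TXQ_FLAGS_IGNORE so that the offloads field is
 * honoured instead of the legacy flags converted above.  The queue id, ring
 * size and offload choice below are example values.
 *
 *      struct rte_eth_dev_info dev_info;
 *      struct rte_eth_txconf txconf;
 *
 *      rte_eth_dev_info_get(port_id, &dev_info);
 *      txconf = dev_info.default_txconf;
 *      txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *      txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *      if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                 &txconf) != 0)
 *              rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */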
1619
1620 void
1621 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1622                 void *userdata __rte_unused)
1623 {
1624         unsigned i;
1625
1626         for (i = 0; i < unsent; i++)
1627                 rte_pktmbuf_free(pkts[i]);
1628 }
1629
1630 void
1631 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1632                 void *userdata)
1633 {
1634         uint64_t *count = userdata;
1635         unsigned i;
1636
1637         for (i = 0; i < unsent; i++)
1638                 rte_pktmbuf_free(pkts[i]);
1639
1640         *count += unsent;
1641 }
1642
1643 int
1644 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1645                 buffer_tx_error_fn cbfn, void *userdata)
1646 {
1647         buffer->error_callback = cbfn;
1648         buffer->error_userdata = userdata;
1649         return 0;
1650 }
1651
1652 int
1653 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1654 {
1655         int ret = 0;
1656
1657         if (buffer == NULL)
1658                 return -EINVAL;
1659
1660         buffer->size = size;
1661         if (buffer->error_callback == NULL) {
1662                 ret = rte_eth_tx_buffer_set_err_callback(
1663                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1664         }
1665
1666         return ret;
1667 }
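/*
 * Illustrative usage sketch (editor's note, not part of the original source):
 * pairing the init and callback helpers above.  BURST_SIZE, port_id, the
 * queue id and "mbuf" are assumptions supplied by the application.
 *
 *      static uint64_t drop_count;
 *      struct rte_eth_dev_tx_buffer *buf;
 *
 *      buf = rte_zmalloc("tx_buffer",
 *                        RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0);
 *      if (buf == NULL || rte_eth_tx_buffer_init(buf, BURST_SIZE) != 0)
 *              rte_exit(EXIT_FAILURE, "cannot init tx buffer\n");
 *      rte_eth_tx_buffer_set_err_callback(buf,
 *                      rte_eth_tx_buffer_count_callback, &drop_count);
 *
 *      Then, in the forwarding loop:
 *
 *      rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *      rte_eth_tx_buffer_flush(port_id, 0, buf);
 */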
1668
1669 int
1670 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1671 {
1672         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1673         int ret;
1674
1675         /* Validate Input Data. Bail if not valid or not supported. */
1676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1677         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
             if (queue_id >= dev->data->nb_tx_queues) {
                     RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
                     return -EINVAL;
             }
1678
1679         /* Call driver to free pending mbufs. */
1680         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1681                                                free_cnt);
1682         return eth_err(port_id, ret);
1683 }
1684
1685 void
1686 rte_eth_promiscuous_enable(uint16_t port_id)
1687 {
1688         struct rte_eth_dev *dev;
1689
1690         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1691         dev = &rte_eth_devices[port_id];
1692
1693         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1694         (*dev->dev_ops->promiscuous_enable)(dev);
1695         dev->data->promiscuous = 1;
1696 }
1697
1698 void
1699 rte_eth_promiscuous_disable(uint16_t port_id)
1700 {
1701         struct rte_eth_dev *dev;
1702
1703         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1704         dev = &rte_eth_devices[port_id];
1705
1706         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1707         dev->data->promiscuous = 0;
1708         (*dev->dev_ops->promiscuous_disable)(dev);
1709 }
1710
1711 int
1712 rte_eth_promiscuous_get(uint16_t port_id)
1713 {
1714         struct rte_eth_dev *dev;
1715
1716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1717
1718         dev = &rte_eth_devices[port_id];
1719         return dev->data->promiscuous;
1720 }
1721
1722 void
1723 rte_eth_allmulticast_enable(uint16_t port_id)
1724 {
1725         struct rte_eth_dev *dev;
1726
1727         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1728         dev = &rte_eth_devices[port_id];
1729
1730         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1731         (*dev->dev_ops->allmulticast_enable)(dev);
1732         dev->data->all_multicast = 1;
1733 }
1734
1735 void
1736 rte_eth_allmulticast_disable(uint16_t port_id)
1737 {
1738         struct rte_eth_dev *dev;
1739
1740         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1741         dev = &rte_eth_devices[port_id];
1742
1743         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1744         dev->data->all_multicast = 0;
1745         (*dev->dev_ops->allmulticast_disable)(dev);
1746 }
1747
1748 int
1749 rte_eth_allmulticast_get(uint16_t port_id)
1750 {
1751         struct rte_eth_dev *dev;
1752
1753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1754
1755         dev = &rte_eth_devices[port_id];
1756         return dev->data->all_multicast;
1757 }
1758
1759 void
1760 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1761 {
1762         struct rte_eth_dev *dev;
1763
1764         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1765         dev = &rte_eth_devices[port_id];
1766
1767         if (dev->data->dev_conf.intr_conf.lsc &&
1768             dev->data->dev_started)
1769                 rte_eth_linkstatus_get(dev, eth_link);
1770         else {
1771                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1772                 (*dev->dev_ops->link_update)(dev, 1);
1773                 *eth_link = dev->data->dev_link;
1774         }
1775 }
1776
1777 void
1778 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1779 {
1780         struct rte_eth_dev *dev;
1781
1782         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1783         dev = &rte_eth_devices[port_id];
1784
1785         if (dev->data->dev_conf.intr_conf.lsc &&
1786             dev->data->dev_started)
1787                 rte_eth_linkstatus_get(dev, eth_link);
1788         else {
1789                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1790                 (*dev->dev_ops->link_update)(dev, 0);
1791                 *eth_link = dev->data->dev_link;
1792         }
1793 }
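/*
 * Illustrative sketch (editor's note, not part of the original source): the
 * nowait variant is the one to call from a polling loop, since
 * rte_eth_link_get() above may wait for the link to come up.  "port_id" is
 * assumed to be a valid port.
 *
 *      struct rte_eth_link link;
 *
 *      rte_eth_link_get_nowait(port_id, &link);
 *      if (link.link_status == ETH_LINK_UP)
 *              printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */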
1794
1795 int
1796 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1797 {
1798         struct rte_eth_dev *dev;
1799
1800         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1801
1802         dev = &rte_eth_devices[port_id];
1803         memset(stats, 0, sizeof(*stats));
1804
1805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1806         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1807         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1808 }
1809
1810 int
1811 rte_eth_stats_reset(uint16_t port_id)
1812 {
1813         struct rte_eth_dev *dev;
1814
1815         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1816         dev = &rte_eth_devices[port_id];
1817
1818         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1819         (*dev->dev_ops->stats_reset)(dev);
1820         dev->data->rx_mbuf_alloc_failed = 0;
1821
1822         return 0;
1823 }
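/*
 * Illustrative sketch (editor's note, not part of the original source): a
 * typical get/reset pairing.  Note that rx_nombuf is filled in by the ethdev
 * layer above, not by the PMD.
 *
 *      struct rte_eth_stats st;
 *
 *      if (rte_eth_stats_get(port_id, &st) == 0)
 *              printf("rx=%" PRIu64 " tx=%" PRIu64 " nombuf=%" PRIu64 "\n",
 *                     st.ipackets, st.opackets, st.rx_nombuf);
 *      rte_eth_stats_reset(port_id);
 */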
1824
1825 static inline int
1826 get_xstats_basic_count(struct rte_eth_dev *dev)
1827 {
1828         uint16_t nb_rxqs, nb_txqs;
1829         int count;
1830
1831         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1832         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1833
1834         count = RTE_NB_STATS;
1835         count += nb_rxqs * RTE_NB_RXQ_STATS;
1836         count += nb_txqs * RTE_NB_TXQ_STATS;
1837
1838         return count;
1839 }
1840
1841 static int
1842 get_xstats_count(uint16_t port_id)
1843 {
1844         struct rte_eth_dev *dev;
1845         int count;
1846
1847         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1848         dev = &rte_eth_devices[port_id];
1849         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1850                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1851                                 NULL, 0);
1852                 if (count < 0)
1853                         return eth_err(port_id, count);
1854         }
1855         if (dev->dev_ops->xstats_get_names != NULL) {
1856                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1857                 if (count < 0)
1858                         return eth_err(port_id, count);
1859         } else
1860                 count = 0;
1861
1862
1863         count += get_xstats_basic_count(dev);
1864
1865         return count;
1866 }
1867
1868 int
1869 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1870                 uint64_t *id)
1871 {
1872         int cnt_xstats, idx_xstat;
1873
1874         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1875
1876         if (!id) {
1877                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1878                 return -ENOMEM;
1879         }
1880
1881         if (!xstat_name) {
1882                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1883                 return -ENOMEM;
1884         }
1885
1886         /* Get count */
1887         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1888         if (cnt_xstats < 0) {
1889                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1890                 return -ENODEV;
1891         }
1892
1893         /* Get id-name lookup table */
1894         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1895
1896         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1897                         port_id, xstats_names, cnt_xstats, NULL)) {
1898                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1899                 return -1;
1900         }
1901
1902         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1903                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1904                         *id = idx_xstat;
1905                         return 0;
1906                 }
1907         }
1908
1909         return -EINVAL;
1910 }
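/*
 * Illustrative sketch (editor's note, not part of the original source):
 * resolving a name once and then reading the counter by id avoids the full
 * name scan on every poll.  The xstat name is an example; availability
 * depends on the PMD.
 *
 *      uint64_t id, value;
 *
 *      if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *                                        &id) == 0 &&
 *          rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *              printf("rx_good_packets=%" PRIu64 "\n", value);
 */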
1911
1912 /* retrieve basic stats names */
1913 static int
1914 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1915         struct rte_eth_xstat_name *xstats_names)
1916 {
1917         int cnt_used_entries = 0;
1918         uint32_t idx, id_queue;
1919         uint16_t num_q;
1920
1921         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1922                 snprintf(xstats_names[cnt_used_entries].name,
1923                         sizeof(xstats_names[0].name),
1924                         "%s", rte_stats_strings[idx].name);
1925                 cnt_used_entries++;
1926         }
1927         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1928         for (id_queue = 0; id_queue < num_q; id_queue++) {
1929                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1930                         snprintf(xstats_names[cnt_used_entries].name,
1931                                 sizeof(xstats_names[0].name),
1932                                 "rx_q%u%s",
1933                                 id_queue, rte_rxq_stats_strings[idx].name);
1934                         cnt_used_entries++;
1935                 }
1936
1937         }
1938         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1939         for (id_queue = 0; id_queue < num_q; id_queue++) {
1940                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1941                         snprintf(xstats_names[cnt_used_entries].name,
1942                                 sizeof(xstats_names[0].name),
1943                                 "tx_q%u%s",
1944                                 id_queue, rte_txq_stats_strings[idx].name);
1945                         cnt_used_entries++;
1946                 }
1947         }
1948         return cnt_used_entries;
1949 }
1950
1951 /* retrieve ethdev extended statistics names */
1952 int
1953 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1954         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1955         uint64_t *ids)
1956 {
1957         struct rte_eth_xstat_name *xstats_names_copy;
1958         unsigned int no_basic_stat_requested = 1;
1959         unsigned int no_ext_stat_requested = 1;
1960         unsigned int expected_entries;
1961         unsigned int basic_count;
1962         struct rte_eth_dev *dev;
1963         unsigned int i;
1964         int ret;
1965
1966         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1967         dev = &rte_eth_devices[port_id];
1968
1969         basic_count = get_xstats_basic_count(dev);
1970         ret = get_xstats_count(port_id);
1971         if (ret < 0)
1972                 return ret;
1973         expected_entries = (unsigned int)ret;
1974
1975         /* Return max number of stats if no ids given */
1976         if (!ids) {
1977                 if (!xstats_names)
1978                         return expected_entries;
1979                 else if (xstats_names && size < expected_entries)
1980                         return expected_entries;
1981         }
1982
1983         if (ids && !xstats_names)
1984                 return -EINVAL;
1985
1986         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1987                 uint64_t ids_copy[size];
1988
1989                 for (i = 0; i < size; i++) {
1990                         if (ids[i] < basic_count) {
1991                                 no_basic_stat_requested = 0;
1992                                 break;
1993                         }
1994
1995                         /*
1996                          * Convert ids to xstats ids that PMD knows.
1997                          * ids known by user are basic + extended stats.
1998                          */
1999                         ids_copy[i] = ids[i] - basic_count;
2000                 }
2001
2002                 if (no_basic_stat_requested)
2003                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2004                                         xstats_names, ids_copy, size);
2005         }
2006
2007         /* Retrieve all stats */
2008         if (!ids) {
2009                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2010                                 expected_entries);
2011                 if (num_stats < 0 || num_stats > (int)expected_entries)
2012                         return num_stats;
2013                 else
2014                         return expected_entries;
2015         }
2016
2017         xstats_names_copy = calloc(expected_entries,
2018                 sizeof(struct rte_eth_xstat_name));
2019
2020         if (!xstats_names_copy) {
2021                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2022                 return -ENOMEM;
2023         }
2024
2025         if (ids) {
2026                 for (i = 0; i < size; i++) {
2027                         if (ids[i] >= basic_count) {
2028                                 no_ext_stat_requested = 0;
2029                                 break;
2030                         }
2031                 }
2032         }
2033
2034         /* Fill xstats_names_copy structure */
2035         if (ids && no_ext_stat_requested) {
2036                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2037         } else {
2038                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2039                         expected_entries);
2040                 if (ret < 0) {
2041                         free(xstats_names_copy);
2042                         return ret;
2043                 }
2044         }
2045
2046         /* Filter stats */
2047         for (i = 0; i < size; i++) {
2048                 if (ids[i] >= expected_entries) {
2049                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2050                         free(xstats_names_copy);
2051                         return -1;
2052                 }
2053                 xstats_names[i] = xstats_names_copy[ids[i]];
2054         }
2055
2056         free(xstats_names_copy);
2057         return size;
2058 }
2059
2060 int
2061 rte_eth_xstats_get_names(uint16_t port_id,
2062         struct rte_eth_xstat_name *xstats_names,
2063         unsigned int size)
2064 {
2065         struct rte_eth_dev *dev;
2066         int cnt_used_entries;
2067         int cnt_expected_entries;
2068         int cnt_driver_entries;
2069
2070         cnt_expected_entries = get_xstats_count(port_id);
2071         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2072                         (int)size < cnt_expected_entries)
2073                 return cnt_expected_entries;
2074
2075         /* port_id checked in get_xstats_count() */
2076         dev = &rte_eth_devices[port_id];
2077
2078         cnt_used_entries = rte_eth_basic_stats_get_names(
2079                 dev, xstats_names);
2080
2081         if (dev->dev_ops->xstats_get_names != NULL) {
2082                 /* If there are any driver-specific xstats, append them
2083                  * to end of list.
2084                  */
2085                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2086                         dev,
2087                         xstats_names + cnt_used_entries,
2088                         size - cnt_used_entries);
2089                 if (cnt_driver_entries < 0)
2090                         return eth_err(port_id, cnt_driver_entries);
2091                 cnt_used_entries += cnt_driver_entries;
2092         }
2093
2094         return cnt_used_entries;
2095 }
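/*
 * Illustrative sketch (editor's note, not part of the original source): the
 * usual two-call sizing pattern, querying with a NULL array first to learn
 * the required count, as implemented above.
 *
 *      int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *
 *      if (n > 0) {
 *              struct rte_eth_xstat_name *names =
 *                      calloc(n, sizeof(*names));
 *              if (names != NULL)
 *                      n = rte_eth_xstats_get_names(port_id, names, n);
 *              free(names);
 *      }
 */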
2096
2097
2098 static int
2099 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2100 {
2101         struct rte_eth_dev *dev;
2102         struct rte_eth_stats eth_stats;
2103         unsigned int count = 0, i, q;
2104         uint64_t val, *stats_ptr;
2105         uint16_t nb_rxqs, nb_txqs;
2106         int ret;
2107
2108         ret = rte_eth_stats_get(port_id, &eth_stats);
2109         if (ret < 0)
2110                 return ret;
2111
2112         dev = &rte_eth_devices[port_id];
2113
2114         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2115         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2116
2117         /* global stats */
2118         for (i = 0; i < RTE_NB_STATS; i++) {
2119                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2120                                         rte_stats_strings[i].offset);
2121                 val = *stats_ptr;
2122                 xstats[count++].value = val;
2123         }
2124
2125         /* per-rxq stats */
2126         for (q = 0; q < nb_rxqs; q++) {
2127                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2128                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2129                                         rte_rxq_stats_strings[i].offset +
2130                                         q * sizeof(uint64_t));
2131                         val = *stats_ptr;
2132                         xstats[count++].value = val;
2133                 }
2134         }
2135
2136         /* per-txq stats */
2137         for (q = 0; q < nb_txqs; q++) {
2138                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2139                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2140                                         rte_txq_stats_strings[i].offset +
2141                                         q * sizeof(uint64_t));
2142                         val = *stats_ptr;
2143                         xstats[count++].value = val;
2144                 }
2145         }
2146         return count;
2147 }
2148
2149 /* retrieve ethdev extended statistics */
2150 int
2151 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2152                          uint64_t *values, unsigned int size)
2153 {
2154         unsigned int no_basic_stat_requested = 1;
2155         unsigned int no_ext_stat_requested = 1;
2156         unsigned int num_xstats_filled;
2157         unsigned int basic_count;
2158         uint16_t expected_entries;
2159         struct rte_eth_dev *dev;
2160         unsigned int i;
2161         int ret;
2162
2163         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2164         ret = get_xstats_count(port_id);
2165         if (ret < 0)
2166                 return ret;
2167         expected_entries = (uint16_t)ret;
2168         struct rte_eth_xstat xstats[expected_entries];
2169         dev = &rte_eth_devices[port_id];
2170         basic_count = get_xstats_basic_count(dev);
2171
2172         /* Return max number of stats if no ids given */
2173         if (!ids) {
2174                 if (!values)
2175                         return expected_entries;
2176                 else if (values && size < expected_entries)
2177                         return expected_entries;
2178         }
2179
2180         if (ids && !values)
2181                 return -EINVAL;
2182
2183         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2185                 uint64_t ids_copy[size];
2186
2187                 for (i = 0; i < size; i++) {
2188                         if (ids[i] < basic_count) {
2189                                 no_basic_stat_requested = 0;
2190                                 break;
2191                         }
2192
2193                         /*
2194                          * Convert ids to xstats ids that PMD knows.
2195                          * ids known by user are basic + extended stats.
2196                          */
2197                         ids_copy[i] = ids[i] - basic_count;
2198                 }
2199
2200                 if (no_basic_stat_requested)
2201                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2202                                         values, size);
2203         }
2204
2205         if (ids) {
2206                 for (i = 0; i < size; i++) {
2207                         if (ids[i] >= basic_count) {
2208                                 no_ext_stat_requested = 0;
2209                                 break;
2210                         }
2211                 }
2212         }
2213
2214         /* Fill the xstats structure */
2215         if (ids && no_ext_stat_requested)
2216                 ret = rte_eth_basic_stats_get(port_id, xstats);
2217         else
2218                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2219
2220         if (ret < 0)
2221                 return ret;
2222         num_xstats_filled = (unsigned int)ret;
2223
2224         /* Return all stats */
2225         if (!ids) {
2226                 for (i = 0; i < num_xstats_filled; i++)
2227                         values[i] = xstats[i].value;
2228                 return expected_entries;
2229         }
2230
2231         /* Filter stats */
2232         for (i = 0; i < size; i++) {
2233                 if (ids[i] >= expected_entries) {
2234                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2235                         return -1;
2236                 }
2237                 values[i] = xstats[ids[i]].value;
2238         }
2239         return size;
2240 }
2241
2242 int
2243 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2244         unsigned int n)
2245 {
2246         struct rte_eth_dev *dev;
2247         unsigned int count = 0, i;
2248         signed int xcount = 0;
2249         uint16_t nb_rxqs, nb_txqs;
2250         int ret;
2251
2252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2253
2254         dev = &rte_eth_devices[port_id];
2255
2256         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2257         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2258
2259         /* Return generic statistics */
2260         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2261                 (nb_txqs * RTE_NB_TXQ_STATS);
2262
2263         /* implemented by the driver */
2264         if (dev->dev_ops->xstats_get != NULL) {
2265                 /* Retrieve the xstats from the driver at the end of the
2266                  * xstats struct.
2267                  */
2268                 xcount = (*dev->dev_ops->xstats_get)(dev,
2269                                      xstats ? xstats + count : NULL,
2270                                      (n > count) ? n - count : 0);
2271
2272                 if (xcount < 0)
2273                         return eth_err(port_id, xcount);
2274         }
2275
2276         if (n < count + xcount || xstats == NULL)
2277                 return count + xcount;
2278
2279         /* now fill the xstats structure */
2280         ret = rte_eth_basic_stats_get(port_id, xstats);
2281         if (ret < 0)
2282                 return ret;
2283         count = ret;
2284
2285         for (i = 0; i < count; i++)
2286                 xstats[i].id = i;
2287         /* add an offset to driver-specific stats */
2288         for ( ; i < count + xcount; i++)
2289                 xstats[i].id += count;
2290
2291         return count + xcount;
2292 }
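/*
 * Illustrative sketch (editor's note, not part of the original source):
 * dumping all xstats with their names.  The id assigned above indexes the
 * name array, so the two lists can be joined directly.  "port_id" is
 * assumed valid.
 *
 *      int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *      if (n <= 0)
 *              return;
 *      struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *      struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *      if (vals && names &&
 *          rte_eth_xstats_get(port_id, vals, n) == n &&
 *          rte_eth_xstats_get_names(port_id, names, n) == n)
 *              for (i = 0; i < n; i++)
 *                      printf("%s: %" PRIu64 "\n",
 *                             names[vals[i].id].name, vals[i].value);
 *      free(vals);
 *      free(names);
 */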
2293
2294 /* reset ethdev extended statistics */
2295 void
2296 rte_eth_xstats_reset(uint16_t port_id)
2297 {
2298         struct rte_eth_dev *dev;
2299
2300         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2301         dev = &rte_eth_devices[port_id];
2302
2303         /* implemented by the driver */
2304         if (dev->dev_ops->xstats_reset != NULL) {
2305                 (*dev->dev_ops->xstats_reset)(dev);
2306                 return;
2307         }
2308
2309         /* fallback to default */
2310         rte_eth_stats_reset(port_id);
2311 }
2312
2313 static int
2314 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2315                 uint8_t is_rx)
2316 {
2317         struct rte_eth_dev *dev;
2318
2319         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2320
2321         dev = &rte_eth_devices[port_id];
2322
2323         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2324         return (*dev->dev_ops->queue_stats_mapping_set)
2325                         (dev, queue_id, stat_idx, is_rx);
2326 }
2327
2328
2329 int
2330 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2331                 uint8_t stat_idx)
2332 {
2333         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2334                                                 stat_idx, STAT_QMAP_TX));
2335 }
2336
2337
2338 int
2339 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2340                 uint8_t stat_idx)
2341 {
2342         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2343                                                 stat_idx, STAT_QMAP_RX));
2344 }
2345
2346 int
2347 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2348 {
2349         struct rte_eth_dev *dev;
2350
2351         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2352         dev = &rte_eth_devices[port_id];
2353
2354         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2355         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2356                                                         fw_version, fw_size));
2357 }
2358
2359 void
2360 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2361 {
2362         struct rte_eth_dev *dev;
2363         const struct rte_eth_desc_lim lim = {
2364                 .nb_max = UINT16_MAX,
2365                 .nb_min = 0,
2366                 .nb_align = 1,
2367         };
2368
2369         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2370         dev = &rte_eth_devices[port_id];
2371
2372         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2373         dev_info->rx_desc_lim = lim;
2374         dev_info->tx_desc_lim = lim;
2375         dev_info->device = dev->device;
2376
2377         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2378         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2379         dev_info->driver_name = dev->device->driver->name;
2380         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2381         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2382
2383         dev_info->dev_flags = &dev->data->dev_flags;
2384 }
2385
2386 int
2387 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2388                                  uint32_t *ptypes, int num)
2389 {
2390         int i, j;
2391         struct rte_eth_dev *dev;
2392         const uint32_t *all_ptypes;
2393
2394         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2395         dev = &rte_eth_devices[port_id];
2396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2397         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2398
2399         if (!all_ptypes)
2400                 return 0;
2401
2402         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2403                 if (all_ptypes[i] & ptype_mask) {
2404                         if (j < num)
2405                                 ptypes[j] = all_ptypes[i];
2406                         j++;
2407                 }
2408
2409         return j;
2410 }
2411
2412 void
2413 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2414 {
2415         struct rte_eth_dev *dev;
2416
2417         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2418         dev = &rte_eth_devices[port_id];
2419         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2420 }
2421
2422
2423 int
2424 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2425 {
2426         struct rte_eth_dev *dev;
2427
2428         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2429
2430         dev = &rte_eth_devices[port_id];
2431         *mtu = dev->data->mtu;
2432         return 0;
2433 }
2434
2435 int
2436 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2437 {
2438         int ret;
2439         struct rte_eth_dev *dev;
2440
2441         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2442         dev = &rte_eth_devices[port_id];
2443         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2444
2445         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2446         if (!ret)
2447                 dev->data->mtu = mtu;
2448
2449         return eth_err(port_id, ret);
2450 }
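/*
 * Illustrative sketch (editor's note, not part of the original source): a
 * failed driver call leaves dev->data->mtu untouched, so a get after set
 * reflects the value actually in effect.  9000 is an example jumbo MTU.
 *
 *      uint16_t mtu;
 *
 *      if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *              printf("port %u: jumbo MTU rejected\n", port_id);
 *      rte_eth_dev_get_mtu(port_id, &mtu);
 */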
2451
2452 int
2453 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2454 {
2455         struct rte_eth_dev *dev;
2456         int ret;
2457
2458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2459         dev = &rte_eth_devices[port_id];
2460         if (!(dev->data->dev_conf.rxmode.offloads &
2461               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2462                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2463                 return -ENOSYS;
2464         }
2465
2466         if (vlan_id > 4095) {
2467                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2468                                 port_id, (unsigned) vlan_id);
2469                 return -EINVAL;
2470         }
2471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2472
2473         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2474         if (ret == 0) {
2475                 struct rte_vlan_filter_conf *vfc;
2476                 int vidx;
2477                 int vbit;
2478
2479                 vfc = &dev->data->vlan_filter_conf;
2480                 vidx = vlan_id / 64;
2481                 vbit = vlan_id % 64;
2482
2483                 if (on)
2484                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2485                 else
2486                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2487         }
2488
2489         return eth_err(port_id, ret);
2490 }
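/*
 * Illustrative sketch (editor's note, not part of the original source):
 * DEV_RX_OFFLOAD_VLAN_FILTER must be enabled at configure time, otherwise
 * the filter call above bails out with -ENOSYS.  A minimal sequence, with
 * VLAN id 100 as an example value:
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *      rte_eth_dev_configure(port_id, 1, 1, &conf);
 *      ...
 *      rte_eth_dev_vlan_filter(port_id, 100, 1);
 */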
2491
2492 int
2493 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2494                                     int on)
2495 {
2496         struct rte_eth_dev *dev;
2497
2498         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2499         dev = &rte_eth_devices[port_id];
2500         if (rx_queue_id >= dev->data->nb_rx_queues) {
2501                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2502                 return -EINVAL;
2503         }
2504
2505         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2506         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2507
2508         return 0;
2509 }
2510
2511 int
2512 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2513                                 enum rte_vlan_type vlan_type,
2514                                 uint16_t tpid)
2515 {
2516         struct rte_eth_dev *dev;
2517
2518         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2519         dev = &rte_eth_devices[port_id];
2520         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2521
2522         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2523                                                                tpid));
2524 }
2525
2526 int
2527 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2528 {
2529         struct rte_eth_dev *dev;
2530         int ret = 0;
2531         int mask = 0;
2532         int cur, org = 0;
2533         uint64_t orig_offloads;
2534
2535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2536         dev = &rte_eth_devices[port_id];
2537
2538         /* save original values in case of failure */
2539         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2540
2541         /* check which options were changed by the application */
2542         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2543         org = !!(dev->data->dev_conf.rxmode.offloads &
2544                  DEV_RX_OFFLOAD_VLAN_STRIP);
2545         if (cur != org) {
2546                 if (cur)
2547                         dev->data->dev_conf.rxmode.offloads |=
2548                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2549                 else
2550                         dev->data->dev_conf.rxmode.offloads &=
2551                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2552                 mask |= ETH_VLAN_STRIP_MASK;
2553         }
2554
2555         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2556         org = !!(dev->data->dev_conf.rxmode.offloads &
2557                  DEV_RX_OFFLOAD_VLAN_FILTER);
2558         if (cur != org) {
2559                 if (cur)
2560                         dev->data->dev_conf.rxmode.offloads |=
2561                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2562                 else
2563                         dev->data->dev_conf.rxmode.offloads &=
2564                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2565                 mask |= ETH_VLAN_FILTER_MASK;
2566         }
2567
2568         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2569         org = !!(dev->data->dev_conf.rxmode.offloads &
2570                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2571         if (cur != org) {
2572                 if (cur)
2573                         dev->data->dev_conf.rxmode.offloads |=
2574                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2575                 else
2576                         dev->data->dev_conf.rxmode.offloads &=
2577                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2578                 mask |= ETH_VLAN_EXTEND_MASK;
2579         }
2580
2581         /* no change */
2582         if (mask == 0)
2583                 return ret;
2584
2585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2586         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2587         if (ret) {
2588                 /* hit an error, restore the original values */
2589                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2590         }
2591
2592         return eth_err(port_id, ret);
2593 }
2594
2595 int
2596 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2597 {
2598         struct rte_eth_dev *dev;
2599         int ret = 0;
2600
2601         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2602         dev = &rte_eth_devices[port_id];
2603
2604         if (dev->data->dev_conf.rxmode.offloads &
2605             DEV_RX_OFFLOAD_VLAN_STRIP)
2606                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2607
2608         if (dev->data->dev_conf.rxmode.offloads &
2609             DEV_RX_OFFLOAD_VLAN_FILTER)
2610                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2611
2612         if (dev->data->dev_conf.rxmode.offloads &
2613             DEV_RX_OFFLOAD_VLAN_EXTEND)
2614                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2615
2616         return ret;
2617 }
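/*
 * Illustrative sketch (editor's note, not part of the original source): a
 * read-modify-write of the VLAN offload mask returned above, toggling
 * stripping at runtime.
 *
 *      int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *      mask |= ETH_VLAN_STRIP_OFFLOAD;
 *      if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *              printf("port %u: cannot enable VLAN stripping\n", port_id);
 */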
2618
2619 int
2620 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2621 {
2622         struct rte_eth_dev *dev;
2623
2624         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2625         dev = &rte_eth_devices[port_id];
2626         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2627
2628         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2629 }
2630
2631 int
2632 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2633 {
2634         struct rte_eth_dev *dev;
2635
2636         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2637         dev = &rte_eth_devices[port_id];
2638         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2639         memset(fc_conf, 0, sizeof(*fc_conf));
2640         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2641 }
2642
2643 int
2644 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2645 {
2646         struct rte_eth_dev *dev;
2647
2648         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2649         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2650                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2651                 return -EINVAL;
2652         }
2653
2654         dev = &rte_eth_devices[port_id];
2655         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2656         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2657 }
2658
2659 int
2660 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2661                                    struct rte_eth_pfc_conf *pfc_conf)
2662 {
2663         struct rte_eth_dev *dev;
2664
2665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2666         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2667                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2668                 return -EINVAL;
2669         }
2670
2671         dev = &rte_eth_devices[port_id];
2672         /* High water / low water validation is device specific */
2673         if (*dev->dev_ops->priority_flow_ctrl_set)
2674                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2675                                         (dev, pfc_conf));
2676         return -ENOTSUP;
2677 }
2678
2679 static int
2680 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2681                         uint16_t reta_size)
2682 {
2683         uint16_t i, num;
2684
2685         if (!reta_conf)
2686                 return -EINVAL;
2687
2688         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2689         for (i = 0; i < num; i++) {
2690                 if (reta_conf[i].mask)
2691                         return 0;
2692         }
2693
2694         return -EINVAL;
2695 }
2696
2697 static int
2698 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2699                          uint16_t reta_size,
2700                          uint16_t max_rxq)
2701 {
2702         uint16_t i, idx, shift;
2703
2704         if (!reta_conf)
2705                 return -EINVAL;
2706
2707         if (max_rxq == 0) {
2708                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2709                 return -EINVAL;
2710         }
2711
2712         for (i = 0; i < reta_size; i++) {
2713                 idx = i / RTE_RETA_GROUP_SIZE;
2714                 shift = i % RTE_RETA_GROUP_SIZE;
2715                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2716                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2717                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2718                                 "the maximum rxq index: %u\n", idx, shift,
2719                                 reta_conf[idx].reta[shift], max_rxq);
2720                         return -EINVAL;
2721                 }
2722         }
2723
2724         return 0;
2725 }
2726
2727 int
2728 rte_eth_dev_rss_reta_update(uint16_t port_id,
2729                             struct rte_eth_rss_reta_entry64 *reta_conf,
2730                             uint16_t reta_size)
2731 {
2732         struct rte_eth_dev *dev;
2733         int ret;
2734
2735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2736         /* Check mask bits */
2737         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2738         if (ret < 0)
2739                 return ret;
2740
2741         dev = &rte_eth_devices[port_id];
2742
2743         /* Check entry value */
2744         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2745                                 dev->data->nb_rx_queues);
2746         if (ret < 0)
2747                 return ret;
2748
2749         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2750         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2751                                                              reta_size));
2752 }
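/*
 * Illustrative sketch (editor's note, not part of the original source):
 * populating a redirection table with a round-robin spread over the RX
 * queues, using the same idx/shift decomposition as the checks above.
 * "dev_info", "nb_rx_queues" and "port_id" are assumed to come from the
 * application, with dev_info filled by rte_eth_dev_info_get().
 *
 *      struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 /
 *                                           RTE_RETA_GROUP_SIZE];
 *      uint16_t i;
 *
 *      memset(reta, 0, sizeof(reta));
 *      for (i = 0; i < dev_info.reta_size; i++) {
 *              reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                      1ULL << (i % RTE_RETA_GROUP_SIZE);
 *              reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                      i % nb_rx_queues;
 *      }
 *      rte_eth_dev_rss_reta_update(port_id, reta, dev_info.reta_size);
 */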
2753
2754 int
2755 rte_eth_dev_rss_reta_query(uint16_t port_id,
2756                            struct rte_eth_rss_reta_entry64 *reta_conf,
2757                            uint16_t reta_size)
2758 {
2759         struct rte_eth_dev *dev;
2760         int ret;
2761
2762         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2763
2764         /* Check mask bits */
2765         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2766         if (ret < 0)
2767                 return ret;
2768
2769         dev = &rte_eth_devices[port_id];
2770         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2771         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2772                                                             reta_size));
2773 }
2774
2775 int
2776 rte_eth_dev_rss_hash_update(uint16_t port_id,
2777                             struct rte_eth_rss_conf *rss_conf)
2778 {
2779         struct rte_eth_dev *dev;
2780         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2781
2782         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2783         dev = &rte_eth_devices[port_id];
2784         rte_eth_dev_info_get(port_id, &dev_info);
2785         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2786             dev_info.flow_type_rss_offloads) {
2787                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
2788                                     "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2789                                     port_id,
2790                                     rss_conf->rss_hf,
2791                                     dev_info.flow_type_rss_offloads);
2792                 return -EINVAL;
2793         }
2794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2795         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2796                                                                  rss_conf));
2797 }
2798
2799 int
2800 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2801                               struct rte_eth_rss_conf *rss_conf)
2802 {
2803         struct rte_eth_dev *dev;
2804
2805         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2806         dev = &rte_eth_devices[port_id];
2807         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2808         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2809                                                                    rss_conf));
2810 }
2811
2812 int
2813 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2814                                 struct rte_eth_udp_tunnel *udp_tunnel)
2815 {
2816         struct rte_eth_dev *dev;
2817
2818         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2819         if (udp_tunnel == NULL) {
2820                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2821                 return -EINVAL;
2822         }
2823
2824         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2825                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2826                 return -EINVAL;
2827         }
2828
2829         dev = &rte_eth_devices[port_id];
2830         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2831         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2832                                                                 udp_tunnel));
2833 }
2834
2835 int
2836 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2837                                    struct rte_eth_udp_tunnel *udp_tunnel)
2838 {
2839         struct rte_eth_dev *dev;
2840
2841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2842         dev = &rte_eth_devices[port_id];
2843
2844         if (udp_tunnel == NULL) {
2845                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2846                 return -EINVAL;
2847         }
2848
2849         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2850                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2851                 return -EINVAL;
2852         }
2853
2854         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2855         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2856                                                                 udp_tunnel));
2857 }
2858
2859 int
2860 rte_eth_led_on(uint16_t port_id)
2861 {
2862         struct rte_eth_dev *dev;
2863
2864         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2865         dev = &rte_eth_devices[port_id];
2866         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2867         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2868 }
2869
2870 int
2871 rte_eth_led_off(uint16_t port_id)
2872 {
2873         struct rte_eth_dev *dev;
2874
2875         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2876         dev = &rte_eth_devices[port_id];
2877         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2878         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2879 }
2880
2881 /*
2882  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2883  * an empty spot.
2884  */
2885 static int
2886 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2887 {
2888         struct rte_eth_dev_info dev_info;
2889         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2890         unsigned i;
2891
2892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2893         rte_eth_dev_info_get(port_id, &dev_info);
2894
2895         for (i = 0; i < dev_info.max_mac_addrs; i++)
2896                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2897                         return i;
2898
2899         return -1;
2900 }
2901
2902 static const struct ether_addr null_mac_addr;
2903
2904 int
2905 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2906                         uint32_t pool)
2907 {
2908         struct rte_eth_dev *dev;
2909         int index;
2910         uint64_t pool_mask;
2911         int ret;
2912
2913         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2914         dev = &rte_eth_devices[port_id];
2915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2916
2917         if (is_zero_ether_addr(addr)) {
2918                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2919                         port_id);
2920                 return -EINVAL;
2921         }
2922         if (pool >= ETH_64_POOLS) {
2923                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2924                 return -EINVAL;
2925         }
2926
2927         index = get_mac_addr_index(port_id, addr);
2928         if (index < 0) {
2929                 index = get_mac_addr_index(port_id, &null_mac_addr);
2930                 if (index < 0) {
2931                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2932                                 port_id);
2933                         return -ENOSPC;
2934                 }
2935         } else {
2936                 pool_mask = dev->data->mac_pool_sel[index];
2937
2938                 /* If both the MAC address and pool are already set, do nothing */
2939                 if (pool_mask & (1ULL << pool))
2940                         return 0;
2941         }
2942
2943         /* Update NIC */
2944         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2945
2946         if (ret == 0) {
2947                 /* Update address in NIC data structure */
2948                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2949
2950                 /* Update pool bitmap in NIC data structure */
2951                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2952         }
2953
2954         return eth_err(port_id, ret);
2955 }
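/*
 * Illustrative sketch (editor's note, not part of the original source):
 * adding a secondary unicast address to pool 0.  The locally administered
 * address bytes below are an example.
 *
 *      struct ether_addr addr = {
 *              .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *      };
 *
 *      if (rte_eth_dev_mac_addr_add(port_id, &addr, 0) != 0)
 *              printf("port %u: could not add MAC\n", port_id);
 */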
2956
2957 int
2958 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2959 {
2960         struct rte_eth_dev *dev;
2961         int index;
2962
2963         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2964         dev = &rte_eth_devices[port_id];
2965         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2966
2967         index = get_mac_addr_index(port_id, addr);
2968         if (index == 0) {
2969                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2970                 return -EADDRINUSE;
2971         } else if (index < 0)
2972                 return 0;  /* Do nothing if address wasn't found */
2973
2974         /* Update NIC */
2975         (*dev->dev_ops->mac_addr_remove)(dev, index);
2976
2977         /* Update address in NIC data structure */
2978         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2979
2980         /* reset pool bitmap */
2981         dev->data->mac_pool_sel[index] = 0;
2982
2983         return 0;
2984 }
2985
2986 int
2987 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2988 {
2989         struct rte_eth_dev *dev;
2990         int ret;
2991
2992         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2993
2994         if (!is_valid_assigned_ether_addr(addr))
2995                 return -EINVAL;
2996
2997         dev = &rte_eth_devices[port_id];
2998         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2999
3000         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3001         if (ret < 0)
3002                 return ret;
3003
3004         /* Update default address in NIC data structure */
3005         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3006
3007         return 0;
3008 }
3009
3010
3011 /*
3012  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3013  * an empty spot.
3014  */
3015 static int
3016 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3017 {
3018         struct rte_eth_dev_info dev_info;
3019         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3020         unsigned i;
3021
3022         rte_eth_dev_info_get(port_id, &dev_info);
3023         if (!dev->data->hash_mac_addrs)
3024                 return -1;
3025
3026         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3027                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3028                         ETHER_ADDR_LEN) == 0)
3029                         return i;
3030
3031         return -1;
3032 }
3033
3034 int
3035 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3036                                 uint8_t on)
3037 {
3038         int index;
3039         int ret;
3040         struct rte_eth_dev *dev;
3041
3042         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3043
3044         dev = &rte_eth_devices[port_id];
3045         if (is_zero_ether_addr(addr)) {
3046                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3047                         port_id);
3048                 return -EINVAL;
3049         }
3050
3051         index = get_hash_mac_addr_index(port_id, addr);
3052         /* Check if it's already there, and do nothing */
3053         if ((index >= 0) && on)
3054                 return 0;
3055
3056         if (index < 0) {
3057                 if (!on) {
3058                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3059                                 "set in UTA\n", port_id);
3060                         return -EINVAL;
3061                 }
3062
3063                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3064                 if (index < 0) {
3065                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3066                                         port_id);
3067                         return -ENOSPC;
3068                 }
3069         }
3070
3071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3072         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3073         if (ret == 0) {
3074                 /* Update address in NIC data structure */
3075                 if (on)
3076                         ether_addr_copy(addr,
3077                                         &dev->data->hash_mac_addrs[index]);
3078                 else
3079                         ether_addr_copy(&null_mac_addr,
3080                                         &dev->data->hash_mac_addrs[index]);
3081         }
3082
3083         return eth_err(port_id, ret);
3084 }
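
/*
 * Illustrative usage sketch (editorial addition): accept one additional
 * unicast address through the hash table without consuming an exact-match
 * MAC slot. Assumes the PMD implements uc_hash_table_set; the helper name
 * is invented for this example.
 */
static __rte_unused int
ethdev_example_uta_add(uint16_t port_id, struct ether_addr *mac)
{
        /* on = 1 adds the address; on = 0 would remove it again */
        return rte_eth_dev_uc_hash_table_set(port_id, mac, 1);
}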
3085
3086 int
3087 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3088 {
3089         struct rte_eth_dev *dev;
3090
3091         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3092
3093         dev = &rte_eth_devices[port_id];
3094
3095         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3096         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3097                                                                        on));
3098 }
3099
3100 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3101                                         uint16_t tx_rate)
3102 {
3103         struct rte_eth_dev *dev;
3104         struct rte_eth_dev_info dev_info;
3105         struct rte_eth_link link;
3106
3107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3108
3109         dev = &rte_eth_devices[port_id];
3110         rte_eth_dev_info_get(port_id, &dev_info);
3111         link = dev->data->dev_link;
3112
3113         if (queue_idx >= dev_info.max_tx_queues) {
3114                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
3115                                 "invalid queue id=%d\n", port_id, queue_idx);
3116                 return -EINVAL;
3117         }
3118
3119         if (tx_rate > link.link_speed) {
3120                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
3121                                 "bigger than link speed= %d\n",
3122                         tx_rate, link.link_speed);
3123                 return -EINVAL;
3124         }
3125
3126         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3127         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3128                                                         queue_idx, tx_rate));
3129 }
3130
3131 int
3132 rte_eth_mirror_rule_set(uint16_t port_id,
3133                         struct rte_eth_mirror_conf *mirror_conf,
3134                         uint8_t rule_id, uint8_t on)
3135 {
3136         struct rte_eth_dev *dev;
3137
3138         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3139         if (mirror_conf->rule_type == 0) {
3140                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3141                 return -EINVAL;
3142         }
3143
3144         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3145                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3146                                 ETH_64_POOLS - 1);
3147                 return -EINVAL;
3148         }
3149
3150         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3151              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3152             (mirror_conf->pool_mask == 0)) {
3153                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
3154                 return -EINVAL;
3155         }
3156
3157         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3158             mirror_conf->vlan.vlan_mask == 0) {
3159                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
3160                 return -EINVAL;
3161         }
3162
3163         dev = &rte_eth_devices[port_id];
3164         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3165
3166         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3167                                                 mirror_conf, rule_id, on));
3168 }
3169
3170 int
3171 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3172 {
3173         struct rte_eth_dev *dev;
3174
3175         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3176
3177         dev = &rte_eth_devices[port_id];
3178         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3179
3180         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3181                                                                    rule_id));
3182 }
3183
3184 RTE_INIT(eth_dev_init_cb_lists)
3185 {
3186         int i;
3187
3188         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3189                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3190 }
3191
3192 int
3193 rte_eth_dev_callback_register(uint16_t port_id,
3194                         enum rte_eth_event_type event,
3195                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3196 {
3197         struct rte_eth_dev *dev;
3198         struct rte_eth_dev_callback *user_cb;
3199         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3200         uint16_t last_port;
3201
3202         if (!cb_fn)
3203                 return -EINVAL;
3204
3205         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3206                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3207                 return -EINVAL;
3208         }
3209
3210         if (port_id == RTE_ETH_ALL) {
3211                 next_port = 0;
3212                 last_port = RTE_MAX_ETHPORTS - 1;
3213         } else {
3214                 next_port = last_port = port_id;
3215         }
3216
3217         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3218
3219         do {
3220                 dev = &rte_eth_devices[next_port];
3221
3222                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3223                         if (user_cb->cb_fn == cb_fn &&
3224                                 user_cb->cb_arg == cb_arg &&
3225                                 user_cb->event == event) {
3226                                 break;
3227                         }
3228                 }
3229
3230                 /* create a new callback. */
3231                 if (user_cb == NULL) {
3232                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3233                                 sizeof(struct rte_eth_dev_callback), 0);
3234                         if (user_cb != NULL) {
3235                                 user_cb->cb_fn = cb_fn;
3236                                 user_cb->cb_arg = cb_arg;
3237                                 user_cb->event = event;
3238                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3239                                                   user_cb, next);
3240                         } else {
3241                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3242                                 rte_eth_dev_callback_unregister(port_id, event,
3243                                                                 cb_fn, cb_arg);
3244                                 return -ENOMEM;
3245                         }
3246
3247                 }
3248         } while (++next_port <= last_port);
3249
3250         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3251         return 0;
3252 }
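
/*
 * Illustrative usage sketch (editorial addition): register one callback for
 * link-status-change events on every port via RTE_ETH_ALL. The callback and
 * helper names are assumptions for this example; a real handler would also
 * query the new link state.
 */
static int
ethdev_example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
                void *cb_arg, void *ret_param)
{
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);
        if (event == RTE_ETH_EVENT_INTR_LSC)
                ethdev_log(INFO, "link state changed on port %u", port_id);
        return 0;
}

static __rte_unused int
ethdev_example_register_lsc(void)
{
        return rte_eth_dev_callback_register(RTE_ETH_ALL,
                        RTE_ETH_EVENT_INTR_LSC, ethdev_example_lsc_cb, NULL);
}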
3253
3254 int
3255 rte_eth_dev_callback_unregister(uint16_t port_id,
3256                         enum rte_eth_event_type event,
3257                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3258 {
3259         int ret;
3260         struct rte_eth_dev *dev;
3261         struct rte_eth_dev_callback *cb, *next;
3262         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3263         uint16_t last_port;
3264
3265         if (!cb_fn)
3266                 return -EINVAL;
3267
3268         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3269                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3270                 return -EINVAL;
3271         }
3272
3273         if (port_id == RTE_ETH_ALL) {
3274                 next_port = 0;
3275                 last_port = RTE_MAX_ETHPORTS - 1;
3276         } else {
3277                 next_port = last_port = port_id;
3278         }
3279
3280         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3281
3282         do {
3283                 dev = &rte_eth_devices[next_port];
3284                 ret = 0;
3285                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3286                      cb = next) {
3287
3288                         next = TAILQ_NEXT(cb, next);
3289
3290                         if (cb->cb_fn != cb_fn || cb->event != event ||
3291                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3292                                 continue;
3293
3294                         /*
3295                          * if this callback is not executing right now,
3296                          * then remove it.
3297                          */
3298                         if (cb->active == 0) {
3299                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3300                                 rte_free(cb);
3301                         } else {
3302                                 ret = -EAGAIN;
3303                         }
3304                 }
3305         } while (++next_port <= last_port);
3306
3307         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3308         return ret;
3309 }
3310
3311 int
3312 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3313         enum rte_eth_event_type event, void *ret_param)
3314 {
3315         struct rte_eth_dev_callback *cb_lst;
3316         struct rte_eth_dev_callback dev_cb;
3317         int rc = 0;
3318
3319         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3320         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3321                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3322                         continue;
3323                 dev_cb = *cb_lst;
3324                 cb_lst->active = 1;
3325                 if (ret_param != NULL)
3326                         dev_cb.ret_param = ret_param;
3327
3328                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3329                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3330                                 dev_cb.cb_arg, dev_cb.ret_param);
3331                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3332                 cb_lst->active = 0;
3333         }
3334         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3335         return rc;
3336 }
3337
3338 int
3339 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3340 {
3341         uint32_t vec;
3342         struct rte_eth_dev *dev;
3343         struct rte_intr_handle *intr_handle;
3344         uint16_t qid;
3345         int rc;
3346
3347         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3348
3349         dev = &rte_eth_devices[port_id];
3350
3351         if (!dev->intr_handle) {
3352                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3353                 return -ENOTSUP;
3354         }
3355
3356         intr_handle = dev->intr_handle;
3357         if (!intr_handle->intr_vec) {
3358                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3359                 return -EPERM;
3360         }
3361
3362         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3363                 vec = intr_handle->intr_vec[qid];
3364                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3365                 if (rc && rc != -EEXIST) {
3366                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3367                                         " op %d epfd %d vec %u\n",
3368                                         port_id, qid, op, epfd, vec);
3369                 }
3370         }
3371
3372         return 0;
3373 }
3374
3375 const struct rte_memzone *
3376 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3377                          uint16_t queue_id, size_t size, unsigned align,
3378                          int socket_id)
3379 {
3380         char z_name[RTE_MEMZONE_NAMESIZE];
3381         const struct rte_memzone *mz;
3382
3383         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3384                  dev->device->driver->name, ring_name,
3385                  dev->data->port_id, queue_id);
3386
3387         mz = rte_memzone_lookup(z_name);
3388         if (mz)
3389                 return mz;
3390
3391         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3392                         RTE_MEMZONE_IOVA_CONTIG, align);
3393 }
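
/*
 * Illustrative usage sketch (editorial addition): how a PMD RX queue setup
 * routine would typically reserve descriptor-ring memory through
 * rte_eth_dma_zone_reserve(). The ring name, alignment and sizing are
 * assumptions for this example.
 */
static __rte_unused const struct rte_memzone *
ethdev_example_reserve_rx_ring(struct rte_eth_dev *dev, uint16_t queue_id,
                uint16_t nb_desc, size_t desc_size)
{
        /* an existing zone is reused on restart thanks to the lookup above */
        return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
                        (size_t)nb_desc * desc_size, RTE_CACHE_LINE_SIZE,
                        dev->data->numa_node);
}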
3394
3395 int __rte_experimental
3396 rte_eth_dev_create(struct rte_device *device, const char *name,
3397         size_t priv_data_size,
3398         ethdev_bus_specific_init ethdev_bus_specific_init,
3399         void *bus_init_params,
3400         ethdev_init_t ethdev_init, void *init_params)
3401 {
3402         struct rte_eth_dev *ethdev;
3403         int retval;
3404
3405         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3406
3407         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3408                 ethdev = rte_eth_dev_allocate(name);
3409                 if (!ethdev) {
3410                         retval = -ENODEV;
3411                         goto probe_failed;
3412                 }
3413
3414                 if (priv_data_size) {
3415                         ethdev->data->dev_private = rte_zmalloc_socket(
3416                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3417                                 device->numa_node);
3418
3419                         if (!ethdev->data->dev_private) {
3420                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3421                                 retval = -ENOMEM;
3422                                 goto probe_failed;
3423                         }
3424                 }
3425         } else {
3426                 ethdev = rte_eth_dev_attach_secondary(name);
3427                 if (!ethdev) {
3428                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3429                                 "ethdev doesn't exist\n");
3430                         retval = -ENODEV;
3431                         goto probe_failed;
3432                 }
3433         }
3434
3435         ethdev->device = device;
3436
3437         if (ethdev_bus_specific_init) {
3438                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3439                 if (retval) {
3440                         RTE_LOG(ERR, EAL,
3441                                 "ethdev bus specific initialisation failed\n");
3442                         goto probe_failed;
3443                 }
3444         }
3445
3446         retval = ethdev_init(ethdev, init_params);
3447         if (retval) {
3448                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3449                 goto probe_failed;
3450         }
3451
3452         return retval;
3453 probe_failed:
3454         /* free ports private data if primary process */
3455         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3456                 rte_free(ethdev->data->dev_private);
3457
3458         rte_eth_dev_release_port(ethdev);
3459
3460         return retval;
3461 }
3462
3463 int __rte_experimental
3464 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3465         ethdev_uninit_t ethdev_uninit)
3466 {
3467         int ret;
3468
3469         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3470         if (!ethdev)
3471                 return -ENODEV;
3472
3473         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3474         ret = ethdev_uninit(ethdev);
3475         if (ret)
3476                 return ret;
3479
3480         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3481                 rte_free(ethdev->data->dev_private);
3482
3483         ethdev->data->dev_private = NULL;
3484
3485         return rte_eth_dev_release_port(ethdev);
3486 }
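
/*
 * Illustrative usage sketch (editorial addition): skeleton of a bus probe
 * hook built on rte_eth_dev_create(). The private-data struct, the init
 * callback body and all names are assumptions for this example; a real
 * driver would populate dev_ops and MAC storage in its init callback.
 */
struct ethdev_example_priv {
        uint64_t flags;
};

static int
ethdev_example_init(struct rte_eth_dev *ethdev, void *init_params)
{
        RTE_SET_USED(init_params);
        RTE_SET_USED(ethdev);
        /* set ethdev->dev_ops, allocate ethdev->data->mac_addrs, ... */
        return 0;
}

static __rte_unused int
ethdev_example_probe(struct rte_device *device, const char *name)
{
        /* no bus-specific init step is needed in this sketch */
        return rte_eth_dev_create(device, name,
                        sizeof(struct ethdev_example_priv),
                        NULL, NULL, ethdev_example_init, NULL);
}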
3487
3488 int
3489 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3490                           int epfd, int op, void *data)
3491 {
3492         uint32_t vec;
3493         struct rte_eth_dev *dev;
3494         struct rte_intr_handle *intr_handle;
3495         int rc;
3496
3497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3498
3499         dev = &rte_eth_devices[port_id];
3500         if (queue_id >= dev->data->nb_rx_queues) {
3501                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3502                 return -EINVAL;
3503         }
3504
3505         if (!dev->intr_handle) {
3506                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3507                 return -ENOTSUP;
3508         }
3509
3510         intr_handle = dev->intr_handle;
3511         if (!intr_handle->intr_vec) {
3512                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3513                 return -EPERM;
3514         }
3515
3516         vec = intr_handle->intr_vec[queue_id];
3517         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3518         if (rc && rc != -EEXIST) {
3519                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3520                                 " op %d epfd %d vec %u\n",
3521                                 port_id, queue_id, op, epfd, vec);
3522                 return rc;
3523         }
3524
3525         return 0;
3526 }
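
/*
 * Illustrative usage sketch (editorial addition): let an application sleep
 * in epoll until a given RX queue raises an interrupt. Assumes the port was
 * configured with RX interrupts enabled; the helper name is invented for
 * this example.
 */
static __rte_unused int
ethdev_example_arm_rxq_intr(uint16_t port_id, uint16_t queue_id, int epfd)
{
        int ret;

        /* map the queue's interrupt vector into the epoll instance */
        ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, epfd,
                        RTE_INTR_EVENT_ADD, NULL);
        if (ret != 0)
                return ret;

        /* unmask the interrupt; epoll_wait() on epfd will now wake up */
        return rte_eth_dev_rx_intr_enable(port_id, queue_id);
}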
3527
3528 int
3529 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3530                            uint16_t queue_id)
3531 {
3532         struct rte_eth_dev *dev;
3533
3534         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3535
3536         dev = &rte_eth_devices[port_id];
3537
3538         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3539         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3540                                                                 queue_id));
3541 }
3542
3543 int
3544 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3545                             uint16_t queue_id)
3546 {
3547         struct rte_eth_dev *dev;
3548
3549         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3550
3551         dev = &rte_eth_devices[port_id];
3552
3553         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3554         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3555                                                                 queue_id));
3556 }
3557
3559 int
3560 rte_eth_dev_filter_supported(uint16_t port_id,
3561                              enum rte_filter_type filter_type)
3562 {
3563         struct rte_eth_dev *dev;
3564
3565         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3566
3567         dev = &rte_eth_devices[port_id];
3568         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3569         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3570                                 RTE_ETH_FILTER_NOP, NULL);
3571 }
3572
3573 int
3574 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3575                         enum rte_filter_op filter_op, void *arg)
3576 {
3577         struct rte_eth_dev *dev;
3578
3579         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3580
3581         dev = &rte_eth_devices[port_id];
3582         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3583         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3584                                                              filter_op, arg));
3585 }
3586
3587 const struct rte_eth_rxtx_callback *
3588 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3589                 rte_rx_callback_fn fn, void *user_param)
3590 {
3591 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3592         rte_errno = ENOTSUP;
3593         return NULL;
3594 #endif
3595         /* check input parameters */
3596         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3597                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3598                 rte_errno = EINVAL;
3599                 return NULL;
3600         }
3601         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3602
3603         if (cb == NULL) {
3604                 rte_errno = ENOMEM;
3605                 return NULL;
3606         }
3607
3608         cb->fn.rx = fn;
3609         cb->param = user_param;
3610
3611         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3612         /* Add the callbacks in fifo order. */
3613         struct rte_eth_rxtx_callback *tail =
3614                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3615
3616         if (!tail) {
3617                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3618
3619         } else {
3620                 while (tail->next)
3621                         tail = tail->next;
3622                 tail->next = cb;
3623         }
3624         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3625
3626         return cb;
3627 }
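
/*
 * Illustrative usage sketch (editorial addition): a post-RX callback that
 * counts received packets, installed with rte_eth_add_rx_callback(). The
 * function names and the counter parameter are assumptions for this example.
 */
static uint16_t
ethdev_example_rx_count_cb(uint16_t port_id, uint16_t queue_id,
                struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
                void *user_param)
{
        uint64_t *counter = user_param;

        RTE_SET_USED(port_id);
        RTE_SET_USED(queue_id);
        RTE_SET_USED(max_pkts);
        RTE_SET_USED(pkts);
        *counter += nb_pkts;
        return nb_pkts; /* deliver the whole burst unchanged */
}

static __rte_unused const struct rte_eth_rxtx_callback *
ethdev_example_install_rx_counter(uint16_t port_id, uint16_t queue_id,
                uint64_t *counter)
{
        return rte_eth_add_rx_callback(port_id, queue_id,
                        ethdev_example_rx_count_cb, counter);
}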
3628
3629 const struct rte_eth_rxtx_callback *
3630 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3631                 rte_rx_callback_fn fn, void *user_param)
3632 {
3633 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3634         rte_errno = ENOTSUP;
3635         return NULL;
3636 #endif
3637         /* check input parameters */
3638         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3639                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3640                 rte_errno = EINVAL;
3641                 return NULL;
3642         }
3643
3644         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3645
3646         if (cb == NULL) {
3647                 rte_errno = ENOMEM;
3648                 return NULL;
3649         }
3650
3651         cb->fn.rx = fn;
3652         cb->param = user_param;
3653
3654         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3655         /* Add the callback at the first position */
3656         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3657         rte_smp_wmb();
3658         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3659         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3660
3661         return cb;
3662 }
3663
3664 const struct rte_eth_rxtx_callback *
3665 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3666                 rte_tx_callback_fn fn, void *user_param)
3667 {
3668 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3669         rte_errno = ENOTSUP;
3670         return NULL;
3671 #endif
3672         /* check input parameters */
3673         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3674                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3675                 rte_errno = EINVAL;
3676                 return NULL;
3677         }
3678
3679         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3680
3681         if (cb == NULL) {
3682                 rte_errno = ENOMEM;
3683                 return NULL;
3684         }
3685
3686         cb->fn.tx = fn;
3687         cb->param = user_param;
3688
3689         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3690         /* Add the callbacks in fifo order. */
3691         struct rte_eth_rxtx_callback *tail =
3692                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3693
3694         if (!tail) {
3695                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3696
3697         } else {
3698                 while (tail->next)
3699                         tail = tail->next;
3700                 tail->next = cb;
3701         }
3702         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3703
3704         return cb;
3705 }
3706
3707 int
3708 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3709                 const struct rte_eth_rxtx_callback *user_cb)
3710 {
3711 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3712         return -ENOTSUP;
3713 #endif
3714         /* Check input parameters. */
3715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3716         if (user_cb == NULL ||
3717                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3718                 return -EINVAL;
3719
3720         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3721         struct rte_eth_rxtx_callback *cb;
3722         struct rte_eth_rxtx_callback **prev_cb;
3723         int ret = -EINVAL;
3724
3725         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3726         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3727         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3728                 cb = *prev_cb;
3729                 if (cb == user_cb) {
3730                         /* Remove the user cb from the callback list. */
3731                         *prev_cb = cb->next;
3732                         ret = 0;
3733                         break;
3734                 }
3735         }
3736         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3737
3738         return ret;
3739 }
3740
3741 int
3742 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3743                 const struct rte_eth_rxtx_callback *user_cb)
3744 {
3745 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3746         return -ENOTSUP;
3747 #endif
3748         /* Check input parameters. */
3749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3750         if (user_cb == NULL ||
3751                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3752                 return -EINVAL;
3753
3754         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3755         int ret = -EINVAL;
3756         struct rte_eth_rxtx_callback *cb;
3757         struct rte_eth_rxtx_callback **prev_cb;
3758
3759         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3760         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3761         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3762                 cb = *prev_cb;
3763                 if (cb == user_cb) {
3764                         /* Remove the user cb from the callback list. */
3765                         *prev_cb = cb->next;
3766                         ret = 0;
3767                         break;
3768                 }
3769         }
3770         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3771
3772         return ret;
3773 }
3774
3775 int
3776 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3777         struct rte_eth_rxq_info *qinfo)
3778 {
3779         struct rte_eth_dev *dev;
3780
3781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3782
3783         if (qinfo == NULL)
3784                 return -EINVAL;
3785
3786         dev = &rte_eth_devices[port_id];
3787         if (queue_id >= dev->data->nb_rx_queues) {
3788                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3789                 return -EINVAL;
3790         }
3791
3792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3793
3794         memset(qinfo, 0, sizeof(*qinfo));
3795         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3796         return 0;
3797 }
3798
3799 int
3800 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3801         struct rte_eth_txq_info *qinfo)
3802 {
3803         struct rte_eth_dev *dev;
3804
3805         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3806
3807         if (qinfo == NULL)
3808                 return -EINVAL;
3809
3810         dev = &rte_eth_devices[port_id];
3811         if (queue_id >= dev->data->nb_tx_queues) {
3812                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3813                 return -EINVAL;
3814         }
3815
3816         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3817
3818         memset(qinfo, 0, sizeof(*qinfo));
3819         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3820         return 0;
3821 }
3822
3823 int
3824 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3825                              struct ether_addr *mc_addr_set,
3826                              uint32_t nb_mc_addr)
3827 {
3828         struct rte_eth_dev *dev;
3829
3830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3831
3832         dev = &rte_eth_devices[port_id];
3833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3834         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3835                                                 mc_addr_set, nb_mc_addr));
3836 }
3837
3838 int
3839 rte_eth_timesync_enable(uint16_t port_id)
3840 {
3841         struct rte_eth_dev *dev;
3842
3843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3844         dev = &rte_eth_devices[port_id];
3845
3846         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3847         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3848 }
3849
3850 int
3851 rte_eth_timesync_disable(uint16_t port_id)
3852 {
3853         struct rte_eth_dev *dev;
3854
3855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3856         dev = &rte_eth_devices[port_id];
3857
3858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3859         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3860 }
3861
3862 int
3863 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3864                                    uint32_t flags)
3865 {
3866         struct rte_eth_dev *dev;
3867
3868         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3869         dev = &rte_eth_devices[port_id];
3870
3871         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3872         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3873                                 (dev, timestamp, flags));
3874 }
3875
3876 int
3877 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3878                                    struct timespec *timestamp)
3879 {
3880         struct rte_eth_dev *dev;
3881
3882         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3883         dev = &rte_eth_devices[port_id];
3884
3885         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3886         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3887                                 (dev, timestamp));
3888 }
3889
3890 int
3891 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3892 {
3893         struct rte_eth_dev *dev;
3894
3895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3896         dev = &rte_eth_devices[port_id];
3897
3898         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3899         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3900                                                                       delta));
3901 }
3902
3903 int
3904 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3905 {
3906         struct rte_eth_dev *dev;
3907
3908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3909         dev = &rte_eth_devices[port_id];
3910
3911         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3912         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3913                                                                 timestamp));
3914 }
3915
3916 int
3917 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3918 {
3919         struct rte_eth_dev *dev;
3920
3921         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3922         dev = &rte_eth_devices[port_id];
3923
3924         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3925         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3926                                                                 timestamp));
3927 }
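
/*
 * Illustrative usage sketch (editorial addition): one servo step of an IEEE
 * 1588 control loop built on the timesync API. The offset is assumed to
 * have been computed externally (e.g. from sync/follow-up messages) and the
 * device to support timesync; the helper name is invented for this example.
 */
static __rte_unused int
ethdev_example_timesync_step(uint16_t port_id, int64_t offset_ns)
{
        int ret;

        ret = rte_eth_timesync_enable(port_id);
        if (ret != 0)
                return ret;

        /* move the device clock by the measured master/slave offset */
        return rte_eth_timesync_adjust_time(port_id, -offset_ns);
}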
3928
3929 int
3930 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3931 {
3932         struct rte_eth_dev *dev;
3933
3934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3935
3936         dev = &rte_eth_devices[port_id];
3937         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3938         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3939 }
3940
3941 int
3942 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3943 {
3944         struct rte_eth_dev *dev;
3945
3946         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3947
3948         dev = &rte_eth_devices[port_id];
3949         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3950         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3951 }
3952
3953 int
3954 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3955 {
3956         struct rte_eth_dev *dev;
3957
3958         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3959
3960         dev = &rte_eth_devices[port_id];
3961         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3962         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3963 }
3964
3965 int
3966 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3967 {
3968         struct rte_eth_dev *dev;
3969
3970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3971
3972         dev = &rte_eth_devices[port_id];
3973         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3974         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3975 }
3976
3977 int __rte_experimental
3978 rte_eth_dev_get_module_info(uint16_t port_id,
3979                             struct rte_eth_dev_module_info *modinfo)
3980 {
3981         struct rte_eth_dev *dev;
3982
3983         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3984
3985         dev = &rte_eth_devices[port_id];
3986         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
3987         return (*dev->dev_ops->get_module_info)(dev, modinfo);
3988 }
3989
3990 int __rte_experimental
3991 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3992                               struct rte_dev_eeprom_info *info)
3993 {
3994         struct rte_eth_dev *dev;
3995
3996         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3997
3998         dev = &rte_eth_devices[port_id];
3999         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4000         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4001 }
4002
4003 int
4004 rte_eth_dev_get_dcb_info(uint16_t port_id,
4005                              struct rte_eth_dcb_info *dcb_info)
4006 {
4007         struct rte_eth_dev *dev;
4008
4009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4010
4011         dev = &rte_eth_devices[port_id];
4012         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4013
4014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4015         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4016 }
4017
4018 int
4019 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4020                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4021 {
4022         struct rte_eth_dev *dev;
4023
4024         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4025         if (l2_tunnel == NULL) {
4026                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4027                 return -EINVAL;
4028         }
4029
4030         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4031                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
4032                 return -EINVAL;
4033         }
4034
4035         dev = &rte_eth_devices[port_id];
4036         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4037                                 -ENOTSUP);
4038         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4039                                                                 l2_tunnel));
4040 }
4041
4042 int
4043 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4044                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4045                                   uint32_t mask,
4046                                   uint8_t en)
4047 {
4048         struct rte_eth_dev *dev;
4049
4050         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4051
4052         if (l2_tunnel == NULL) {
4053                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4054                 return -EINVAL;
4055         }
4056
4057         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4058                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4059                 return -EINVAL;
4060         }
4061
4062         if (mask == 0) {
4063                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4064                 return -EINVAL;
4065         }
4066
4067         dev = &rte_eth_devices[port_id];
4068         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4069                                 -ENOTSUP);
4070         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4071                                                         l2_tunnel, mask, en));
4072 }
4073
4074 static void
4075 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4076                            const struct rte_eth_desc_lim *desc_lim)
4077 {
4078         if (desc_lim->nb_align != 0)
4079                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4080
4081         if (desc_lim->nb_max != 0)
4082                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4083
4084         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4085 }
4086
4087 int
4088 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4089                                  uint16_t *nb_rx_desc,
4090                                  uint16_t *nb_tx_desc)
4091 {
4092         struct rte_eth_dev *dev;
4093         struct rte_eth_dev_info dev_info;
4094
4095         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4096
4097         dev = &rte_eth_devices[port_id];
4098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4099
4100         rte_eth_dev_info_get(port_id, &dev_info);
4101
4102         if (nb_rx_desc != NULL)
4103                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4104
4105         if (nb_tx_desc != NULL)
4106                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4107
4108         return 0;
4109 }
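
/*
 * Illustrative usage sketch (editorial addition): clamp application-chosen
 * descriptor counts to the PMD limits before queue setup. The default
 * counts are assumptions for this example.
 */
static __rte_unused int
ethdev_example_adjust_desc(uint16_t port_id)
{
        uint16_t nb_rxd = 128;  /* illustrative application defaults */
        uint16_t nb_txd = 512;
        int ret;

        ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
        if (ret != 0)
                return ret;

        /* nb_rxd/nb_txd now satisfy the PMD's min, max and alignment
         * limits and can be passed to rte_eth_rx_queue_setup() and
         * rte_eth_tx_queue_setup().
         */
        return 0;
}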
4110
4111 int
4112 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4113 {
4114         struct rte_eth_dev *dev;
4115
4116         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4117
4118         if (pool == NULL)
4119                 return -EINVAL;
4120
4121         dev = &rte_eth_devices[port_id];
4122
4123         if (*dev->dev_ops->pool_ops_supported == NULL)
4124                 return 1; /* all pools are supported */
4125
4126         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4127 }
4128
4129 /**
4130  * A set of values to describe the possible states of a switch domain.
4131  */
4132 enum rte_eth_switch_domain_state {
4133         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4134         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4135 };
4136
4137 /**
4138  * Array of switch domains available for allocation. Array is sized to
4139  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4140  * ethdev ports in a single process.
4141  */
4142 struct rte_eth_dev_switch {
4143         enum rte_eth_switch_domain_state state;
4144 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4145
4146 int __rte_experimental
4147 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4148 {
4149         unsigned int i;
4150
4151         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4152
4153         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4154                 i < RTE_MAX_ETHPORTS; i++) {
4155                 if (rte_eth_switch_domains[i].state ==
4156                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4157                         rte_eth_switch_domains[i].state =
4158                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4159                         *domain_id = i;
4160                         return 0;
4161                 }
4162         }
4163
4164         return -ENOSPC;
4165 }
4166
4167 int __rte_experimental
4168 rte_eth_switch_domain_free(uint16_t domain_id)
4169 {
4170         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4171                 domain_id >= RTE_MAX_ETHPORTS)
4172                 return -EINVAL;
4173
4174         if (rte_eth_switch_domains[domain_id].state !=
4175                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4176                 return -EINVAL;
4177
4178         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4179
4180         return 0;
4181 }
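
/*
 * Illustrative usage sketch (editorial addition): paired alloc/free calls
 * as a PF driver would use them when creating port representors. The
 * helper name is invented for this example.
 */
static __rte_unused int
ethdev_example_switch_domain(void)
{
        uint16_t domain_id;
        int ret;

        ret = rte_eth_switch_domain_alloc(&domain_id);
        if (ret != 0)
                return ret;

        /* ... stamp domain_id into each representor's device info ... */

        return rte_eth_switch_domain_free(domain_id);
}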
4182
4183 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4184
4185 static int
4186 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4187 {
4188         int state;
4189         struct rte_kvargs_pair *pair;
4190         char *letter;
4191
4192         arglist->str = strdup(str_in);
4193         if (arglist->str == NULL)
4194                 return -ENOMEM;
4195
4196         letter = arglist->str;
4197         state = 0;
4198         arglist->count = 0;
4199         pair = &arglist->pairs[0];
4200         while (1) {
4201                 switch (state) {
4202                 case 0: /* Initial */
4203                         if (*letter == '=')
4204                                 return -EINVAL;
4205                         else if (*letter == '\0')
4206                                 return 0;
4207
4208                         state = 1;
4209                         pair->key = letter;
4210                         /* fall-thru */
4211
4212                 case 1: /* Parsing key */
4213                         if (*letter == '=') {
4214                                 *letter = '\0';
4215                                 pair->value = letter + 1;
4216                                 state = 2;
4217                         } else if (*letter == ',' || *letter == '\0')
4218                                 return -EINVAL;
4219                         break;
4220
4222                 case 2: /* Parsing value */
4223                         if (*letter == '[')
4224                                 state = 3;
4225                         else if (*letter == ',') {
4226                                 *letter = '\0';
4227                                 arglist->count++;
4228                                 pair = &arglist->pairs[arglist->count];
4229                                 state = 0;
4230                         } else if (*letter == '\0') {
4231                                 letter--; /* revisit '\0' in state 0 to end */
4232                                 arglist->count++;
4233                                 pair = &arglist->pairs[arglist->count];
4234                                 state = 0;
4235                         }
4236                         break;
4237
4238                 case 3: /* Parsing list */
4239                         if (*letter == ']')
4240                                 state = 2;
4241                         else if (*letter == '\0')
4242                                 return -EINVAL;
4243                         break;
4244                 }
4245                 letter++;
4246         }
4247 }
4248
4249 static int
4250 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4251         void *data)
4252 {
4253         char *str_start;
4254         int state;
4255         int result;
4256
4257         if (*str != '[')
4258                 /* Single element, not a list */
4259                 return callback(str, data);
4260
4261         /* Sanity check, then strip the brackets */
4262         str_start = &str[strlen(str) - 1];
4263         if (*str_start != ']') {
4264                 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'\n", str);
4265                 return -EINVAL;
4266         }
4267         str++;
4268         *str_start = '\0';
4269
4270         /* Process list elements */
4271         state = 0;
4272         while (1) {
4273                 if (state == 0) {
4274                         if (*str == '\0')
4275                                 break;
4276                         if (*str != ',') {
4277                                 str_start = str;
4278                                 state = 1;
4279                         }
4280                 } else if (state == 1) {
4281                         if (*str == ',' || *str == '\0') {
4282                                 if (str > str_start) {
4283                                         /* Non-empty string fragment */
4284                                         *str = '\0';
4285                                         result = callback(str_start, data);
4286                                         if (result < 0)
4287                                                 return result;
4288                                 }
4289                                 state = 0;
4290                         }
4291                 }
4292                 str++;
4293         }
4294         return 0;
4295 }
4296
4297 static int
4298 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4299         const uint16_t max_list)
4300 {
4301         uint16_t lo, hi, val;
4302         int result;
4303
4304         result = sscanf(str, "%hu-%hu", &lo, &hi);
4305         if (result == 1) {
4306                 if (*len_list >= max_list)
4307                         return -ENOMEM;
4308                 list[(*len_list)++] = lo;
4309         } else if (result == 2) {
4310                 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4311                         return -EINVAL;
4312                 for (val = lo; val <= hi; val++) {
4313                         if (*len_list >= max_list)
4314                                 return -ENOMEM;
4315                         list[(*len_list)++] = val;
4316                 }
4317         } else
4318                 return -EINVAL;
4319         return 0;
4320 }
4321
4323 static int
4324 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4325 {
4326         struct rte_eth_devargs *eth_da = data;
4327
4328         return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4329                 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
4330 }
4331
4332 int __rte_experimental
4333 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4334 {
4335         struct rte_kvargs args;
4336         struct rte_kvargs_pair *pair;
4337         unsigned int i;
4338         int result = 0;
4339
4340         memset(eth_da, 0, sizeof(*eth_da));
4341
4342         result = rte_eth_devargs_tokenise(&args, dargs);
4343         if (result < 0)
4344                 goto parse_cleanup;
4345
4346         for (i = 0; i < args.count; i++) {
4347                 pair = &args.pairs[i];
4348                 if (strcmp("representor", pair->key) == 0) {
4349                         result = rte_eth_devargs_parse_list(pair->value,
4350                                 rte_eth_devargs_parse_representor_ports,
4351                                 eth_da);
4352                         if (result < 0)
4353                                 goto parse_cleanup;
4354                 }
4355         }
4356
4357 parse_cleanup:
4358         if (args.str)
4359                 free(args.str);
4360
4361         return result;
4362 }
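
/*
 * Illustrative usage sketch (editorial addition): parse the representor
 * device argument accepted above. With the input "representor=[0-3]" the
 * parser fills four representor port ids; the helper name is invented for
 * this example.
 */
static __rte_unused int
ethdev_example_parse_representor(void)
{
        struct rte_eth_devargs da;
        int ret;

        ret = rte_eth_devargs_parse("representor=[0-3]", &da);
        if (ret < 0)
                return ret;

        /* da.nb_representor_ports == 4; da.representor_ports = {0,1,2,3} */
        return 0;
}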
4363
4364 RTE_INIT(ethdev_init_log);
4365 static void
4366 ethdev_init_log(void)
4367 {
4368         ethdev_logtype = rte_log_register("lib.ethdev");
4369         if (ethdev_logtype >= 0)
4370                 rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
4371 }