ethdev: convert Tx offloads to Tx queue config
lib/librte_ethdev/rte_ethdev.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"

static int ethdev_logtype;

#define ethdev_log(level, fmt, ...) \
        rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint8_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure. */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * a pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}
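
/*
 * Illustrative usage sketch, not part of this file: walking the valid
 * port ids with rte_eth_find_next() (the pattern RTE_ETH_FOREACH_DEV
 * wraps). The example_ name is hypothetical.
 */
#if 0 /* example only, not compiled */
static void
example_list_attached_ports(void)
{
        uint16_t pid;

        for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
             pid = rte_eth_find_next(pid + 1))
                printf("port %u is attached\n", pid);
}
#endif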

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
        eth_dev->state = RTE_ETH_DEV_ATTACHED;

        eth_dev_last_created_port = port_id;

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary processes. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                ethdev_log(ERR, "Reached maximum number of Ethernet ports");
                goto unlock;
        }

        if (rte_eth_dev_allocated(name) != NULL) {
                ethdev_log(ERR,
                        "Ethernet Device with name %s already allocated!",
                        name);
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        if (eth_dev != NULL)
                _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device has the same port id in both the
 * primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE(
                        "device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id) {
                RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016"PRIX64".\n", owner_id);
                return 0;
        }
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
               rte_eth_devices[port_id].data->owner.id != owner_id))
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev_owner *port_owner;
        int sret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id))
                return -EINVAL;

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
                                    " by %s_%016"PRIX64".\n", port_id,
                                    port_owner->name, port_owner->id);
                return -EPERM;
        }

        sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
                        new_owner->name);
        if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
                RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
                                    port_id);

        port_owner->id = new_owner->id;

        RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016"PRIX64".\n", port_id,
                            new_owner->name, new_owner->id);

        return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
                        memset(&rte_eth_devices[port_id].data->owner, 0,
                               sizeof(struct rte_eth_dev_owner));
                RTE_PMD_DEBUG_TRACE("All port owners owned by %016"PRIX64
                                    " identifier have been removed.\n",
                                    owner_id);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
                           sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}
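
/*
 * Illustrative sketch of the (experimental) ownership API above, assuming
 * an application context; example_take_port is hypothetical. A unique owner
 * id is allocated once, then used to claim and later release a port.
 */
#if 0 /* example only, not compiled */
static int
example_take_port(uint16_t pid)
{
        struct rte_eth_dev_owner owner = { .name = "example_app" };
        int ret;

        ret = rte_eth_dev_owner_new(&owner.id);   /* allocate an owner id */
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_owner_set(pid, &owner); /* claim the port */
        if (ret != 0)
                return ret;
        /* ... use the port exclusively ... */
        return rte_eth_dev_owner_unset(pid, owner.id);
}
#endif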

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
        return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        for (port = 0; port < RTE_MAX_ETHPORTS; port++)
                if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
                        count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* Do not check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a vdev PMD. */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
                if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
                    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }
        }

        return -ENODEV;
}
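
/*
 * Illustrative sketch, assuming an application context: resolving a device
 * name to a port id and back with the two lookups above. example_find_port
 * is hypothetical; the caller's buffer must hold RTE_ETH_NAME_MAX_LEN bytes.
 */
#if 0 /* example only, not compiled */
static int
example_find_port(const char *name, uint16_t *pid)
{
        char buf[RTE_ETH_NAME_MAX_LEN];

        if (rte_eth_dev_get_port_by_name(name, pid) != 0)
                return -ENODEV;
        /* Round-trip: fetch the name back from the port id. */
        return rte_eth_dev_get_name_by_port(*pid, buf);
}
#endif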

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
        int current = rte_eth_dev_count_total();
        struct rte_devargs da;
        int ret = -1;

        memset(&da, 0, sizeof(da));

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs */
        if (rte_devargs_parse(&da, "%s", devargs))
                goto err;

        ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count_total()) {
                ethdev_log(ERR, "No port found for device (%s)", da.name);
                ret = -1;
                goto err;
        }

        /* if nothing happened, there is a bug here, since some driver told us
         * it did attach a device, but did not create a port.
         * FIXME: race condition in case of plug-out of another device
         */
        if (current == rte_eth_dev_count_total()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(da.args);
        return ret;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
        struct rte_device *dev;
        struct rte_bus *bus;
        uint32_t dev_flags;
        int ret = -1;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
                ethdev_log(ERR,
                        "Port %" PRIu16 " is bonded, cannot detach", port_id);
                return -ENOTSUP;
        }

        dev = rte_eth_devices[port_id].device;
        if (dev == NULL)
                return -EINVAL;

        bus = rte_bus_find_by_device(dev);
        if (bus == NULL)
                return -ENOENT;

        ret = rte_eal_hotplug_remove(bus->name, dev->name);
        if (ret < 0)
                return ret;

        rte_eth_dev_release_port(&rte_eth_devices[port_id]);
        return 0;
}
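
/*
 * Illustrative hotplug sketch using the attach/detach pair above. It
 * assumes the null PMD is built in so that the "net_null0" devargs can
 * probe; example_hotplug_null_port is hypothetical.
 */
#if 0 /* example only, not compiled */
static int
example_hotplug_null_port(void)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        uint16_t pid;
        int ret;

        ret = rte_eth_dev_attach("net_null0", &pid);
        if (ret != 0)
                return ret;
        /* ... configure and use the port ... */
        return rte_eth_dev_detach(pid, name);
}
#endif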

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be started before starting any queue\n", port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be started before starting any queue\n", port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
                                                             tx_queue_id));

}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}
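
/*
 * Illustrative sketch of the per-queue start/stop API above, assuming the
 * port is already started and the PMD implements the queue-state ops;
 * example_restart_rx_queue is hypothetical.
 */
#if 0 /* example only, not compiled */
static int
example_restart_rx_queue(uint16_t pid, uint16_t qid)
{
        int ret;

        ret = rte_eth_dev_rx_queue_stop(pid, qid);
        if (ret != 0)
                return ret;
        /* ... e.g. rearm or drain the queue here ... */
        return rte_eth_dev_rx_queue_start(pid, qid);
}
#endif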

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
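
/*
 * Illustrative sketch: building a link_speeds mask for rte_eth_conf from
 * a numeric speed with rte_eth_speed_bitflag(), here pinning the link to
 * fixed 10G full duplex. example_fixed_10g_speeds is hypothetical.
 */
#if 0 /* example only, not compiled */
static uint32_t
example_fixed_10g_speeds(void)
{
        return rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX) |
               ETH_LINK_SPEED_FIXED;
}
#endif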

/**
 * Convert from the legacy rxmode bitfield API to Rx offload flags.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
                                    uint64_t *rx_offloads)
{
        uint64_t offloads = 0;

        if (rxmode->header_split == 1)
                offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
        if (rxmode->hw_ip_checksum == 1)
                offloads |= DEV_RX_OFFLOAD_CHECKSUM;
        if (rxmode->hw_vlan_filter == 1)
                offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
        if (rxmode->hw_vlan_strip == 1)
                offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
        if (rxmode->hw_vlan_extend == 1)
                offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
        if (rxmode->jumbo_frame == 1)
                offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        if (rxmode->hw_strip_crc == 1)
                offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
        if (rxmode->enable_scatter == 1)
                offloads |= DEV_RX_OFFLOAD_SCATTER;
        if (rxmode->enable_lro == 1)
                offloads |= DEV_RX_OFFLOAD_TCP_LRO;
        if (rxmode->hw_timestamp == 1)
                offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
        if (rxmode->security == 1)
                offloads |= DEV_RX_OFFLOAD_SECURITY;

        *rx_offloads = offloads;
}
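
/*
 * Illustrative sketch of the conversion above: a legacy bitfield rxmode
 * with VLAN stripping and IP checksum enabled maps to the equivalent
 * offload flags. example_legacy_rxmode_offloads is hypothetical.
 */
#if 0 /* example only, not compiled */
static uint64_t
example_legacy_rxmode_offloads(void)
{
        struct rte_eth_rxmode mode = {
                .hw_vlan_strip = 1,
                .hw_ip_checksum = 1,
        };
        uint64_t offloads;

        rte_eth_convert_rx_offload_bitfield(&mode, &offloads);
        /* offloads == DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_CHECKSUM */
        return offloads;
}
#endif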

const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
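
/*
 * Illustrative sketch: decoding an offload mask bit by bit with the name
 * lookups above, since they translate one single-bit flag at a time.
 * example_print_rx_offloads is hypothetical.
 */
#if 0 /* example only, not compiled */
static void
example_print_rx_offloads(uint64_t mask)
{
        unsigned int bit;

        for (bit = 0; bit < 64; bit++)
                if (mask & (UINT64_C(1) << bit))
                        printf("%s\n",
                               rte_eth_dev_rx_offload_name(UINT64_C(1) << bit));
}
#endif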

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf local_conf = *dev_conf;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /*
         * Convert between the two offloads APIs so that PMDs need to
         * support only one of them.
         */
        if (dev_conf->rxmode.ignore_offload_bitfield == 0)
                rte_eth_convert_rx_offload_bitfield(
                                &dev_conf->rxmode, &local_conf.rxmode.offloads);

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                        RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        dev->device->driver->name);
                        return -EINVAL;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
            (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
                                    dev->device->driver->name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
                                    "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                                    port_id,
                                    dev_conf->rx_adv_conf.rss_conf.rss_hf,
                                    dev_info.flow_type_rss_offloads);
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_profile_rx_init(port_id, dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        return 0;
}
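
/*
 * Illustrative sketch of a minimal rte_eth_dev_configure() call using the
 * offloads API; ignore_offload_bitfield skips the legacy-bitfield
 * conversion path above, and 0/0 queue counts let the driver (or the EAL
 * fallback) choose. example_configure_port is hypothetical.
 */
#if 0 /* example only, not compiled */
static int
example_configure_port(uint16_t pid)
{
        struct rte_eth_conf conf = {
                .rxmode = {
                        .ignore_offload_bitfield = 1,
                        .offloads = DEV_RX_OFFLOAD_CHECKSUM,
                },
        };

        return rte_eth_dev_configure(pid, 0, 0, &conf);
}
#endif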

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                        "port %d must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info.max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return eth_err(port_id, diag);

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

void
rte_eth_dev_close(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        dev->data->nb_rx_queues = 0;
        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        dev->data->nb_tx_queues = 0;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

        rte_eth_dev_stop(port_id);
        ret = dev->dev_ops->dev_reset(dev);

        return eth_err(port_id, ret);
}
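
/*
 * Illustrative recovery sketch: rte_eth_dev_reset() stops the port
 * internally, after which the application is expected to reconfigure,
 * set up its queues again and restart. example_recover_port and its
 * conf argument are hypothetical.
 */
#if 0 /* example only, not compiled */
static int
example_recover_port(uint16_t pid, const struct rte_eth_conf *conf)
{
        int ret;

        ret = rte_eth_dev_reset(pid);
        if (ret != 0)
                return ret;
        /* Reconfigure, then redo queue setup and rte_eth_dev_start(). */
        return rte_eth_dev_configure(pid, 1, 1, conf);
}
#endif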

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

        dev = &rte_eth_devices[port_id];

        if (dev->state == RTE_ETH_DEV_REMOVED)
                return 1;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

        ret = dev->dev_ops->is_removed(dev);
        if (ret != 0)
                /* Device is physically removed. */
                dev->state = RTE_ETH_DEV_REMOVED;

        return ret;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf local_conf;
        void **rxq;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        /* Use default specified by driver, if nb_rx_desc is zero */
        if (nb_rx_desc == 0) {
                nb_rx_desc = dev_info.default_rxportconf.ring_size;
                /* If driver default is also zero, fall back on EAL default */
                if (nb_rx_desc == 0)
                        nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (dev->data->dev_started &&
                !(dev_info.dev_capa &
                        RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
                return -EBUSY;

        if (dev->data->rx_queue_state[rx_queue_id] !=
                RTE_ETH_QUEUE_STATE_STOPPED)
                return -EBUSY;

        rxq = dev->data->rx_queues;
        if (rxq[rx_queue_id]) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
                                        -ENOTSUP);
                (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
                rxq[rx_queue_id] = NULL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        local_conf = *rx_conf;
        if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
                /*
                 * Reflect port offloads in the queue offloads so that
                 * they are not discarded.
                 */
                rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
                                                    &local_conf.offloads);
        }

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, &local_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return eth_err(port_id, ret);
}
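
/*
 * Illustrative sketch of rte_eth_rx_queue_setup() relying on the defaults
 * handled above: a NULL rx_conf selects the driver's default_rxconf and a
 * zero descriptor count selects the driver/EAL default ring size.
 * example_setup_rx_queue is hypothetical; mp must be a valid pktmbuf pool.
 */
#if 0 /* example only, not compiled */
static int
example_setup_rx_queue(uint16_t pid, struct rte_mempool *mp)
{
        return rte_eth_rx_queue_setup(pid, 0, 0,
                                      rte_eth_dev_socket_id(pid), NULL, mp);
}
#endif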
1517
1518 /**
1519  * Convert from tx offloads to txq_flags.
1520  */
1521 static void
1522 rte_eth_convert_tx_offload(const uint64_t tx_offloads, uint32_t *txq_flags)
1523 {
1524         uint32_t flags = 0;
1525
1526         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1527                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1528         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1529                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1530         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1531                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1532         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1533                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1534         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1535                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1536         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1537                 flags |= ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP;
1538
1539         *txq_flags = flags;
1540 }
1541
/**
 * Convert from txq_flags to tx offloads.
 */
static void
rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
{
	uint64_t offloads = 0;

	if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
		offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
		offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
		offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
		offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
	if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
		offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
	if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
	    (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	*tx_offloads = offloads;
}
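
/*
 * Illustrative sketch (not part of this file): how the two conversion
 * helpers above relate.  A port enabling only DEV_TX_OFFLOAD_TCP_CKSUM
 * maps to txq_flags with every "NO*" bit set except
 * ETH_TXQ_FLAGS_NOXSUMTCP, and converting those flags back yields the
 * same offload set (the round trip is exact for the bits listed above):
 *
 *	uint32_t txq_flags;
 *	uint64_t offloads = DEV_TX_OFFLOAD_TCP_CKSUM;
 *
 *	rte_eth_convert_tx_offload(offloads, &txq_flags);
 *	// txq_flags == ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOVLANOFFL |
 *	//              ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP
 *	rte_eth_convert_txq_flags(txq_flags, &offloads);
 *	// offloads == DEV_TX_OFFLOAD_TCP_CKSUM again
 */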

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
				"should be: <= %hu, >= %hu, and a multiple of %hu\n",
				nb_tx_desc,
				dev_info.tx_desc_lim.nb_max,
				dev_info.tx_desc_lim.nb_min,
				dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->tx_queue_state[tx_queue_id] !=
		RTE_ETH_QUEUE_STATE_STOPPED)
		return -EBUSY;

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	/*
	 * Convert between the two offload APIs so that a PMD needs to
	 * support only one of them.
	 */
	local_conf = *tx_conf;
	if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
		rte_eth_convert_txq_flags(tx_conf->txq_flags,
					  &local_conf.offloads);
	}

	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
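
/*
 * Illustrative sketch (not part of this file): a typical caller of
 * rte_eth_tx_queue_setup() under the new offload API.  Setting
 * ETH_TXQ_FLAGS_IGNORE makes the function take local_conf.offloads
 * as-is instead of deriving it from the legacy txq_flags.  "port_id"
 * and the chosen offload are assumed for illustration:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txq_conf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txq_conf = dev_info.default_txconf;
 *	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *	txq_conf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     &txq_conf);
 */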

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
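
/*
 * Illustrative sketch (not part of this file): wiring up a TX buffer so
 * that packets which cannot be sent are counted instead of silently
 * dropped.  Buffer size, port and queue ids are assumed for
 * illustration:
 *
 *	static uint64_t drop_count;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drop_count);
 *
 *	// on the datapath:
 *	rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */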

int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	/* Validate Input Data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	/* Call driver to free pending mbufs. */
	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
					       free_cnt);
	return eth_err(port_id, ret);
}

void
rte_eth_promiscuous_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc &&
	    dev->data->dev_started)
		rte_eth_linkstatus_get(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
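
/*
 * Illustrative sketch (not part of this file): polling link state
 * without blocking.  The second argument passed to link_update above
 * (1 vs 0) is what makes rte_eth_link_get() wait for completion while
 * the _nowait variant returns whatever the hardware reports
 * immediately:
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up at %u Mbps\n", port_id,
 *		       link.link_speed);
 */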

int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}

int
rte_eth_stats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;

	return 0;
}
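
/*
 * Illustrative sketch (not part of this file): reading the basic
 * per-port counters:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %"PRIu64" tx %"PRIu64" missed %"PRIu64"\n",
 *		       stats.ipackets, stats.opackets, stats.imissed);
 */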

static inline int
get_xstats_basic_count(struct rte_eth_dev *dev)
{
	uint16_t nb_rxqs, nb_txqs;
	int count;

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	count = RTE_NB_STATS;
	count += nb_rxqs * RTE_NB_RXQ_STATS;
	count += nb_txqs * RTE_NB_TXQ_STATS;

	return count;
}

static int
get_xstats_count(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return eth_err(port_id, count);
	} else
		count = 0;

	count += get_xstats_basic_count(dev);

	return count;
}

int
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -ENOMEM;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -ENOMEM;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}
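
/*
 * Illustrative sketch (not part of this file): resolving one extended
 * statistic by name and then fetching only that value, which avoids
 * retrieving the whole xstats array on every poll:
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */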

/* retrieve basic stats names */
static int
rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names)
{
	int cnt_used_entries = 0;
	uint32_t idx, id_queue;
	uint16_t num_q;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	return cnt_used_entries;
}

/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = get_xstats_basic_count(dev);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	if (ids && !xstats_names)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					xstats_names, ids_copy, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
		return -ENOMEM;
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		rte_eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
			expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}

int
rte_eth_xstats_get_names(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];

	cnt_used_entries = rte_eth_basic_stats_get_names(
		dev, xstats_names);

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to end of list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}

static int
rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ret = get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	dev = &rte_eth_devices[port_id];
	basic_count = get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
					values, size);
	}

	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill the xstats structure */
	if (ids && no_ext_stat_requested)
		ret = rte_eth_basic_stats_get(port_id, xstats);
	else
		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);

	if (ret < 0)
		return ret;
	num_xstats_filled = (unsigned int)ret;

	/* Return all stats */
	if (!ids) {
		for (i = 0; i < num_xstats_filled; i++)
			values[i] = xstats[i].value;
		return expected_entries;
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
			return -1;
		}
		values[i] = xstats[ids[i]].value;
	}
	return size;
}

int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_dev *dev;
	unsigned int count = 0, i;
	signed int xcount = 0;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
		(nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return eth_err(port_id, xcount);
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	ret = rte_eth_basic_stats_get(port_id, xstats);
	if (ret < 0)
		return ret;
	count = ret;

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}
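
/*
 * Illustrative sketch (not part of this file): enumerating every
 * extended statistic with its name, using the same "query with NULL/0
 * first to learn the count" convention implemented above:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 *	for (int i = 0; i < n; i++)
 *		printf("%s: %"PRIu64"\n", names[vals[i].id].name,
 *		       vals[i].value);
 */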

/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}

static int
set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set,
				-ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
						stat_idx, STAT_QMAP_TX));
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
						stat_idx, STAT_QMAP_RX));
}
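
/*
 * Illustrative sketch (not part of this file): on NICs with fewer
 * per-queue stat registers than queues (RTE_ETHDEV_QUEUE_STAT_CNTRS is
 * the upper bound), an application can pin the queues it cares about
 * to specific counters:
 *
 *	// count RX queue 4 in stat register 0, TX queue 4 in register 1
 *	rte_eth_dev_set_rx_queue_stats_mapping(port_id, 4, 0);
 *	rte_eth_dev_set_tx_queue_stats_mapping(port_id, 4, 1);
 *	// q_ipackets[0] / q_opackets[1] in rte_eth_stats now follow
 *	// queue 4
 */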

int
rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
							fw_version, fw_size));
}

void
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	struct rte_eth_txconf *txconf;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;
	dev_info->device = dev->device;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->driver_name = dev->device->driver->name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->dev_flags = &dev->data->dev_flags;
	txconf = &dev_info->default_txconf;
	/* convert offloads to txq_flags to support legacy applications */
	rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
}
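
/*
 * Illustrative sketch (not part of this file): querying device limits
 * before configuration, which is how callers discover queue counts and
 * the offloads a port supports:
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	printf("%s: up to %u RX / %u TX queues\n", dev_info.driver_name,
 *	       dev_info.max_rx_queues, dev_info.max_tx_queues);
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS)
 *		... // multi-segment mbufs can be transmitted
 */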

int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}
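
/*
 * Illustrative sketch (not part of this file): listing which L4 packet
 * types the PMD can recognize.  As with the stats calls, a return
 * value larger than the buffer means the list was truncated:
 *
 *	uint32_t ptypes[16];
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, ptypes, RTE_DIM(ptypes));
 *
 *	for (int i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *		printf("supported ptype 0x%08x\n", ptypes[i]);
 */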

void
rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}

int
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return eth_err(port_id, ret);
}
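
/*
 * Illustrative sketch (not part of this file): raising the MTU for
 * jumbo frames.  The cached value in dev->data is only updated when
 * the driver accepts the new size, so a failed call leaves
 * rte_eth_dev_get_mtu() reporting the old MTU:
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0) {
 *		rte_eth_dev_get_mtu(port_id, &mtu);
 *		// mtu == 9000
 *	}
 */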

int
rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_VLAN_FILTER)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n",
				port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	if (ret == 0) {
		struct rte_vlan_filter_conf *vfc;
		int vidx;
		int vbit;

		vfc = &dev->data->vlan_filter_conf;
		vidx = vlan_id / 64;
		vbit = vlan_id % 64;

		if (on)
			vfc->ids[vidx] |= UINT64_C(1) << vbit;
		else
			vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
				    int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}

int
rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
							       tpid));
}

int
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;
	uint64_t orig_offloads;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* save original values in case of failure */
	orig_offloads = dev->data->dev_conf.rxmode.offloads;

	/* check which options were changed by the application */
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_STRIP);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_STRIP;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_STRIP;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_FILTER;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_FILTER;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_EXTEND);
	if (cur != org) {
		if (cur)
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_EXTEND;
		else
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_EXTEND;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
	if (ret) {
		/* hit an error, restore the original values */
		dev->data->dev_conf.rxmode.offloads = orig_offloads;
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_STRIP)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_FILTER)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_VLAN_EXTEND)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}
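
/*
 * Illustrative sketch (not part of this file): turning VLAN stripping
 * on while preserving the other VLAN offload bits, using the
 * read-modify-write pattern these two functions are designed for:
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */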

int
rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}

int
rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}

int
rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}

int
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				   struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water, low water validation are device specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
					(dev, pfc_conf));
	return -ENOTSUP;
}

static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u "
				"exceeds the maximum rxq index: %u\n",
				idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint16_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
							     reta_size));
}

int
rte_eth_dev_rss_reta_query(uint16_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
							    reta_size));
}
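
/*
 * Illustrative sketch (not part of this file): spreading a 128-entry
 * redirection table evenly across 4 RX queues.  Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries,
 * and its mask selects which of them the call actually writes:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	uint16_t i, reta_size = 128;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			(1ULL << (i % RTE_RETA_GROUP_SIZE));
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[
 *			i % RTE_RETA_GROUP_SIZE] = i % 4;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */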

int
rte_eth_dev_rss_hash_update(uint16_t port_id,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d invalid rss_hf: "
				    "0x%"PRIx64", valid value: 0x%"PRIx64"\n",
				    port_id,
				    rss_conf->rss_hf,
				    dev_info.flow_type_rss_offloads);
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
								 rss_conf));
}

int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
								   rss_conf));
}
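
/*
 * Illustrative sketch (not part of this file): restricting the RSS
 * hash to IP addresses only while keeping the current key.  A NULL
 * rss_key asks the PMD to leave the key unchanged:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */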

int
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
								udp_tunnel));
}

int
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
								udp_tunnel));
}

int
rte_eth_led_on(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}

int
rte_eth_led_off(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Do nothing if both MAC address and pool are already set */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	if (ret == 0) {
		/* Update address in NIC data structure */
		ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= (1ULL << pool);
	}

	return eth_err(port_id, ret);
}

int
rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC "
			"address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}
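
/*
 * Illustrative sketch (not part of this file): adding a secondary
 * unicast address so the NIC accepts frames for it without promiscuous
 * mode.  Pool 0 is the usual choice when VMDq pools are not in use:
 *
 *	struct ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	rte_eth_dev_mac_addr_add(port_id, &extra, 0);
 *	...
 *	rte_eth_dev_mac_addr_remove(port_id, &extra);
 */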
3011
3012 int
3013 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3014 {
3015         struct rte_eth_dev *dev;
3016         int ret;
3017
3018         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3019
3020         if (!is_valid_assigned_ether_addr(addr))
3021                 return -EINVAL;
3022
3023         dev = &rte_eth_devices[port_id];
3024         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3025
3026         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3027         if (ret < 0)
3028                 return ret;
3029
3030         /* Update default address in NIC data structure */
3031         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3032
3033         return 0;
3034 }
3035
3036
3037 /*
3038  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3039  * an empty spot.
3040  */
3041 static int
3042 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3043 {
3044         struct rte_eth_dev_info dev_info;
3045         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3046         unsigned i;
3047
3048         rte_eth_dev_info_get(port_id, &dev_info);
3049         if (!dev->data->hash_mac_addrs)
3050                 return -1;
3051
3052         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3053                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3054                         ETHER_ADDR_LEN) == 0)
3055                         return i;
3056
3057         return -1;
3058 }
3059
3060 int
3061 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3062                                 uint8_t on)
3063 {
3064         int index;
3065         int ret;
3066         struct rte_eth_dev *dev;
3067
3068         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3069
3070         dev = &rte_eth_devices[port_id];
3071         if (is_zero_ether_addr(addr)) {
3072                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add an all-zero MAC address\n",
3073                         port_id);
3074                 return -EINVAL;
3075         }
3076
3077         index = get_hash_mac_addr_index(port_id, addr);
3078         /* Address already present and being enabled: nothing to do */
3079         if ((index >= 0) && on)
3080                 return 0;
3081
3082         if (index < 0) {
3083                 if (!on) {
3084                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3085                                 "set in UTA\n", port_id);
3086                         return -EINVAL;
3087                 }
3088
3089                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3090                 if (index < 0) {
3091                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3092                                         port_id);
3093                         return -ENOSPC;
3094                 }
3095         }
3096
3097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3098         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3099         if (ret == 0) {
3100                 /* Update address in NIC data structure */
3101                 if (on)
3102                         ether_addr_copy(addr,
3103                                         &dev->data->hash_mac_addrs[index]);
3104                 else
3105                         ether_addr_copy(&null_mac_addr,
3106                                         &dev->data->hash_mac_addrs[index]);
3107         }
3108
3109         return eth_err(port_id, ret);
3110 }
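
/*
 * Example (illustrative sketch only): toggle the unicast hash table (UTA)
 * entry for one address on port 0; "mac" is a previously filled ether_addr:
 *
 *	rte_eth_dev_uc_hash_table_set(0, &mac, 1);
 *	...
 *	rte_eth_dev_uc_hash_table_set(0, &mac, 0);
 */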
3111
3112 int
3113 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3114 {
3115         struct rte_eth_dev *dev;
3116
3117         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3118
3119         dev = &rte_eth_devices[port_id];
3120
3121         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3122         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3123                                                                        on));
3124 }
3125
3126 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3127                                         uint16_t tx_rate)
3128 {
3129         struct rte_eth_dev *dev;
3130         struct rte_eth_dev_info dev_info;
3131         struct rte_eth_link link;
3132
3133         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3134
3135         dev = &rte_eth_devices[port_id];
3136         rte_eth_dev_info_get(port_id, &dev_info);
3137         link = dev->data->dev_link;
3138
3139         if (queue_idx >= dev_info.max_tx_queues) {
3140                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3141                                 "invalid queue id=%d\n", port_id, queue_idx);
3142                 return -EINVAL;
3143         }
3144
3145         if (tx_rate > link.link_speed) {
3146                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
3147                                 "bigger than link speed=%d\n",
3148                                 tx_rate, link.link_speed);
3149                 return -EINVAL;
3150         }
3151
3152         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3153         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3154                                                         queue_idx, tx_rate));
3155 }
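
/*
 * Example (illustrative sketch only): cap Tx queue 0 of port 0 at 100 Mbps.
 * tx_rate is in Mbps and may not exceed the current link speed:
 *
 *	int ret = rte_eth_set_queue_rate_limit(0, 0, 100);
 *	if (ret == -ENOTSUP)
 *		... the PMD has no per-queue rate limiting ...
 */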
3156
3157 int
3158 rte_eth_mirror_rule_set(uint16_t port_id,
3159                         struct rte_eth_mirror_conf *mirror_conf,
3160                         uint8_t rule_id, uint8_t on)
3161 {
3162         struct rte_eth_dev *dev;
3163
3164         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3165         if (mirror_conf->rule_type == 0) {
3166                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
3167                 return -EINVAL;
3168         }
3169
3170         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3171                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3172                                 ETH_64_POOLS - 1);
3173                 return -EINVAL;
3174         }
3175
3176         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3177              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3178             (mirror_conf->pool_mask == 0)) {
3179                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
3180                 return -EINVAL;
3181         }
3182
3183         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3184             mirror_conf->vlan.vlan_mask == 0) {
3185                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
3186                 return -EINVAL;
3187         }
3188
3189         dev = &rte_eth_devices[port_id];
3190         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3191
3192         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3193                                                 mirror_conf, rule_id, on));
3194 }
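
/*
 * Example (illustrative sketch only; requires a PMD with VMDq mirroring
 * support): mirror VLAN 42 to pool 1 on port 0, using rule slot 0:
 *
 *	struct rte_eth_mirror_conf conf = {
 *		.rule_type = ETH_MIRROR_VLAN,
 *		.dst_pool = 1,
 *	};
 *
 *	conf.vlan.vlan_mask = 1ULL << 0;
 *	conf.vlan.vlan_id[0] = 42;
 *	rte_eth_mirror_rule_set(0, &conf, 0, 1);
 */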
3195
3196 int
3197 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3198 {
3199         struct rte_eth_dev *dev;
3200
3201         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3202
3203         dev = &rte_eth_devices[port_id];
3204         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3205
3206         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3207                                                                    rule_id));
3208 }
3209
3210 RTE_INIT(eth_dev_init_cb_lists)
3211 {
3212         int i;
3213
3214         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3215                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3216 }
3217
3218 int
3219 rte_eth_dev_callback_register(uint16_t port_id,
3220                         enum rte_eth_event_type event,
3221                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3222 {
3223         struct rte_eth_dev *dev;
3224         struct rte_eth_dev_callback *user_cb;
3225         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3226         uint16_t last_port;
3227
3228         if (!cb_fn)
3229                 return -EINVAL;
3230
3231         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3232                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3233                 return -EINVAL;
3234         }
3235
3236         if (port_id == RTE_ETH_ALL) {
3237                 next_port = 0;
3238                 last_port = RTE_MAX_ETHPORTS - 1;
3239         } else {
3240                 next_port = last_port = port_id;
3241         }
3242
3243         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3244
3245         do {
3246                 dev = &rte_eth_devices[next_port];
3247
3248                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3249                         if (user_cb->cb_fn == cb_fn &&
3250                                 user_cb->cb_arg == cb_arg &&
3251                                 user_cb->event == event) {
3252                                 break;
3253                         }
3254                 }
3255
3256                 /* create a new callback. */
3257                 if (user_cb == NULL) {
3258                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3259                                 sizeof(struct rte_eth_dev_callback), 0);
3260                         if (user_cb != NULL) {
3261                                 user_cb->cb_fn = cb_fn;
3262                                 user_cb->cb_arg = cb_arg;
3263                                 user_cb->event = event;
3264                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3265                                                   user_cb, next);
3266                         } else {
3267                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3268                                 rte_eth_dev_callback_unregister(port_id, event,
3269                                                                 cb_fn, cb_arg);
3270                                 return -ENOMEM;
3271                         }
3272
3273                 }
3274         } while (++next_port <= last_port);
3275
3276         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3277         return 0;
3278 }
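
/*
 * Example (illustrative sketch only): register a link-state callback on
 * every port; on_link_change is hypothetical application code:
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *		       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      on_link_change, NULL);
 */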
3279
3280 int
3281 rte_eth_dev_callback_unregister(uint16_t port_id,
3282                         enum rte_eth_event_type event,
3283                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3284 {
3285         int ret;
3286         struct rte_eth_dev *dev;
3287         struct rte_eth_dev_callback *cb, *next;
3288         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3289         uint16_t last_port;
3290
3291         if (!cb_fn)
3292                 return -EINVAL;
3293
3294         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3295                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3296                 return -EINVAL;
3297         }
3298
3299         if (port_id == RTE_ETH_ALL) {
3300                 next_port = 0;
3301                 last_port = RTE_MAX_ETHPORTS - 1;
3302         } else {
3303                 next_port = last_port = port_id;
3304         }
3305
3306         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3307
3308         ret = 0;
3309         do {
3310                 dev = &rte_eth_devices[next_port];
3311                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3312                      cb = next) {
3313
3314                         next = TAILQ_NEXT(cb, next);
3315
3316                         if (cb->cb_fn != cb_fn || cb->event != event ||
3317                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3318                                 continue;
3319
3320                         /*
3321                          * if this callback is not executing right now,
3322                          * then remove it.
3323                          */
3324                         if (cb->active == 0) {
3325                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3326                                 rte_free(cb);
3327                         } else {
3328                                 ret = -EAGAIN;
3329                         }
3330                 }
3331         } while (++next_port <= last_port);
3332
3333         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3334         return ret;
3335 }
3336
3337 int
3338 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3339         enum rte_eth_event_type event, void *ret_param)
3340 {
3341         struct rte_eth_dev_callback *cb_lst;
3342         struct rte_eth_dev_callback dev_cb;
3343         int rc = 0;
3344
3345         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3346         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3347                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3348                         continue;
3349                 dev_cb = *cb_lst;
3350                 cb_lst->active = 1;
3351                 if (ret_param != NULL)
3352                         dev_cb.ret_param = ret_param;
3353
3354                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3355                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3356                                 dev_cb.cb_arg, dev_cb.ret_param);
3357                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3358                 cb_lst->active = 0;
3359         }
3360         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3361         return rc;
3362 }
3363
3364 int
3365 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3366 {
3367         uint32_t vec;
3368         struct rte_eth_dev *dev;
3369         struct rte_intr_handle *intr_handle;
3370         uint16_t qid;
3371         int rc;
3372
3373         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3374
3375         dev = &rte_eth_devices[port_id];
3376
3377         if (!dev->intr_handle) {
3378                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3379                 return -ENOTSUP;
3380         }
3381
3382         intr_handle = dev->intr_handle;
3383         if (!intr_handle->intr_vec) {
3384                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3385                 return -EPERM;
3386         }
3387
3388         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3389                 vec = intr_handle->intr_vec[qid];
3390                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3391                 if (rc && rc != -EEXIST) {
3392                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3393                                         " op %d epfd %d vec %u\n",
3394                                         port_id, qid, op, epfd, vec);
3395                 }
3396         }
3397
3398         return 0;
3399 }
3400
3401 const struct rte_memzone *
3402 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3403                          uint16_t queue_id, size_t size, unsigned align,
3404                          int socket_id)
3405 {
3406         char z_name[RTE_MEMZONE_NAMESIZE];
3407         const struct rte_memzone *mz;
3408
3409         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3410                  dev->device->driver->name, ring_name,
3411                  dev->data->port_id, queue_id);
3412
3413         mz = rte_memzone_lookup(z_name);
3414         if (mz)
3415                 return mz;
3416
3417         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3418                         RTE_MEMZONE_IOVA_CONTIG, align);
3419 }
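
/*
 * Example (illustrative sketch only): typical PMD use from an Rx queue
 * setup routine; ring_size and socket_id come from the caller:
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, ring_size,
 *				      RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */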
3420
3421 int __rte_experimental
3422 rte_eth_dev_create(struct rte_device *device, const char *name,
3423         size_t priv_data_size,
3424         ethdev_bus_specific_init ethdev_bus_specific_init,
3425         void *bus_init_params,
3426         ethdev_init_t ethdev_init, void *init_params)
3427 {
3428         struct rte_eth_dev *ethdev;
3429         int retval;
3430
3431         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3432
3433         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3434                 ethdev = rte_eth_dev_allocate(name);
3435                 if (!ethdev) {
3436                         retval = -ENODEV;
3437                         goto probe_failed;
3438                 }
3439
3440                 if (priv_data_size) {
3441                         ethdev->data->dev_private = rte_zmalloc_socket(
3442                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3443                                 device->numa_node);
3444
3445                         if (!ethdev->data->dev_private) {
3446                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3447                                 retval = -ENOMEM;
3448                                 goto probe_failed;
3449                         }
3450                 }
3451         } else {
3452                 ethdev = rte_eth_dev_attach_secondary(name);
3453                 if (!ethdev) {
3454                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3455                                 "ethdev doesn't exist\n");
3456                         retval = -ENODEV;
3457                         goto probe_failed;
3458                 }
3459         }
3460
3461         ethdev->device = device;
3462
3463         if (ethdev_bus_specific_init) {
3464                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3465                 if (retval) {
3466                         RTE_LOG(ERR, EAL,
3467                                 "ethdev bus specific initialisation failed\n");
3468                         goto probe_failed;
3469                 }
3470         }
3471
3472         retval = ethdev_init(ethdev, init_params);
3473         if (retval) {
3474                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3475                 goto probe_failed;
3476         }
3477
3478         return retval;
3479 probe_failed:
3480         /* free the port's private data if this is the primary process */
3481         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3482                 rte_free(ethdev->data->dev_private);
3483
3484         rte_eth_dev_release_port(ethdev);
3485
3486         return retval;
3487 }
3488
3489 int  __rte_experimental
3490 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3491         ethdev_uninit_t ethdev_uninit)
3492 {
3493         int ret;
3494
3495         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3496         if (!ethdev)
3497                 return -ENODEV;
3498
3499         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3500
3501         /* the macro above already returned if ethdev_uninit was NULL */
3502         ret = ethdev_uninit(ethdev);
3503         if (ret)
3504                 return ret;
3505
3506         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3507                 rte_free(ethdev->data->dev_private);
3508
3509         ethdev->data->dev_private = NULL;
3510
3511         return rte_eth_dev_release_port(ethdev);
3512 }
3513
3514 int
3515 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3516                           int epfd, int op, void *data)
3517 {
3518         uint32_t vec;
3519         struct rte_eth_dev *dev;
3520         struct rte_intr_handle *intr_handle;
3521         int rc;
3522
3523         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3524
3525         dev = &rte_eth_devices[port_id];
3526         if (queue_id >= dev->data->nb_rx_queues) {
3527                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3528                 return -EINVAL;
3529         }
3530
3531         if (!dev->intr_handle) {
3532                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3533                 return -ENOTSUP;
3534         }
3535
3536         intr_handle = dev->intr_handle;
3537         if (!intr_handle->intr_vec) {
3538                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3539                 return -EPERM;
3540         }
3541
3542         vec = intr_handle->intr_vec[queue_id];
3543         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3544         if (rc && rc != -EEXIST) {
3545                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3546                                 " op %d epfd %d vec %u\n",
3547                                 port_id, queue_id, op, epfd, vec);
3548                 return rc;
3549         }
3550
3551         return 0;
3552 }
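
/*
 * Example (illustrative sketch only; assumes the port was configured with
 * intr_conf.rxq = 1): wait for Rx traffic on queue 0 of port 0 using the
 * calling thread's epoll instance:
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(0, 0);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(0, 0);
 */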
3553
3554 int
3555 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3556                            uint16_t queue_id)
3557 {
3558         struct rte_eth_dev *dev;
3559
3560         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3561
3562         dev = &rte_eth_devices[port_id];
3563
3564         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3565         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3566                                                                 queue_id));
3567 }
3568
3569 int
3570 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3571                             uint16_t queue_id)
3572 {
3573         struct rte_eth_dev *dev;
3574
3575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3576
3577         dev = &rte_eth_devices[port_id];
3578
3579         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3580         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3581                                                                 queue_id));
3582 }
3583
3584
3585 int
3586 rte_eth_dev_filter_supported(uint16_t port_id,
3587                              enum rte_filter_type filter_type)
3588 {
3589         struct rte_eth_dev *dev;
3590
3591         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3592
3593         dev = &rte_eth_devices[port_id];
3594         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3595         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3596                                 RTE_ETH_FILTER_NOP, NULL);
3597 }
3598
3599 int
3600 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3601                         enum rte_filter_op filter_op, void *arg)
3602 {
3603         struct rte_eth_dev *dev;
3604
3605         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3606
3607         dev = &rte_eth_devices[port_id];
3608         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3609         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3610                                                              filter_op, arg));
3611 }
3612
3613 const struct rte_eth_rxtx_callback *
3614 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3615                 rte_rx_callback_fn fn, void *user_param)
3616 {
3617 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3618         rte_errno = ENOTSUP;
3619         return NULL;
3620 #endif
3621         /* check input parameters */
3622         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3623                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3624                 rte_errno = EINVAL;
3625                 return NULL;
3626         }
3627         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3628
3629         if (cb == NULL) {
3630                 rte_errno = ENOMEM;
3631                 return NULL;
3632         }
3633
3634         cb->fn.rx = fn;
3635         cb->param = user_param;
3636
3637         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3638         /* Add the callback at the tail, preserving FIFO order. */
3639         struct rte_eth_rxtx_callback *tail =
3640                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3641
3642         if (!tail) {
3643                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3644
3645         } else {
3646                 while (tail->next)
3647                         tail = tail->next;
3648                 tail->next = cb;
3649         }
3650         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3651
3652         return cb;
3653 }
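
/*
 * Example (illustrative sketch only): count packets seen on port 0,
 * queue 0, with a post-Rx callback; count_rx is hypothetical application
 * code:
 *
 *	static uint16_t
 *	count_rx(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(0, 0, count_rx, &rx_count);
 */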
3654
3655 const struct rte_eth_rxtx_callback *
3656 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3657                 rte_rx_callback_fn fn, void *user_param)
3658 {
3659 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3660         rte_errno = ENOTSUP;
3661         return NULL;
3662 #endif
3663         /* check input parameters */
3664         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3665                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3666                 rte_errno = EINVAL;
3667                 return NULL;
3668         }
3669
3670         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3671
3672         if (cb == NULL) {
3673                 rte_errno = ENOMEM;
3674                 return NULL;
3675         }
3676
3677         cb->fn.rx = fn;
3678         cb->param = user_param;
3679
3680         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3681         /* Add the callback at the first position */
3682         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3683         rte_smp_wmb();
3684         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3685         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3686
3687         return cb;
3688 }
3689
3690 const struct rte_eth_rxtx_callback *
3691 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3692                 rte_tx_callback_fn fn, void *user_param)
3693 {
3694 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3695         rte_errno = ENOTSUP;
3696         return NULL;
3697 #endif
3698         /* check input parameters */
3699         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3700                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3701                 rte_errno = EINVAL;
3702                 return NULL;
3703         }
3704
3705         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3706
3707         if (cb == NULL) {
3708                 rte_errno = ENOMEM;
3709                 return NULL;
3710         }
3711
3712         cb->fn.tx = fn;
3713         cb->param = user_param;
3714
3715         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3716         /* Add the callback at the tail, preserving FIFO order. */
3717         struct rte_eth_rxtx_callback *tail =
3718                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3719
3720         if (!tail) {
3721                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3722
3723         } else {
3724                 while (tail->next)
3725                         tail = tail->next;
3726                 tail->next = cb;
3727         }
3728         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3729
3730         return cb;
3731 }
3732
3733 int
3734 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3735                 const struct rte_eth_rxtx_callback *user_cb)
3736 {
3737 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3738         return -ENOTSUP;
3739 #endif
3740         /* Check input parameters. */
3741         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3742         if (user_cb == NULL ||
3743                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3744                 return -EINVAL;
3745
3746         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3747         struct rte_eth_rxtx_callback *cb;
3748         struct rte_eth_rxtx_callback **prev_cb;
3749         int ret = -EINVAL;
3750
3751         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3752         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3753         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3754                 cb = *prev_cb;
3755                 if (cb == user_cb) {
3756                         /* Remove the user cb from the callback list. */
3757                         *prev_cb = cb->next;
3758                         ret = 0;
3759                         break;
3760                 }
3761         }
3762         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3763
3764         return ret;
3765 }
3766
3767 int
3768 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3769                 const struct rte_eth_rxtx_callback *user_cb)
3770 {
3771 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3772         return -ENOTSUP;
3773 #endif
3774         /* Check input parameters. */
3775         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3776         if (user_cb == NULL ||
3777                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3778                 return -EINVAL;
3779
3780         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3781         int ret = -EINVAL;
3782         struct rte_eth_rxtx_callback *cb;
3783         struct rte_eth_rxtx_callback **prev_cb;
3784
3785         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3786         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3787         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3788                 cb = *prev_cb;
3789                 if (cb == user_cb) {
3790                         /* Remove the user cb from the callback list. */
3791                         *prev_cb = cb->next;
3792                         ret = 0;
3793                         break;
3794                 }
3795         }
3796         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3797
3798         return ret;
3799 }
3800
3801 int
3802 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3803         struct rte_eth_rxq_info *qinfo)
3804 {
3805         struct rte_eth_dev *dev;
3806
3807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3808
3809         if (qinfo == NULL)
3810                 return -EINVAL;
3811
3812         dev = &rte_eth_devices[port_id];
3813         if (queue_id >= dev->data->nb_rx_queues) {
3814                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3815                 return -EINVAL;
3816         }
3817
3818         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3819
3820         memset(qinfo, 0, sizeof(*qinfo));
3821         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3822         return 0;
3823 }
3824
3825 int
3826 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3827         struct rte_eth_txq_info *qinfo)
3828 {
3829         struct rte_eth_dev *dev;
3830         struct rte_eth_txconf *txconf;
3831
3832         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3833
3834         if (qinfo == NULL)
3835                 return -EINVAL;
3836
3837         dev = &rte_eth_devices[port_id];
3838         if (queue_id >= dev->data->nb_tx_queues) {
3839                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3840                 return -EINVAL;
3841         }
3842
3843         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3844
3845         memset(qinfo, 0, sizeof(*qinfo));
3846         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3847         txconf = &qinfo->conf;
3848         /* convert offloads back to txq_flags to support legacy apps */
3849         rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
3850         return 0;
3851 }
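
/*
 * Example (illustrative sketch only): read back the offloads applied to
 * Tx queue 0 of port 0; txq_flags is filled in only for legacy
 * applications:
 *
 *	struct rte_eth_txq_info qinfo;
 *
 *	if (rte_eth_tx_queue_info_get(0, 0, &qinfo) == 0)
 *		printf("tx offloads: 0x%" PRIx64 "\n", qinfo.conf.offloads);
 */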
3852
3853 int
3854 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3855                              struct ether_addr *mc_addr_set,
3856                              uint32_t nb_mc_addr)
3857 {
3858         struct rte_eth_dev *dev;
3859
3860         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3861
3862         dev = &rte_eth_devices[port_id];
3863         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3864         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3865                                                 mc_addr_set, nb_mc_addr));
3866 }
3867
3868 int
3869 rte_eth_timesync_enable(uint16_t port_id)
3870 {
3871         struct rte_eth_dev *dev;
3872
3873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3874         dev = &rte_eth_devices[port_id];
3875
3876         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3877         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3878 }
3879
3880 int
3881 rte_eth_timesync_disable(uint16_t port_id)
3882 {
3883         struct rte_eth_dev *dev;
3884
3885         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3886         dev = &rte_eth_devices[port_id];
3887
3888         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3889         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3890 }
3891
3892 int
3893 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3894                                    uint32_t flags)
3895 {
3896         struct rte_eth_dev *dev;
3897
3898         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3899         dev = &rte_eth_devices[port_id];
3900
3901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3902         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3903                                 (dev, timestamp, flags));
3904 }
3905
3906 int
3907 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3908                                    struct timespec *timestamp)
3909 {
3910         struct rte_eth_dev *dev;
3911
3912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3913         dev = &rte_eth_devices[port_id];
3914
3915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3916         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3917                                 (dev, timestamp));
3918 }
3919
3920 int
3921 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3922 {
3923         struct rte_eth_dev *dev;
3924
3925         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3926         dev = &rte_eth_devices[port_id];
3927
3928         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3929         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3930                                                                       delta));
3931 }
3932
3933 int
3934 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3935 {
3936         struct rte_eth_dev *dev;
3937
3938         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3939         dev = &rte_eth_devices[port_id];
3940
3941         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3942         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3943                                                                 timestamp));
3944 }
3945
3946 int
3947 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3948 {
3949         struct rte_eth_dev *dev;
3950
3951         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3952         dev = &rte_eth_devices[port_id];
3953
3954         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3955         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3956                                                                 timestamp));
3957 }
3958
3959 int
3960 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3961 {
3962         struct rte_eth_dev *dev;
3963
3964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3965
3966         dev = &rte_eth_devices[port_id];
3967         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3968         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3969 }
3970
3971 int
3972 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3973 {
3974         struct rte_eth_dev *dev;
3975
3976         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3977
3978         dev = &rte_eth_devices[port_id];
3979         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3980         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3981 }
3982
3983 int
3984 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3985 {
3986         struct rte_eth_dev *dev;
3987
3988         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3989
3990         dev = &rte_eth_devices[port_id];
3991         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3992         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3993 }
3994
3995 int
3996 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3997 {
3998         struct rte_eth_dev *dev;
3999
4000         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4001
4002         dev = &rte_eth_devices[port_id];
4003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4004         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4005 }
4006
4007 int __rte_experimental
4008 rte_eth_dev_get_module_info(uint16_t port_id,
4009                             struct rte_eth_dev_module_info *modinfo)
4010 {
4011         struct rte_eth_dev *dev;
4012
4013         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4014
4015         dev = &rte_eth_devices[port_id];
4016         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4017         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4018 }
4019
4020 int __rte_experimental
4021 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4022                               struct rte_dev_eeprom_info *info)
4023 {
4024         struct rte_eth_dev *dev;
4025
4026         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4027
4028         dev = &rte_eth_devices[port_id];
4029         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4030         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4031 }
4032
4033 int
4034 rte_eth_dev_get_dcb_info(uint16_t port_id,
4035                              struct rte_eth_dcb_info *dcb_info)
4036 {
4037         struct rte_eth_dev *dev;
4038
4039         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4040
4041         dev = &rte_eth_devices[port_id];
4042         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4043
4044         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4045         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4046 }
4047
4048 int
4049 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4050                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4051 {
4052         struct rte_eth_dev *dev;
4053
4054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4055         if (l2_tunnel == NULL) {
4056                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4057                 return -EINVAL;
4058         }
4059
4060         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4061                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
4062                 return -EINVAL;
4063         }
4064
4065         dev = &rte_eth_devices[port_id];
4066         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4067                                 -ENOTSUP);
4068         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4069                                                                 l2_tunnel));
4070 }
4071
4072 int
4073 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4074                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4075                                   uint32_t mask,
4076                                   uint8_t en)
4077 {
4078         struct rte_eth_dev *dev;
4079
4080         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4081
4082         if (l2_tunnel == NULL) {
4083                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4084                 return -EINVAL;
4085         }
4086
4087         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4088                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4089                 return -EINVAL;
4090         }
4091
4092         if (mask == 0) {
4093                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4094                 return -EINVAL;
4095         }
4096
4097         dev = &rte_eth_devices[port_id];
4098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4099                                 -ENOTSUP);
4100         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4101                                                         l2_tunnel, mask, en));
4102 }
4103
4104 static void
4105 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4106                            const struct rte_eth_desc_lim *desc_lim)
4107 {
4108         if (desc_lim->nb_align != 0)
4109                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4110
4111         if (desc_lim->nb_max != 0)
4112                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4113
4114         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4115 }
4116
4117 int
4118 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4119                                  uint16_t *nb_rx_desc,
4120                                  uint16_t *nb_tx_desc)
4121 {
4122         struct rte_eth_dev *dev;
4123         struct rte_eth_dev_info dev_info;
4124
4125         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4126
4127         dev = &rte_eth_devices[port_id];
4128         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4129
4130         rte_eth_dev_info_get(port_id, &dev_info);
4131
4132         if (nb_rx_desc != NULL)
4133                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4134
4135         if (nb_tx_desc != NULL)
4136                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4137
4138         return 0;
4139 }
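
/*
 * Example (illustrative sketch only): clamp requested ring sizes to the
 * device limits before queue setup; "mp" is an existing mempool:
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(0, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(0, 0, nb_rxd, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(0, 0, nb_txd, rte_socket_id(), NULL);
 */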
4140
4141 int
4142 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4143 {
4144         struct rte_eth_dev *dev;
4145
4146         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4147
4148         if (pool == NULL)
4149                 return -EINVAL;
4150
4151         dev = &rte_eth_devices[port_id];
4152
4153         if (*dev->dev_ops->pool_ops_supported == NULL)
4154                 return 1; /* all pools are supported */
4155
4156         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4157 }
4158
4159 /**
4160  * A set of values to describe the possible states of a switch domain.
4161  */
4162 enum rte_eth_switch_domain_state {
4163         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4164         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4165 };
4166
4167 /**
4168  * Array of switch domains available for allocation. Array is sized to
4169  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4170  * ethdev ports in a single process.
4171  */
4172 struct rte_eth_dev_switch {
4173         enum rte_eth_switch_domain_state state;
4174 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4175
4176 int __rte_experimental
4177 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4178 {
4179         unsigned int i;
4180
4181         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4182
4183         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4184                 i < RTE_MAX_ETHPORTS; i++) {
4185                 if (rte_eth_switch_domains[i].state ==
4186                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4187                         rte_eth_switch_domains[i].state =
4188                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4189                         *domain_id = i;
4190                         return 0;
4191                 }
4192         }
4193
4194         return -ENOSPC;
4195 }
4196
4197 int __rte_experimental
4198 rte_eth_switch_domain_free(uint16_t domain_id)
4199 {
4200         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4201                 domain_id >= RTE_MAX_ETHPORTS)
4202                 return -EINVAL;
4203
4204         if (rte_eth_switch_domains[domain_id].state !=
4205                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4206                 return -EINVAL;
4207
4208         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4209
4210         return 0;
4211 }
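
/*
 * Example (illustrative sketch only): a PMD typically allocates one switch
 * domain per physical device at probe time and frees it on remove:
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	...
 *	rte_eth_switch_domain_free(domain_id);
 */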
4212
4213 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4214
4215 static int
4216 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4217 {
4218         int state;
4219         struct rte_kvargs_pair *pair;
4220         char *letter;
4221
4222         arglist->str = strdup(str_in);
4223         if (arglist->str == NULL)
4224                 return -ENOMEM;
4225
4226         letter = arglist->str;
4227         state = 0;
4228         arglist->count = 0;
4229         pair = &arglist->pairs[0];
4230         while (1) {
4231                 switch (state) {
4232                 case 0: /* Initial */
4233                         if (*letter == '=')
4234                                 return -EINVAL;
4235                         else if (*letter == '\0')
4236                                 return 0;
4237
4238                         state = 1;
4239                         pair->key = letter;
4240                         /* fall-thru */
4241
4242                 case 1: /* Parsing key */
4243                         if (*letter == '=') {
4244                                 *letter = '\0';
4245                                 pair->value = letter + 1;
4246                                 state = 2;
4247                         } else if (*letter == ',' || *letter == '\0')
4248                                 return -EINVAL;
4249                         break;
4250
4251
4252                 case 2: /* Parsing value */
4253                         if (*letter == '[')
4254                                 state = 3;
4255                         else if (*letter == ',') {
4256                                 *letter = '\0';
4257                                 arglist->count++;
4258                                 pair = &arglist->pairs[arglist->count];
4259                                 state = 0;
4260                         } else if (*letter == '\0') {
4261                                 letter--;
4262                                 arglist->count++;
4263                                 pair = &arglist->pairs[arglist->count];
4264                                 state = 0;
4265                         }
4266                         break;
4267
4268                 case 3: /* Parsing list */
4269                         if (*letter == ']')
4270                                 state = 2;
4271                         else if (*letter == '\0')
4272                                 return -EINVAL;
4273                         break;
4274                 }
4275                 letter++;
4276         }
4277 }
4278
4279 static int
4280 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4281         void *data)
4282 {
4283         char *str_start;
4284         int state;
4285         int result;
4286
4287         if (*str != '[')
4288                 /* Single element, not a list */
4289                 return callback(str, data);
4290
4291         /* Sanity check, then strip the brackets */
4292         str_start = &str[strlen(str) - 1];
4293         if (*str_start != ']') {
4294                 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'\n", str);
4295                 return -EINVAL;
4296         }
4297         str++;
4298         *str_start = '\0';
4299
4300         /* Process list elements */
4301         state = 0;
4302         while (1) {
4303                 if (state == 0) {
4304                         if (*str == '\0')
4305                                 break;
4306                         if (*str != ',') {
4307                                 str_start = str;
4308                                 state = 1;
4309                         }
4310                 } else if (state == 1) {
4311                         if (*str == ',' || *str == '\0') {
4312                                 if (str > str_start) {
4313                                         /* Non-empty string fragment */
4314                                         *str = '\0';
4315                                         result = callback(str_start, data);
4316                                         if (result < 0)
4317                                                 return result;
4318                                 }
4319                                 state = 0;
4320                         }
4321                 }
4322                 str++;
4323         }
4324         return 0;
4325 }
4326
4327 static int
4328 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4329         const uint16_t max_list)
4330 {
4331         uint16_t lo, hi, val;
4332         int result;
4333
4334         result = sscanf(str, "%hu-%hu", &lo, &hi);
4335         if (result == 1) {
4336                 if (*len_list >= max_list)
4337                         return -ENOMEM;
4338                 list[(*len_list)++] = lo;
4339         } else if (result == 2) {
4340                 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4341                         return -EINVAL;
4342                 for (val = lo; val <= hi; val++) {
4343                         if (*len_list >= max_list)
4344                                 return -ENOMEM;
4345                         list[(*len_list)++] = val;
4346                 }
4347         } else
4348                 return -EINVAL;
4349         return 0;
4350 }
4351
4352
4353 static int
4354 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4355 {
4356         struct rte_eth_devargs *eth_da = data;
4357
4358         return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4359                 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
4360 }
4361
4362 int __rte_experimental
4363 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4364 {
4365         struct rte_kvargs args;
4366         struct rte_kvargs_pair *pair;
4367         unsigned int i;
4368         int result = 0;
4369
4370         memset(eth_da, 0, sizeof(*eth_da));
4371
4372         result = rte_eth_devargs_tokenise(&args, dargs);
4373         if (result < 0)
4374                 goto parse_cleanup;
4375
4376         for (i = 0; i < args.count; i++) {
4377                 pair = &args.pairs[i];
4378                 if (strcmp("representor", pair->key) == 0) {
4379                         result = rte_eth_devargs_parse_list(pair->value,
4380                                 rte_eth_devargs_parse_representor_ports,
4381                                 eth_da);
4382                         if (result < 0)
4383                                 goto parse_cleanup;
4384                 }
4385         }
4386
4387 parse_cleanup:
4388         if (args.str)
4389                 free(args.str);
4390
4391         return result;
4392 }
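
/*
 * Example (illustrative sketch only): parse a representor list such as
 * the one passed on the EAL command line as -w 05:00.0,representor=[0-3]:
 *
 *	struct rte_eth_devargs da;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &da) == 0)
 *		printf("%u representor ports\n", da.nb_representor_ports);
 */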
4393
4394 /* Register the ethdev log type at load time (constructor). */
4395 RTE_INIT(ethdev_init_log)
4397 {
4398         ethdev_logtype = rte_log_register("lib.ethdev");
4399         if (ethdev_logtype >= 0)
4400                 rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
4401 }