ethdev: support metadata as flow rule criteria
lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint16_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}
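
/*
 * Illustrative sketch (not part of the original file): walking every
 * attached port with rte_eth_find_next(), similar in spirit to the
 * RTE_ETH_FOREACH_DEV() helper:
 *
 *     uint16_t pid;
 *
 *     for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *          pid = rte_eth_find_next(pid + 1))
 *             printf("port %u is present\n", pid);
 */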

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        eth_dev_last_created_port = port_id;

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary processes. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
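
/*
 * Illustrative sketch: a PMD typically calls rte_eth_dev_allocate()
 * from its probe path; the device name is hypothetical:
 *
 *     struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_dummy0");
 *
 *     if (eth_dev == NULL)
 *             return -ENOMEM;
 */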

/*
 * Attach to a port already registered by the primary process, which
 * guarantees that the same device gets the same port ID in both the
 * primary and the secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment with primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
               rte_eth_devices[port_id].data->owner.id != owner_id))
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;
        int sret;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
                        new_owner->name);
        if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
                RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
                        port_id);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}
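
/*
 * Illustrative sketch of the ownership API above, assuming a valid
 * port_id; the owner name "my_app" is hypothetical:
 *
 *     struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *     if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *         rte_eth_dev_owner_set(port_id, &owner) == 0)
 *             printf("port %u owned by %s\n", port_id, owner.name);
 */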

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
        return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        for (port = 0; port < RTE_MAX_ETHPORTS; port++)
                if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
                        count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data', because it might be
         * overwritten by a VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
                if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
                    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }
        }

        return -ENODEV;
}
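
/*
 * Illustrative sketch: resolving a port ID from a device name; the
 * PCI address is hypothetical:
 *
 *     uint16_t pid;
 *
 *     if (rte_eth_dev_get_port_by_name("0000:03:00.0", &pid) == 0)
 *             printf("device is port %u\n", pid);
 */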

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
        int current = rte_eth_dev_count_total();
        struct rte_devargs da;
        int ret = -1;

        memset(&da, 0, sizeof(da));

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs */
        if (rte_devargs_parse(&da, devargs))
                goto err;

        ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count_total()) {
                RTE_ETHDEV_LOG(ERR, "No port found for device (%s)\n", da.name);
                ret = -1;
                goto err;
        }

        /* If the port count did not change, there is a bug: some driver
         * reported that it attached a device but did not create a port.
         * FIXME: race condition in case of plug-out of another device
         */
        if (current == rte_eth_dev_count_total()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(da.args);
        return ret;
}
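
/*
 * Illustrative sketch: hot-plugging a device and retrieving its port
 * ID; the devargs string is hypothetical:
 *
 *     uint16_t pid;
 *
 *     if (rte_eth_dev_attach("net_null0", &pid) == 0)
 *             printf("attached as port %u\n", pid);
 */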

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
        struct rte_device *dev;
        struct rte_bus *bus;
        uint32_t dev_flags;
        int ret = -1;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %"PRIu16" is bonded, cannot detach\n", port_id);
                return -ENOTSUP;
        }

        dev = rte_eth_devices[port_id].device;
        if (dev == NULL)
                return -EINVAL;

        bus = rte_bus_find_by_device(dev);
        if (bus == NULL)
                return -ENOENT;

        ret = rte_eal_hotplug_remove(bus->name, dev->name);
        if (ret < 0)
                return ret;

        rte_eth_dev_release_port(&rte_eth_devices[port_id]);
        return 0;
}

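/*
 * Resize the Rx queue pointer array to hold exactly nb_queues entries:
 * allocate it on first configuration, release queues falling outside
 * the new range on reconfiguration, and free it when nb_queues is zero.
 */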
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
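
/*
 * Illustrative sketch: building a link_speeds bitmap for a fixed
 * full-duplex 10G link:
 *
 *     uint32_t speeds = ETH_LINK_SPEED_FIXED |
 *             rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                   ETH_LINK_FULL_DUPLEX);
 */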

const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
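
/*
 * Illustrative sketch: printing the name of every bit set in an Rx
 * offload mask, one bit at a time:
 *
 *     uint64_t offloads = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_SCATTER;
 *     uint64_t bit;
 *
 *     for (bit = 1; bit != 0; bit <<= 1)
 *             if (offloads & bit)
 *                     printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 */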

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf local_conf = *dev_conf;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        /* If the number of queues specified by the application for both Rx
         * and Tx is zero, use driver preferred values. This cannot be done
         * individually as it is valid for either Tx or Rx (but not both) to
         * be zero. If the driver does not provide any preferred values,
         * fall back on EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                return -EINVAL;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
             local_conf.rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, local_conf.rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                return -EINVAL;
        }
        if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
             local_conf.txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, local_conf.txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                return -EINVAL;
        }

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                return -EINVAL;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        return 0;
}
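
/*
 * Illustrative sketch: a minimal configuration with one Rx and one Tx
 * queue and an all-default rte_eth_conf, assuming a valid port_id:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *             rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */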

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
{
        struct ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info->max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }
}

static void
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
                           struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                rte_eth_dev_mac_restore(dev, dev_info);

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        rte_eth_dev_info_get(port_id, &dev_info);

        /* Restore the MAC address now if the device does not support live change */
        if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
                rte_eth_dev_mac_restore(dev, &dev_info);

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return eth_err(port_id, diag);

        rte_eth_dev_config_restore(dev, &dev_info, port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}
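
/*
 * Illustrative sketch: starting a configured port and failing loudly
 * on error:
 *
 *     int ret = rte_eth_dev_start(port_id);
 *
 *     if (ret < 0)
 *             rte_exit(EXIT_FAILURE, "port %u: start failed (%d)\n",
 *                      port_id, ret);
 */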

void
rte_eth_dev_stop(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

void
rte_eth_dev_close(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        /* check behaviour flag - temporary for PMD migration */
        if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
                /* new behaviour: send event + reset state + free all data */
                rte_eth_dev_release_port(dev);
                return;
        }
        RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
                        "The driver %s should migrate to the new behaviour.\n",
                        dev->device->driver->name);
        /* old behaviour: only free queue arrays */
        dev->data->nb_rx_queues = 0;
        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        dev->data->nb_tx_queues = 0;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

        rte_eth_dev_stop(port_id);
        ret = dev->dev_ops->dev_reset(dev);

        return eth_err(port_id, ret);
}

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

        dev = &rte_eth_devices[port_id];

        if (dev->state == RTE_ETH_DEV_REMOVED)
                return 1;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

        ret = dev->dev_ops->is_removed(dev);
        if (ret != 0)
                /* Device is physically removed. */
                dev->state = RTE_ETH_DEV_REMOVED;

        return ret;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf local_conf;
        void **rxq;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
                        mp->name, (int)mp->private_data_size,
                        (int)sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_ETHDEV_LOG(ERR,
                        "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
                        mp->name, (int)mbp_buf_size,
                        (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
                        (int)RTE_PKTMBUF_HEADROOM,
                        (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        /* Use default specified by driver, if nb_rx_desc is zero */
        if (nb_rx_desc == 0) {
                nb_rx_desc = dev_info.default_rxportconf.ring_size;
                /* If driver default is also zero, fall back on EAL default */
                if (nb_rx_desc == 0)
                        nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

                RTE_ETHDEV_LOG(ERR,
                        "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc, dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (dev->data->dev_started &&
                !(dev_info.dev_capa &
                        RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
                return -EBUSY;

        if (dev->data->dev_started &&
                (dev->data->rx_queue_state[rx_queue_id] !=
                        RTE_ETH_QUEUE_STATE_STOPPED))
                return -EBUSY;

        rxq = dev->data->rx_queues;
        if (rxq[rx_queue_id]) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
                                        -ENOTSUP);
                (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
                rxq[rx_queue_id] = NULL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        local_conf = *rx_conf;

1537         /*
1538          * If an offload has already been enabled in
1539          * rte_eth_dev_configure(), it is enabled on all queues,
1540          * so there is no need to enable it for this queue again.
1541          * The local_conf.offloads input to the underlying PMD only
1542          * carries those offloads that are enabled for this queue
1543          * alone, not those enabled on all queues.
1544          */
1545         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1546
1547         /*
1548          * Offloads newly added for this queue are those not enabled in
1549          * rte_eth_dev_configure(); they must be per-queue offloads.
1550          * A pure per-port offload cannot be enabled on one queue while
1551          * disabled on another, nor can it be newly enabled on a single
1552          * queue if it has not already been enabled in
1553          * rte_eth_dev_configure().
1554          */
1555         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1556              local_conf.offloads) {
1557                 RTE_ETHDEV_LOG(ERR,
1558                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1559                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1560                         port_id, rx_queue_id, local_conf.offloads,
1561                         dev_info.rx_queue_offload_capa,
1562                         __func__);
1563                 return -EINVAL;
1564         }
1565
1566         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1567                                               socket_id, &local_conf, mp);
1568         if (!ret) {
1569                 if (!dev->data->min_rx_buf_size ||
1570                     dev->data->min_rx_buf_size > mbp_buf_size)
1571                         dev->data->min_rx_buf_size = mbp_buf_size;
1572         }
1573
1574         return eth_err(port_id, ret);
1575 }
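
/*
 * A minimal usage sketch (illustrative, not part of this file): set up
 * RX queue 0 on port 0 with driver-default descriptors and rxconf
 * (nb_rx_desc = 0 and rx_conf = NULL select the defaults, as handled
 * above). The pool name and sizing are arbitrary example values.
 *
 *   struct rte_mempool *mp;
 *   int ret;
 *
 *   mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                rte_socket_id());
 *   if (mp == NULL)
 *           rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *
 *   ret = rte_eth_rx_queue_setup(0, 0, 0, rte_socket_id(), NULL, mp);
 *   if (ret < 0)
 *           rte_exit(EXIT_FAILURE, "rx queue setup failed: %d\n", ret);
 */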
1576
1577 int
1578 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1579                        uint16_t nb_tx_desc, unsigned int socket_id,
1580                        const struct rte_eth_txconf *tx_conf)
1581 {
1582         struct rte_eth_dev *dev;
1583         struct rte_eth_dev_info dev_info;
1584         struct rte_eth_txconf local_conf;
1585         void **txq;
1586
1587         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1588
1589         dev = &rte_eth_devices[port_id];
1590         if (tx_queue_id >= dev->data->nb_tx_queues) {
1591                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1592                 return -EINVAL;
1593         }
1594
1595         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1596         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1597
1598         rte_eth_dev_info_get(port_id, &dev_info);
1599
1600         /* Use default specified by driver, if nb_tx_desc is zero */
1601         if (nb_tx_desc == 0) {
1602                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1603                 /* If driver default is also zero, fall back on EAL default */
1604                 if (nb_tx_desc == 0)
1605                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1606         }
1607         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1608             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1609             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1610                 RTE_ETHDEV_LOG(ERR,
1611                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1612                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1613                         dev_info.tx_desc_lim.nb_min,
1614                         dev_info.tx_desc_lim.nb_align);
1615                 return -EINVAL;
1616         }
1617
1618         if (dev->data->dev_started &&
1619                 !(dev_info.dev_capa &
1620                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1621                 return -EBUSY;
1622
1623         if (dev->data->dev_started &&
1624                 (dev->data->tx_queue_state[tx_queue_id] !=
1625                         RTE_ETH_QUEUE_STATE_STOPPED))
1626                 return -EBUSY;
1627
1628         txq = dev->data->tx_queues;
1629         if (txq[tx_queue_id]) {
1630                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1631                                         -ENOTSUP);
1632                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1633                 txq[tx_queue_id] = NULL;
1634         }
1635
1636         if (tx_conf == NULL)
1637                 tx_conf = &dev_info.default_txconf;
1638
1639         local_conf = *tx_conf;
1640
1641         /*
1642          * If an offload has already been enabled in
1643          * rte_eth_dev_configure(), it is enabled on all queues,
1644          * so there is no need to enable it for this queue again.
1645          * The local_conf.offloads input to the underlying PMD only
1646          * carries those offloads that are enabled for this queue
1647          * alone, not those enabled on all queues.
1648          */
1649         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1650
1651         /*
1652          * Offloads newly added for this queue are those not enabled in
1653          * rte_eth_dev_configure(); they must be per-queue offloads.
1654          * A pure per-port offload cannot be enabled on one queue while
1655          * disabled on another, nor can it be newly enabled on a single
1656          * queue if it has not already been enabled in
1657          * rte_eth_dev_configure().
1658          */
1659         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1660              local_conf.offloads) {
1661                 RTE_ETHDEV_LOG(ERR,
1662                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1663                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1664                         port_id, tx_queue_id, local_conf.offloads,
1665                         dev_info.tx_queue_offload_capa,
1666                         __func__);
1667                 return -EINVAL;
1668         }
1669
1670         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1671                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1672 }
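
/*
 * A minimal usage sketch (illustrative): the TX counterpart of the RX
 * setup above. Passing nb_tx_desc = 0 selects the driver (or EAL
 * fallback) ring size, and a NULL tx_conf selects default_txconf.
 *
 *   int ret = rte_eth_tx_queue_setup(0, 0, 0, rte_socket_id(), NULL);
 */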
1673
1674 void
1675 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1676                 void *userdata __rte_unused)
1677 {
1678         unsigned i;
1679
1680         for (i = 0; i < unsent; i++)
1681                 rte_pktmbuf_free(pkts[i]);
1682 }
1683
1684 void
1685 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1686                 void *userdata)
1687 {
1688         uint64_t *count = userdata;
1689         unsigned i;
1690
1691         for (i = 0; i < unsent; i++)
1692                 rte_pktmbuf_free(pkts[i]);
1693
1694         *count += unsent;
1695 }
1696
1697 int
1698 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1699                 buffer_tx_error_fn cbfn, void *userdata)
1700 {
1701         buffer->error_callback = cbfn;
1702         buffer->error_userdata = userdata;
1703         return 0;
1704 }
1705
1706 int
1707 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1708 {
1709         int ret = 0;
1710
1711         if (buffer == NULL)
1712                 return -EINVAL;
1713
1714         buffer->size = size;
1715         if (buffer->error_callback == NULL) {
1716                 ret = rte_eth_tx_buffer_set_err_callback(
1717                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1718         }
1719
1720         return ret;
1721 }
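
/*
 * A usage sketch (illustrative): buffered TX using the count callback,
 * so packets that could not be sent are tallied instead of silently
 * freed. The queue id and buffer size are example values.
 *
 *   static uint64_t silently_dropped;
 *   struct rte_eth_dev_tx_buffer *buf;
 *
 *   buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32),
 *                            0, rte_socket_id());
 *   rte_eth_tx_buffer_init(buf, 32);
 *   rte_eth_tx_buffer_set_err_callback(buf,
 *           rte_eth_tx_buffer_count_callback, &silently_dropped);
 *
 *   rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *   rte_eth_tx_buffer_flush(port_id, 0, buf);
 */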
1722
1723 int
1724 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1725 {
1726         struct rte_eth_dev *dev;
1727         int ret;
1728         /* Validate input data. Bail if not valid or not supported. */
1729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1730         dev = &rte_eth_devices[port_id];
1731         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1732
1733         /* Call driver to free pending mbufs. */
1734         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1735                                                free_cnt);
1736         return eth_err(port_id, ret);
1737 }
1738
1739 void
1740 rte_eth_promiscuous_enable(uint16_t port_id)
1741 {
1742         struct rte_eth_dev *dev;
1743
1744         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1745         dev = &rte_eth_devices[port_id];
1746
1747         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1748         (*dev->dev_ops->promiscuous_enable)(dev);
1749         dev->data->promiscuous = 1;
1750 }
1751
1752 void
1753 rte_eth_promiscuous_disable(uint16_t port_id)
1754 {
1755         struct rte_eth_dev *dev;
1756
1757         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1758         dev = &rte_eth_devices[port_id];
1759
1760         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1761         dev->data->promiscuous = 0;
1762         (*dev->dev_ops->promiscuous_disable)(dev);
1763 }
1764
1765 int
1766 rte_eth_promiscuous_get(uint16_t port_id)
1767 {
1768         struct rte_eth_dev *dev;
1769
1770         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1771
1772         dev = &rte_eth_devices[port_id];
1773         return dev->data->promiscuous;
1774 }
1775
1776 void
1777 rte_eth_allmulticast_enable(uint16_t port_id)
1778 {
1779         struct rte_eth_dev *dev;
1780
1781         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1782         dev = &rte_eth_devices[port_id];
1783
1784         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1785         (*dev->dev_ops->allmulticast_enable)(dev);
1786         dev->data->all_multicast = 1;
1787 }
1788
1789 void
1790 rte_eth_allmulticast_disable(uint16_t port_id)
1791 {
1792         struct rte_eth_dev *dev;
1793
1794         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1795         dev = &rte_eth_devices[port_id];
1796
1797         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1798         dev->data->all_multicast = 0;
1799         (*dev->dev_ops->allmulticast_disable)(dev);
1800 }
1801
1802 int
1803 rte_eth_allmulticast_get(uint16_t port_id)
1804 {
1805         struct rte_eth_dev *dev;
1806
1807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1808
1809         dev = &rte_eth_devices[port_id];
1810         return dev->data->all_multicast;
1811 }
1812
1813 void
1814 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1815 {
1816         struct rte_eth_dev *dev;
1817
1818         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1819         dev = &rte_eth_devices[port_id];
1820
1821         if (dev->data->dev_conf.intr_conf.lsc &&
1822             dev->data->dev_started)
1823                 rte_eth_linkstatus_get(dev, eth_link);
1824         else {
1825                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1826                 (*dev->dev_ops->link_update)(dev, 1);
1827                 *eth_link = dev->data->dev_link;
1828         }
1829 }
1830
1831 void
1832 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1833 {
1834         struct rte_eth_dev *dev;
1835
1836         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1837         dev = &rte_eth_devices[port_id];
1838
1839         if (dev->data->dev_conf.intr_conf.lsc &&
1840             dev->data->dev_started)
1841                 rte_eth_linkstatus_get(dev, eth_link);
1842         else {
1843                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1844                 (*dev->dev_ops->link_update)(dev, 0);
1845                 *eth_link = dev->data->dev_link;
1846         }
1847 }
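
/*
 * A polling sketch (illustrative): query the link without blocking,
 * e.g. from a status loop, and report when it comes up.
 *
 *   struct rte_eth_link link;
 *
 *   rte_eth_link_get_nowait(port_id, &link);
 *   if (link.link_status == ETH_LINK_UP)
 *           printf("port %u: up, %u Mbps\n", port_id, link.link_speed);
 */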
1848
1849 int
1850 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1851 {
1852         struct rte_eth_dev *dev;
1853
1854         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1855
1856         dev = &rte_eth_devices[port_id];
1857         memset(stats, 0, sizeof(*stats));
1858
1859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1860         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1861         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1862 }
1863
1864 int
1865 rte_eth_stats_reset(uint16_t port_id)
1866 {
1867         struct rte_eth_dev *dev;
1868
1869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1870         dev = &rte_eth_devices[port_id];
1871
1872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1873         (*dev->dev_ops->stats_reset)(dev);
1874         dev->data->rx_mbuf_alloc_failed = 0;
1875
1876         return 0;
1877 }
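
/*
 * A usage sketch (illustrative): read the basic counters, then clear
 * them so the next read starts from zero.
 *
 *   struct rte_eth_stats stats;
 *
 *   if (rte_eth_stats_get(port_id, &stats) == 0) {
 *           printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *                  stats.ipackets, stats.opackets, stats.imissed);
 *           rte_eth_stats_reset(port_id);
 *   }
 */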
1878
1879 static inline int
1880 get_xstats_basic_count(struct rte_eth_dev *dev)
1881 {
1882         uint16_t nb_rxqs, nb_txqs;
1883         int count;
1884
1885         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1886         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1887
1888         count = RTE_NB_STATS;
1889         count += nb_rxqs * RTE_NB_RXQ_STATS;
1890         count += nb_txqs * RTE_NB_TXQ_STATS;
1891
1892         return count;
1893 }
1894
1895 static int
1896 get_xstats_count(uint16_t port_id)
1897 {
1898         struct rte_eth_dev *dev;
1899         int count;
1900
1901         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1902         dev = &rte_eth_devices[port_id];
1903         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1904                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1905                                 NULL, 0);
1906                 if (count < 0)
1907                         return eth_err(port_id, count);
1908         }
1909         if (dev->dev_ops->xstats_get_names != NULL) {
1910                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1911                 if (count < 0)
1912                         return eth_err(port_id, count);
1913         } else
1914                 count = 0;
1915
1917         count += get_xstats_basic_count(dev);
1918
1919         return count;
1920 }
1921
1922 int
1923 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1924                 uint64_t *id)
1925 {
1926         int cnt_xstats, idx_xstat;
1927
1928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1929
1930         if (!id) {
1931                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
1932                 return -ENOMEM;
1933         }
1934
1935         if (!xstat_name) {
1936                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
1937                 return -ENOMEM;
1938         }
1939
1940         /* Get count */
1941         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1942         if (cnt_xstats  < 0) {
1943                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
1944                 return -ENODEV;
1945         }
1946
1947         /* Get id-name lookup table */
1948         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1949
1950         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1951                         port_id, xstats_names, cnt_xstats, NULL)) {
1952                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
1953                 return -1;
1954         }
1955
1956         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1957                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1958                         *id = idx_xstat;
1959                         return 0;
1960                 }
1961         }
1962
1963         return -EINVAL;
1964 }
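
/*
 * A usage sketch (illustrative): resolve one xstat by name once, then
 * fetch only that counter on the fast path via its id.
 *
 *   uint64_t id, value;
 *
 *   if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *                                     &id) == 0 &&
 *       rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *           printf("rx_good_packets = %" PRIu64 "\n", value);
 */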
1965
1966 /* retrieve basic stats names */
1967 static int
1968 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1969         struct rte_eth_xstat_name *xstats_names)
1970 {
1971         int cnt_used_entries = 0;
1972         uint32_t idx, id_queue;
1973         uint16_t num_q;
1974
1975         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1976                 snprintf(xstats_names[cnt_used_entries].name,
1977                         sizeof(xstats_names[0].name),
1978                         "%s", rte_stats_strings[idx].name);
1979                 cnt_used_entries++;
1980         }
1981         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1982         for (id_queue = 0; id_queue < num_q; id_queue++) {
1983                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1984                         snprintf(xstats_names[cnt_used_entries].name,
1985                                 sizeof(xstats_names[0].name),
1986                                 "rx_q%u%s",
1987                                 id_queue, rte_rxq_stats_strings[idx].name);
1988                         cnt_used_entries++;
1989                 }
1990
1991         }
1992         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1993         for (id_queue = 0; id_queue < num_q; id_queue++) {
1994                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1995                         snprintf(xstats_names[cnt_used_entries].name,
1996                                 sizeof(xstats_names[0].name),
1997                                 "tx_q%u%s",
1998                                 id_queue, rte_txq_stats_strings[idx].name);
1999                         cnt_used_entries++;
2000                 }
2001         }
2002         return cnt_used_entries;
2003 }
2004
2005 /* retrieve ethdev extended statistics names */
2006 int
2007 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2008         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2009         uint64_t *ids)
2010 {
2011         struct rte_eth_xstat_name *xstats_names_copy;
2012         unsigned int no_basic_stat_requested = 1;
2013         unsigned int no_ext_stat_requested = 1;
2014         unsigned int expected_entries;
2015         unsigned int basic_count;
2016         struct rte_eth_dev *dev;
2017         unsigned int i;
2018         int ret;
2019
2020         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2021         dev = &rte_eth_devices[port_id];
2022
2023         basic_count = get_xstats_basic_count(dev);
2024         ret = get_xstats_count(port_id);
2025         if (ret < 0)
2026                 return ret;
2027         expected_entries = (unsigned int)ret;
2028
2029         /* Return max number of stats if no ids given */
2030         if (!ids) {
2031                 if (!xstats_names)
2032                         return expected_entries;
2033                 else if (size < expected_entries)
2034                         return expected_entries;
2035         }
2036
2037         if (ids && !xstats_names)
2038                 return -EINVAL;
2039
2040         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2041                 uint64_t ids_copy[size];
2042
2043                 for (i = 0; i < size; i++) {
2044                         if (ids[i] < basic_count) {
2045                                 no_basic_stat_requested = 0;
2046                                 break;
2047                         }
2048
2049                         /*
2050                          * Convert ids to the xstats ids known by the PMD.
2051                          * User-visible ids cover basic + extended stats.
2052                          */
2053                         ids_copy[i] = ids[i] - basic_count;
2054                 }
2055
2056                 if (no_basic_stat_requested)
2057                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2058                                         xstats_names, ids_copy, size);
2059         }
2060
2061         /* Retrieve all stats */
2062         if (!ids) {
2063                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2064                                 expected_entries);
2065                 if (num_stats < 0 || num_stats > (int)expected_entries)
2066                         return num_stats;
2067                 else
2068                         return expected_entries;
2069         }
2070
2071         xstats_names_copy = calloc(expected_entries,
2072                 sizeof(struct rte_eth_xstat_name));
2073
2074         if (!xstats_names_copy) {
2075                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2076                 return -ENOMEM;
2077         }
2078
2079         if (ids) {
2080                 for (i = 0; i < size; i++) {
2081                         if (ids[i] >= basic_count) {
2082                                 no_ext_stat_requested = 0;
2083                                 break;
2084                         }
2085                 }
2086         }
2087
2088         /* Fill xstats_names_copy structure */
2089         if (ids && no_ext_stat_requested) {
2090                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2091         } else {
2092                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2093                         expected_entries);
2094                 if (ret < 0) {
2095                         free(xstats_names_copy);
2096                         return ret;
2097                 }
2098         }
2099
2100         /* Filter stats */
2101         for (i = 0; i < size; i++) {
2102                 if (ids[i] >= expected_entries) {
2103                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2104                         free(xstats_names_copy);
2105                         return -1;
2106                 }
2107                 xstats_names[i] = xstats_names_copy[ids[i]];
2108         }
2109
2110         free(xstats_names_copy);
2111         return size;
2112 }
2113
2114 int
2115 rte_eth_xstats_get_names(uint16_t port_id,
2116         struct rte_eth_xstat_name *xstats_names,
2117         unsigned int size)
2118 {
2119         struct rte_eth_dev *dev;
2120         int cnt_used_entries;
2121         int cnt_expected_entries;
2122         int cnt_driver_entries;
2123
2124         cnt_expected_entries = get_xstats_count(port_id);
2125         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2126                         (int)size < cnt_expected_entries)
2127                 return cnt_expected_entries;
2128
2129         /* port_id checked in get_xstats_count() */
2130         dev = &rte_eth_devices[port_id];
2131
2132         cnt_used_entries = rte_eth_basic_stats_get_names(
2133                 dev, xstats_names);
2134
2135         if (dev->dev_ops->xstats_get_names != NULL) {
2136                 /* If there are any driver-specific xstats, append them
2137                  * to the end of the list.
2138                  */
2139                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2140                         dev,
2141                         xstats_names + cnt_used_entries,
2142                         size - cnt_used_entries);
2143                 if (cnt_driver_entries < 0)
2144                         return eth_err(port_id, cnt_driver_entries);
2145                 cnt_used_entries += cnt_driver_entries;
2146         }
2147
2148         return cnt_used_entries;
2149 }
2150
2151
2152 static int
2153 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2154 {
2155         struct rte_eth_dev *dev;
2156         struct rte_eth_stats eth_stats;
2157         unsigned int count = 0, i, q;
2158         uint64_t val, *stats_ptr;
2159         uint16_t nb_rxqs, nb_txqs;
2160         int ret;
2161
2162         ret = rte_eth_stats_get(port_id, &eth_stats);
2163         if (ret < 0)
2164                 return ret;
2165
2166         dev = &rte_eth_devices[port_id];
2167
2168         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2169         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2170
2171         /* global stats */
2172         for (i = 0; i < RTE_NB_STATS; i++) {
2173                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2174                                         rte_stats_strings[i].offset);
2175                 val = *stats_ptr;
2176                 xstats[count++].value = val;
2177         }
2178
2179         /* per-rxq stats */
2180         for (q = 0; q < nb_rxqs; q++) {
2181                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2182                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2183                                         rte_rxq_stats_strings[i].offset +
2184                                         q * sizeof(uint64_t));
2185                         val = *stats_ptr;
2186                         xstats[count++].value = val;
2187                 }
2188         }
2189
2190         /* per-txq stats */
2191         for (q = 0; q < nb_txqs; q++) {
2192                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2193                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2194                                         rte_txq_stats_strings[i].offset +
2195                                         q * sizeof(uint64_t));
2196                         val = *stats_ptr;
2197                         xstats[count++].value = val;
2198                 }
2199         }
2200         return count;
2201 }
2202
2203 /* retrieve ethdev extended statistics */
2204 int
2205 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2206                          uint64_t *values, unsigned int size)
2207 {
2208         unsigned int no_basic_stat_requested = 1;
2209         unsigned int no_ext_stat_requested = 1;
2210         unsigned int num_xstats_filled;
2211         unsigned int basic_count;
2212         uint16_t expected_entries;
2213         struct rte_eth_dev *dev;
2214         unsigned int i;
2215         int ret;
2216
2217         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2218         ret = get_xstats_count(port_id);
2219         if (ret < 0)
2220                 return ret;
2221         expected_entries = (uint16_t)ret;
2222         struct rte_eth_xstat xstats[expected_entries];
2223         dev = &rte_eth_devices[port_id];
2224         basic_count = get_xstats_basic_count(dev);
2225
2226         /* Return max number of stats if no ids given */
2227         if (!ids) {
2228                 if (!values)
2229                         return expected_entries;
2230                 else if (size < expected_entries)
2231                         return expected_entries;
2232         }
2233
2234         if (ids && !values)
2235                 return -EINVAL;
2236
2237         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2239                 uint64_t ids_copy[size];
2240
2241                 for (i = 0; i < size; i++) {
2242                         if (ids[i] < basic_count) {
2243                                 no_basic_stat_requested = 0;
2244                                 break;
2245                         }
2246
2247                         /*
2248                          * Convert ids to the xstats ids known by the PMD.
2249                          * User-visible ids cover basic + extended stats.
2250                          */
2251                         ids_copy[i] = ids[i] - basic_count;
2252                 }
2253
2254                 if (no_basic_stat_requested)
2255                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2256                                         values, size);
2257         }
2258
2259         if (ids) {
2260                 for (i = 0; i < size; i++) {
2261                         if (ids[i] >= basic_count) {
2262                                 no_ext_stat_requested = 0;
2263                                 break;
2264                         }
2265                 }
2266         }
2267
2268         /* Fill the xstats structure */
2269         if (ids && no_ext_stat_requested)
2270                 ret = rte_eth_basic_stats_get(port_id, xstats);
2271         else
2272                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2273
2274         if (ret < 0)
2275                 return ret;
2276         num_xstats_filled = (unsigned int)ret;
2277
2278         /* Return all stats */
2279         if (!ids) {
2280                 for (i = 0; i < num_xstats_filled; i++)
2281                         values[i] = xstats[i].value;
2282                 return expected_entries;
2283         }
2284
2285         /* Filter stats */
2286         for (i = 0; i < size; i++) {
2287                 if (ids[i] >= expected_entries) {
2288                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2289                         return -1;
2290                 }
2291                 values[i] = xstats[ids[i]].value;
2292         }
2293         return size;
2294 }
2295
2296 int
2297 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2298         unsigned int n)
2299 {
2300         struct rte_eth_dev *dev;
2301         unsigned int count = 0, i;
2302         signed int xcount = 0;
2303         uint16_t nb_rxqs, nb_txqs;
2304         int ret;
2305
2306         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2307
2308         dev = &rte_eth_devices[port_id];
2309
2310         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2311         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2312
2313         /* Return generic statistics */
2314         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2315                 (nb_txqs * RTE_NB_TXQ_STATS);
2316
2317         /* implemented by the driver */
2318         if (dev->dev_ops->xstats_get != NULL) {
2319                 /* Retrieve the xstats from the driver at the end of the
2320                  * xstats struct.
2321                  */
2322                 xcount = (*dev->dev_ops->xstats_get)(dev,
2323                                      xstats ? xstats + count : NULL,
2324                                      (n > count) ? n - count : 0);
2325
2326                 if (xcount < 0)
2327                         return eth_err(port_id, xcount);
2328         }
2329
2330         if (n < count + xcount || xstats == NULL)
2331                 return count + xcount;
2332
2333         /* now fill the xstats structure */
2334         ret = rte_eth_basic_stats_get(port_id, xstats);
2335         if (ret < 0)
2336                 return ret;
2337         count = ret;
2338
2339         for (i = 0; i < count; i++)
2340                 xstats[i].id = i;
2341         /* add an offset to driver-specific stats */
2342         for ( ; i < count + xcount; i++)
2343                 xstats[i].id += count;
2344
2345         return count + xcount;
2346 }
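
/*
 * A usage sketch (illustrative) of the usual two-call pattern: size the
 * arrays with a NULL first call, then fetch names and values. Error
 * handling is elided for brevity.
 *
 *   int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *   struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *   rte_eth_xstats_get_names(port_id, names, n);
 *   n = rte_eth_xstats_get(port_id, xs, n);
 *   for (i = 0; i < n; i++)
 *           printf("%s: %" PRIu64 "\n", names[xs[i].id].name,
 *                  xs[i].value);
 */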
2347
2348 /* reset ethdev extended statistics */
2349 void
2350 rte_eth_xstats_reset(uint16_t port_id)
2351 {
2352         struct rte_eth_dev *dev;
2353
2354         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2355         dev = &rte_eth_devices[port_id];
2356
2357         /* implemented by the driver */
2358         if (dev->dev_ops->xstats_reset != NULL) {
2359                 (*dev->dev_ops->xstats_reset)(dev);
2360                 return;
2361         }
2362
2363         /* fallback to default */
2364         rte_eth_stats_reset(port_id);
2365 }
2366
2367 static int
2368 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2369                 uint8_t is_rx)
2370 {
2371         struct rte_eth_dev *dev;
2372
2373         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2374
2375         dev = &rte_eth_devices[port_id];
2376
2377         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2378
2379         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2380                 return -EINVAL;
2381
2382         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2383                 return -EINVAL;
2384
2385         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2386                 return -EINVAL;
2387
2388         return (*dev->dev_ops->queue_stats_mapping_set)
2389                         (dev, queue_id, stat_idx, is_rx);
2390 }
2391
2392
2393 int
2394 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2395                 uint8_t stat_idx)
2396 {
2397         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2398                                                 stat_idx, STAT_QMAP_TX));
2399 }
2400
2401
2402 int
2403 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2404                 uint8_t stat_idx)
2405 {
2406         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2407                                                 stat_idx, STAT_QMAP_RX));
2408 }
2409
2410 int
2411 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2412 {
2413         struct rte_eth_dev *dev;
2414
2415         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2416         dev = &rte_eth_devices[port_id];
2417
2418         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2419         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2420                                                         fw_version, fw_size));
2421 }
2422
2423 void
2424 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2425 {
2426         struct rte_eth_dev *dev;
2427         const struct rte_eth_desc_lim lim = {
2428                 .nb_max = UINT16_MAX,
2429                 .nb_min = 0,
2430                 .nb_align = 1,
2431         };
2432
2433         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2434         dev = &rte_eth_devices[port_id];
2435
2436         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2437         dev_info->rx_desc_lim = lim;
2438         dev_info->tx_desc_lim = lim;
2439         dev_info->device = dev->device;
2440
2441         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2442         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2443         dev_info->driver_name = dev->device->driver->name;
2444         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2445         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2446
2447         dev_info->dev_flags = &dev->data->dev_flags;
2448 }
2449
2450 int
2451 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2452                                  uint32_t *ptypes, int num)
2453 {
2454         int i, j;
2455         struct rte_eth_dev *dev;
2456         const uint32_t *all_ptypes;
2457
2458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2459         dev = &rte_eth_devices[port_id];
2460         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2461         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2462
2463         if (!all_ptypes)
2464                 return 0;
2465
2466         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2467                 if (all_ptypes[i] & ptype_mask) {
2468                         if (j < num)
2469                                 ptypes[j] = all_ptypes[i];
2470                         j++;
2471                 }
2472
2473         return j;
2474 }
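
/*
 * A usage sketch (illustrative): list the L4 packet types the driver
 * can recognize; a first call with num = 0 would size the array.
 *
 *   uint32_t ptypes[16];
 *   int i, n;
 *
 *   n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *                                        ptypes, RTE_DIM(ptypes));
 *   for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *           printf("ptype 0x%08x\n", ptypes[i]);
 */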
2475
2476 void
2477 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2478 {
2479         struct rte_eth_dev *dev;
2480
2481         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2482         dev = &rte_eth_devices[port_id];
2483         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2484 }
2485
2486
2487 int
2488 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2489 {
2490         struct rte_eth_dev *dev;
2491
2492         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2493
2494         dev = &rte_eth_devices[port_id];
2495         *mtu = dev->data->mtu;
2496         return 0;
2497 }
2498
2499 int
2500 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2501 {
2502         int ret;
2503         struct rte_eth_dev *dev;
2504
2505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2506         dev = &rte_eth_devices[port_id];
2507         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2508
2509         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2510         if (!ret)
2511                 dev->data->mtu = mtu;
2512
2513         return eth_err(port_id, ret);
2514 }
2515
2516 int
2517 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2518 {
2519         struct rte_eth_dev *dev;
2520         int ret;
2521
2522         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2523         dev = &rte_eth_devices[port_id];
2524         if (!(dev->data->dev_conf.rxmode.offloads &
2525               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2526                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2527                         port_id);
2528                 return -ENOSYS;
2529         }
2530
2531         if (vlan_id > 4095) {
2532                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2533                         port_id, vlan_id);
2534                 return -EINVAL;
2535         }
2536         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2537
2538         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2539         if (ret == 0) {
2540                 struct rte_vlan_filter_conf *vfc;
2541                 int vidx;
2542                 int vbit;
2543
2544                 vfc = &dev->data->vlan_filter_conf;
2545                 vidx = vlan_id / 64;
2546                 vbit = vlan_id % 64;
2547
2548                 if (on)
2549                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2550                 else
2551                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2552         }
2553
2554         return eth_err(port_id, ret);
2555 }
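
/*
 * A usage sketch (illustrative): DEV_RX_OFFLOAD_VLAN_FILTER must have
 * been enabled in rxmode.offloads at configure time, after which
 * individual VLAN ids can be admitted, e.g. VLAN 100:
 *
 *   int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */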
2556
2557 int
2558 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2559                                     int on)
2560 {
2561         struct rte_eth_dev *dev;
2562
2563         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2564         dev = &rte_eth_devices[port_id];
2565         if (rx_queue_id >= dev->data->nb_rx_queues) {
2566                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
2567                 return -EINVAL;
2568         }
2569
2570         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2571         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2572
2573         return 0;
2574 }
2575
2576 int
2577 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2578                                 enum rte_vlan_type vlan_type,
2579                                 uint16_t tpid)
2580 {
2581         struct rte_eth_dev *dev;
2582
2583         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2584         dev = &rte_eth_devices[port_id];
2585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2586
2587         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2588                                                                tpid));
2589 }
2590
2591 int
2592 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2593 {
2594         struct rte_eth_dev *dev;
2595         int ret = 0;
2596         int mask = 0;
2597         int cur, org = 0;
2598         uint64_t orig_offloads;
2599
2600         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2601         dev = &rte_eth_devices[port_id];
2602
2603         /* save original values in case of failure */
2604         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2605
2606         /* check which options were changed by the application */
2607         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2608         org = !!(dev->data->dev_conf.rxmode.offloads &
2609                  DEV_RX_OFFLOAD_VLAN_STRIP);
2610         if (cur != org) {
2611                 if (cur)
2612                         dev->data->dev_conf.rxmode.offloads |=
2613                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2614                 else
2615                         dev->data->dev_conf.rxmode.offloads &=
2616                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2617                 mask |= ETH_VLAN_STRIP_MASK;
2618         }
2619
2620         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2621         org = !!(dev->data->dev_conf.rxmode.offloads &
2622                  DEV_RX_OFFLOAD_VLAN_FILTER);
2623         if (cur != org) {
2624                 if (cur)
2625                         dev->data->dev_conf.rxmode.offloads |=
2626                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2627                 else
2628                         dev->data->dev_conf.rxmode.offloads &=
2629                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2630                 mask |= ETH_VLAN_FILTER_MASK;
2631         }
2632
2633         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2634         org = !!(dev->data->dev_conf.rxmode.offloads &
2635                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2636         if (cur != org) {
2637                 if (cur)
2638                         dev->data->dev_conf.rxmode.offloads |=
2639                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2640                 else
2641                         dev->data->dev_conf.rxmode.offloads &=
2642                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2643                 mask |= ETH_VLAN_EXTEND_MASK;
2644         }
2645
2646         /* no change */
2647         if (mask == 0)
2648                 return ret;
2649
2650         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2651         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2652         if (ret) {
2653                 /* hit an error, restore the original values */
2654                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2655         }
2656
2657         return eth_err(port_id, ret);
2658 }
2659
2660 int
2661 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2662 {
2663         struct rte_eth_dev *dev;
2664         int ret = 0;
2665
2666         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2667         dev = &rte_eth_devices[port_id];
2668
2669         if (dev->data->dev_conf.rxmode.offloads &
2670             DEV_RX_OFFLOAD_VLAN_STRIP)
2671                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2672
2673         if (dev->data->dev_conf.rxmode.offloads &
2674             DEV_RX_OFFLOAD_VLAN_FILTER)
2675                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2676
2677         if (dev->data->dev_conf.rxmode.offloads &
2678             DEV_RX_OFFLOAD_VLAN_EXTEND)
2679                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2680
2681         return ret;
2682 }
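
/*
 * A usage sketch (illustrative): read-modify-write of the VLAN offload
 * mask, here enabling stripping while leaving the other bits as-is.
 *
 *   int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *   mask |= ETH_VLAN_STRIP_OFFLOAD;
 *   rte_eth_dev_set_vlan_offload(port_id, mask);
 */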
2683
2684 int
2685 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2686 {
2687         struct rte_eth_dev *dev;
2688
2689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2690         dev = &rte_eth_devices[port_id];
2691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2692
2693         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2694 }
2695
2696 int
2697 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2698 {
2699         struct rte_eth_dev *dev;
2700
2701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2702         dev = &rte_eth_devices[port_id];
2703         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2704         memset(fc_conf, 0, sizeof(*fc_conf));
2705         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2706 }
2707
2708 int
2709 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2710 {
2711         struct rte_eth_dev *dev;
2712
2713         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2714         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2715                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
2716                 return -EINVAL;
2717         }
2718
2719         dev = &rte_eth_devices[port_id];
2720         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2721         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2722 }
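
/*
 * A usage sketch (illustrative): fetch the current flow control
 * parameters, switch to full (RX + TX pause) mode and write them back.
 *
 *   struct rte_eth_fc_conf fc;
 *
 *   if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *           fc.mode = RTE_FC_FULL;
 *           rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *   }
 */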
2723
2724 int
2725 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2726                                    struct rte_eth_pfc_conf *pfc_conf)
2727 {
2728         struct rte_eth_dev *dev;
2729
2730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2731         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2732                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
2733                 return -EINVAL;
2734         }
2735
2736         dev = &rte_eth_devices[port_id];
2737         /* High water, low water validation are device specific */
2738         if (*dev->dev_ops->priority_flow_ctrl_set)
2739                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2740                                         (dev, pfc_conf));
2741         return -ENOTSUP;
2742 }
2743
2744 static int
2745 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2746                         uint16_t reta_size)
2747 {
2748         uint16_t i, num;
2749
2750         if (!reta_conf)
2751                 return -EINVAL;
2752
2753         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2754         for (i = 0; i < num; i++) {
2755                 if (reta_conf[i].mask)
2756                         return 0;
2757         }
2758
2759         return -EINVAL;
2760 }
2761
2762 static int
2763 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2764                          uint16_t reta_size,
2765                          uint16_t max_rxq)
2766 {
2767         uint16_t i, idx, shift;
2768
2769         if (!reta_conf)
2770                 return -EINVAL;
2771
2772         if (max_rxq == 0) {
2773                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
2774                 return -EINVAL;
2775         }
2776
2777         for (i = 0; i < reta_size; i++) {
2778                 idx = i / RTE_RETA_GROUP_SIZE;
2779                 shift = i % RTE_RETA_GROUP_SIZE;
2780                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2781                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2782                         RTE_ETHDEV_LOG(ERR,
2783                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
2784                                 idx, shift,
2785                                 reta_conf[idx].reta[shift], max_rxq);
2786                         return -EINVAL;
2787                 }
2788         }
2789
2790         return 0;
2791 }
2792
2793 int
2794 rte_eth_dev_rss_reta_update(uint16_t port_id,
2795                             struct rte_eth_rss_reta_entry64 *reta_conf,
2796                             uint16_t reta_size)
2797 {
2798         struct rte_eth_dev *dev;
2799         int ret;
2800
2801         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2802         /* Check mask bits */
2803         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2804         if (ret < 0)
2805                 return ret;
2806
2807         dev = &rte_eth_devices[port_id];
2808
2809         /* Check entry value */
2810         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2811                                 dev->data->nb_rx_queues);
2812         if (ret < 0)
2813                 return ret;
2814
2815         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2816         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2817                                                              reta_size));
2818 }
2819
2820 int
2821 rte_eth_dev_rss_reta_query(uint16_t port_id,
2822                            struct rte_eth_rss_reta_entry64 *reta_conf,
2823                            uint16_t reta_size)
2824 {
2825         struct rte_eth_dev *dev;
2826         int ret;
2827
2828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2829
2830         /* Check mask bits */
2831         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2832         if (ret < 0)
2833                 return ret;
2834
2835         dev = &rte_eth_devices[port_id];
2836         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2837         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2838                                                             reta_size));
2839 }
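
/*
 * A usage sketch (illustrative): spread a 128-entry redirection table
 * round-robin over the configured RX queues. The table size of 128 and
 * the caller-provided nb_rx_queues are assumptions for the example; a
 * real caller should use dev_info.reta_size instead.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_RETA_GROUP_SIZE];
 *   uint16_t i, idx, shift;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < 128; i++) {
 *           idx = i / RTE_RETA_GROUP_SIZE;
 *           shift = i % RTE_RETA_GROUP_SIZE;
 *           reta_conf[idx].mask |= 1ULL << shift;
 *           reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */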
2840
2841 int
2842 rte_eth_dev_rss_hash_update(uint16_t port_id,
2843                             struct rte_eth_rss_conf *rss_conf)
2844 {
2845         struct rte_eth_dev *dev;
2846         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2847
2848         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2849         dev = &rte_eth_devices[port_id];
2850         rte_eth_dev_info_get(port_id, &dev_info);
2851         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2852             dev_info.flow_type_rss_offloads) {
2853                 RTE_ETHDEV_LOG(ERR,
2854                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2855                         port_id, rss_conf->rss_hf,
2856                         dev_info.flow_type_rss_offloads);
2857                 return -EINVAL;
2858         }
2859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2860         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2861                                                                  rss_conf));
2862 }
2863
2864 int
2865 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2866                               struct rte_eth_rss_conf *rss_conf)
2867 {
2868         struct rte_eth_dev *dev;
2869
2870         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2871         dev = &rte_eth_devices[port_id];
2872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2873         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2874                                                                    rss_conf));
2875 }
2876
2877 int
2878 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2879                                 struct rte_eth_udp_tunnel *udp_tunnel)
2880 {
2881         struct rte_eth_dev *dev;
2882
2883         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2884         if (udp_tunnel == NULL) {
2885                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2886                 return -EINVAL;
2887         }
2888
2889         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2890                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2891                 return -EINVAL;
2892         }
2893
2894         dev = &rte_eth_devices[port_id];
2895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2896         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2897                                                                 udp_tunnel));
2898 }
2899
2900 int
2901 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2902                                    struct rte_eth_udp_tunnel *udp_tunnel)
2903 {
2904         struct rte_eth_dev *dev;
2905
2906         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2907         dev = &rte_eth_devices[port_id];
2908
2909         if (udp_tunnel == NULL) {
2910                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2911                 return -EINVAL;
2912         }
2913
2914         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2915                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2916                 return -EINVAL;
2917         }
2918
2919         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2920         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2921                                                                 udp_tunnel));
2922 }
2923
2924 int
2925 rte_eth_led_on(uint16_t port_id)
2926 {
2927         struct rte_eth_dev *dev;
2928
2929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2930         dev = &rte_eth_devices[port_id];
2931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2932         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2933 }
2934
2935 int
2936 rte_eth_led_off(uint16_t port_id)
2937 {
2938         struct rte_eth_dev *dev;
2939
2940         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2941         dev = &rte_eth_devices[port_id];
2942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2943         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2944 }
2945
2946 /*
2947  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2948  * an empty spot.
2949  */
2950 static int
2951 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2952 {
2953         struct rte_eth_dev_info dev_info;
2954         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2955         unsigned i;
2956
2957         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2958         rte_eth_dev_info_get(port_id, &dev_info);
2959
2960         for (i = 0; i < dev_info.max_mac_addrs; i++)
2961                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2962                         return i;
2963
2964         return -1;
2965 }
2966
2967 static const struct ether_addr null_mac_addr;
2968
2969 int
2970 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2971                         uint32_t pool)
2972 {
2973         struct rte_eth_dev *dev;
2974         int index;
2975         uint64_t pool_mask;
2976         int ret;
2977
2978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2979         dev = &rte_eth_devices[port_id];
2980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2981
2982         if (is_zero_ether_addr(addr)) {
2983                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
2984                         port_id);
2985                 return -EINVAL;
2986         }
2987         if (pool >= ETH_64_POOLS) {
2988                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
2989                 return -EINVAL;
2990         }
2991
2992         index = get_mac_addr_index(port_id, addr);
2993         if (index < 0) {
2994                 index = get_mac_addr_index(port_id, &null_mac_addr);
2995                 if (index < 0) {
2996                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
2997                                 port_id);
2998                         return -ENOSPC;
2999                 }
3000         } else {
3001                 pool_mask = dev->data->mac_pool_sel[index];
3002
3003                 /* If both the MAC address and the pool are already set, do nothing */
3004                 if (pool_mask & (1ULL << pool))
3005                         return 0;
3006         }
3007
3008         /* Update NIC */
3009         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3010
3011         if (ret == 0) {
3012                 /* Update address in NIC data structure */
3013                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3014
3015                 /* Update pool bitmap in NIC data structure */
3016                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3017         }
3018
3019         return eth_err(port_id, ret);
3020 }
3021
3022 int
3023 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3024 {
3025         struct rte_eth_dev *dev;
3026         int index;
3027
3028         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3029         dev = &rte_eth_devices[port_id];
3030         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3031
3032         index = get_mac_addr_index(port_id, addr);
3033         if (index == 0) {
3034                 RTE_ETHDEV_LOG(ERR,
3035                         "Port %u: Cannot remove default MAC address\n",
3036                         port_id);
3037                 return -EADDRINUSE;
3038         } else if (index < 0)
3039                 return 0;  /* Do nothing if address wasn't found */
3040
3041         /* Update NIC */
3042         (*dev->dev_ops->mac_addr_remove)(dev, index);
3043
3044         /* Update address in NIC data structure */
3045         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3046
3047         /* reset pool bitmap */
3048         dev->data->mac_pool_sel[index] = 0;
3049
3050         return 0;
3051 }
3052
3053 int
3054 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3055 {
3056         struct rte_eth_dev *dev;
3057         int ret;
3058
3059         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3060
3061         if (!is_valid_assigned_ether_addr(addr))
3062                 return -EINVAL;
3063
3064         dev = &rte_eth_devices[port_id];
3065         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3066
3067         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3068         if (ret < 0)
3069                 return ret;
3070
3071         /* Update default address in NIC data structure */
3072         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3073
3074         return 0;
3075 }
3076
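/*
 * Usage sketch (illustrative only): replace the default (index 0) MAC
 * address. The new address must be a valid assigned unicast address,
 * otherwise -EINVAL is returned before the driver is invoked.
 *
 *        struct ether_addr mac = {
 *                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 }
 *        };
 *        int ret = rte_eth_dev_default_mac_addr_set(port_id, &mac);
 */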
3077
3078 /*
3079  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3080  * an empty spot.
3081  */
3082 static int
3083 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3084 {
3085         struct rte_eth_dev_info dev_info;
3086         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3087         unsigned i;
3088
3089         rte_eth_dev_info_get(port_id, &dev_info);
3090         if (!dev->data->hash_mac_addrs)
3091                 return -1;
3092
3093         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3094                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3095                         ETHER_ADDR_LEN) == 0)
3096                         return i;
3097
3098         return -1;
3099 }
3100
3101 int
3102 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3103                                 uint8_t on)
3104 {
3105         int index;
3106         int ret;
3107         struct rte_eth_dev *dev;
3108
3109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3110
3111         dev = &rte_eth_devices[port_id];
3112         if (is_zero_ether_addr(addr)) {
3113                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3114                         port_id);
3115                 return -EINVAL;
3116         }
3117
3118         index = get_hash_mac_addr_index(port_id, addr);
3119         /* Check if it's already there, and do nothing */
3120         if ((index >= 0) && on)
3121                 return 0;
3122
3123         if (index < 0) {
3124                 if (!on) {
3125                         RTE_ETHDEV_LOG(ERR,
3126                                 "Port %u: the MAC address was not set in UTA\n",
3127                                 port_id);
3128                         return -EINVAL;
3129                 }
3130
3131                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3132                 if (index < 0) {
3133                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3134                                 port_id);
3135                         return -ENOSPC;
3136                 }
3137         }
3138
3139         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3140         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3141         if (ret == 0) {
3142                 /* Update address in NIC data structure */
3143                 if (on)
3144                         ether_addr_copy(addr,
3145                                         &dev->data->hash_mac_addrs[index]);
3146                 else
3147                         ether_addr_copy(&null_mac_addr,
3148                                         &dev->data->hash_mac_addrs[index]);
3149         }
3150
3151         return eth_err(port_id, ret);
3152 }
3153
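/*
 * Usage sketch (illustrative only): accept frames for an extra unicast
 * address through the unicast hash table (UTA) rather than a perfect
 * MAC filter slot.
 *
 *        struct ether_addr mac = {
 *                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x03 }
 *        };
 *        int ret = rte_eth_dev_uc_hash_table_set(port_id, &mac, 1);
 *
 * Clearing (on == 0) an address that was never added fails with -EINVAL.
 */
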
3154 int
3155 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3156 {
3157         struct rte_eth_dev *dev;
3158
3159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3160
3161         dev = &rte_eth_devices[port_id];
3162
3163         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3164         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3165                                                                        on));
3166 }
3167
3168 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3169                                         uint16_t tx_rate)
3170 {
3171         struct rte_eth_dev *dev;
3172         struct rte_eth_dev_info dev_info;
3173         struct rte_eth_link link;
3174
3175         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3176
3177         dev = &rte_eth_devices[port_id];
3178         rte_eth_dev_info_get(port_id, &dev_info);
3179         link = dev->data->dev_link;
3180
3181         if (queue_idx >= dev_info.max_tx_queues) {
3182                 RTE_ETHDEV_LOG(ERR,
3183                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3184                         port_id, queue_idx);
3185                 return -EINVAL;
3186         }
3187
3188         if (tx_rate > link.link_speed) {
3189                 RTE_ETHDEV_LOG(ERR,
3190                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3191                         tx_rate, link.link_speed);
3192                 return -EINVAL;
3193         }
3194
3195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3196         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3197                                                         queue_idx, tx_rate));
3198 }
3199
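/*
 * Usage sketch (illustrative only; the rate is in Mbps and may not exceed
 * the negotiated link speed): cap TX queue 0 of a 10G port to 1 Gbps.
 *
 *        int ret = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */
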
3200 int
3201 rte_eth_mirror_rule_set(uint16_t port_id,
3202                         struct rte_eth_mirror_conf *mirror_conf,
3203                         uint8_t rule_id, uint8_t on)
3204 {
3205         struct rte_eth_dev *dev;
3206
3207         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3208         if (mirror_conf->rule_type == 0) {
3209                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3210                 return -EINVAL;
3211         }
3212
3213         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3214                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3215                         ETH_64_POOLS - 1);
3216                 return -EINVAL;
3217         }
3218
3219         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3220              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3221             (mirror_conf->pool_mask == 0)) {
3222                 RTE_ETHDEV_LOG(ERR,
3223                         "Invalid mirror pool, pool mask cannot be 0\n");
3224                 return -EINVAL;
3225         }
3226
3227         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3228             mirror_conf->vlan.vlan_mask == 0) {
3229                 RTE_ETHDEV_LOG(ERR,
3230                         "Invalid vlan mask, vlan mask cannot be 0\n");
3231                 return -EINVAL;
3232         }
3233
3234         dev = &rte_eth_devices[port_id];
3235         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3236
3237         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3238                                                 mirror_conf, rule_id, on));
3239 }
3240
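/*
 * Usage sketch (illustrative only): mirror all traffic tagged with VLAN
 * 100 to pool 1 using hardware rule slot 0. vlan_mask selects which
 * entries of vlan_id[] are valid.
 *
 *        struct rte_eth_mirror_conf conf = {
 *                .rule_type = ETH_MIRROR_VLAN,
 *                .dst_pool = 1,
 *        };
 *
 *        conf.vlan.vlan_mask = 1ULL;
 *        conf.vlan.vlan_id[0] = 100;
 *        int ret = rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 */
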
3241 int
3242 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3243 {
3244         struct rte_eth_dev *dev;
3245
3246         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3247
3248         dev = &rte_eth_devices[port_id];
3249         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3250
3251         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3252                                                                    rule_id));
3253 }
3254
3255 RTE_INIT(eth_dev_init_cb_lists)
3256 {
3257         int i;
3258
3259         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3260                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3261 }
3262
3263 int
3264 rte_eth_dev_callback_register(uint16_t port_id,
3265                         enum rte_eth_event_type event,
3266                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3267 {
3268         struct rte_eth_dev *dev;
3269         struct rte_eth_dev_callback *user_cb;
3270         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3271         uint16_t last_port;
3272
3273         if (!cb_fn)
3274                 return -EINVAL;
3275
3276         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3277                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3278                 return -EINVAL;
3279         }
3280
3281         if (port_id == RTE_ETH_ALL) {
3282                 next_port = 0;
3283                 last_port = RTE_MAX_ETHPORTS - 1;
3284         } else {
3285                 next_port = last_port = port_id;
3286         }
3287
3288         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3289
3290         do {
3291                 dev = &rte_eth_devices[next_port];
3292
3293                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3294                         if (user_cb->cb_fn == cb_fn &&
3295                                 user_cb->cb_arg == cb_arg &&
3296                                 user_cb->event == event) {
3297                                 break;
3298                         }
3299                 }
3300
3301                 /* create a new callback. */
3302                 if (user_cb == NULL) {
3303                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3304                                 sizeof(struct rte_eth_dev_callback), 0);
3305                         if (user_cb != NULL) {
3306                                 user_cb->cb_fn = cb_fn;
3307                                 user_cb->cb_arg = cb_arg;
3308                                 user_cb->event = event;
3309                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3310                                                   user_cb, next);
3311                         } else {
3312                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3313                                 rte_eth_dev_callback_unregister(port_id, event,
3314                                                                 cb_fn, cb_arg);
3315                                 return -ENOMEM;
3316                         }
3317
3318                 }
3319         } while (++next_port <= last_port);
3320
3321         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3322         return 0;
3323 }
3324
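/*
 * Usage sketch (illustrative only): log link state changes from the
 * interrupt thread. The callback is invoked with the callback spinlock
 * released (see _rte_eth_dev_callback_process() below), so it may call
 * ethdev functions; unregistering a callback that is currently executing
 * fails with -EAGAIN.
 *
 *        static int
 *        lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                     void *cb_arg, void *ret_param)
 *        {
 *                struct rte_eth_link link;
 *
 *                RTE_SET_USED(event);
 *                RTE_SET_USED(cb_arg);
 *                RTE_SET_USED(ret_param);
 *                rte_eth_link_get_nowait(port_id, &link);
 *                printf("port %u link %s\n", port_id,
 *                       link.link_status ? "up" : "down");
 *                return 0;
 *        }
 *
 *        rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                      lsc_event_cb, NULL);
 */
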
3325 int
3326 rte_eth_dev_callback_unregister(uint16_t port_id,
3327                         enum rte_eth_event_type event,
3328                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3329 {
3330         int ret;
3331         struct rte_eth_dev *dev;
3332         struct rte_eth_dev_callback *cb, *next;
3333         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3334         uint16_t last_port;
3335
3336         if (!cb_fn)
3337                 return -EINVAL;
3338
3339         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3340                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3341                 return -EINVAL;
3342         }
3343
3344         if (port_id == RTE_ETH_ALL) {
3345                 next_port = 0;
3346                 last_port = RTE_MAX_ETHPORTS - 1;
3347         } else {
3348                 next_port = last_port = port_id;
3349         }
3350
3351         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3352
3353         do {
3354                 dev = &rte_eth_devices[next_port];
3355                 ret = 0;
3356                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3357                      cb = next) {
3358
3359                         next = TAILQ_NEXT(cb, next);
3360
3361                         if (cb->cb_fn != cb_fn || cb->event != event ||
3362                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3363                                 continue;
3364
3365                         /*
3366                          * if this callback is not executing right now,
3367                          * then remove it.
3368                          */
3369                         if (cb->active == 0) {
3370                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3371                                 rte_free(cb);
3372                         } else {
3373                                 ret = -EAGAIN;
3374                         }
3375                 }
3376         } while (++next_port <= last_port);
3377
3378         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3379         return ret;
3380 }
3381
3382 int
3383 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3384         enum rte_eth_event_type event, void *ret_param)
3385 {
3386         struct rte_eth_dev_callback *cb_lst;
3387         struct rte_eth_dev_callback dev_cb;
3388         int rc = 0;
3389
3390         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3391         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3392                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3393                         continue;
3394                 dev_cb = *cb_lst;
3395                 cb_lst->active = 1;
3396                 if (ret_param != NULL)
3397                         dev_cb.ret_param = ret_param;
3398
3399                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3400                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3401                                 dev_cb.cb_arg, dev_cb.ret_param);
3402                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3403                 cb_lst->active = 0;
3404         }
3405         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3406         return rc;
3407 }
3408
3409 void
3410 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3411 {
3412         if (dev == NULL)
3413                 return;
3414
3415         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3416
3417         dev->state = RTE_ETH_DEV_ATTACHED;
3418 }
3419
3420 int
3421 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3422 {
3423         uint32_t vec;
3424         struct rte_eth_dev *dev;
3425         struct rte_intr_handle *intr_handle;
3426         uint16_t qid;
3427         int rc;
3428
3429         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3430
3431         dev = &rte_eth_devices[port_id];
3432
3433         if (!dev->intr_handle) {
3434                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3435                 return -ENOTSUP;
3436         }
3437
3438         intr_handle = dev->intr_handle;
3439         if (!intr_handle->intr_vec) {
3440                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3441                 return -EPERM;
3442         }
3443
3444         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3445                 vec = intr_handle->intr_vec[qid];
3446                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3447                 if (rc && rc != -EEXIST) {
3448                         RTE_ETHDEV_LOG(ERR,
3449                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3450                                 port_id, qid, op, epfd, vec);
3451                 }
3452         }
3453
3454         return 0;
3455 }
3456
3457 int __rte_experimental
3458 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3459 {
3460         struct rte_intr_handle *intr_handle;
3461         struct rte_eth_dev *dev;
3462         unsigned int efd_idx;
3463         uint32_t vec;
3464         int fd;
3465
3466         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3467
3468         dev = &rte_eth_devices[port_id];
3469
3470         if (queue_id >= dev->data->nb_rx_queues) {
3471                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3472                 return -1;
3473         }
3474
3475         if (!dev->intr_handle) {
3476                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3477                 return -1;
3478         }
3479
3480         intr_handle = dev->intr_handle;
3481         if (!intr_handle->intr_vec) {
3482                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3483                 return -1;
3484         }
3485
3486         vec = intr_handle->intr_vec[queue_id];
3487         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3488                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3489         fd = intr_handle->efds[efd_idx];
3490
3491         return fd;
3492 }
3493
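/*
 * Usage sketch (illustrative only; assumes the port was configured with
 * intr_conf.rxq = 1 and <sys/epoll.h> is available): sleep until traffic
 * arrives on RX queue 0 by adding the per-queue event fd to an
 * application-owned epoll set.
 *
 *        int efd = epoll_create1(0);
 *        int qfd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, 0);
 *        struct epoll_event ev = { .events = EPOLLIN };
 *
 *        if (efd >= 0 && qfd >= 0 &&
 *            epoll_ctl(efd, EPOLL_CTL_ADD, qfd, &ev) == 0) {
 *                rte_eth_dev_rx_intr_enable(port_id, 0);
 *                epoll_wait(efd, &ev, 1, -1);
 *                rte_eth_dev_rx_intr_disable(port_id, 0);
 *        }
 */
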
3494 const struct rte_memzone *
3495 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3496                          uint16_t queue_id, size_t size, unsigned align,
3497                          int socket_id)
3498 {
3499         char z_name[RTE_MEMZONE_NAMESIZE];
3500         const struct rte_memzone *mz;
3501
3502         snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3503                  dev->data->port_id, queue_id, ring_name);
3504
3505         mz = rte_memzone_lookup(z_name);
3506         if (mz)
3507                 return mz;
3508
3509         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3510                         RTE_MEMZONE_IOVA_CONTIG, align);
3511 }
3512
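/*
 * Usage sketch (illustrative only, driver-side; "ring_size" and
 * "socket_id" are stand-ins): reserve, or look up, the descriptor ring
 * memory for an RX queue during queue setup.
 *
 *        const struct rte_memzone *mz;
 *
 *        mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *                                      ring_size, RTE_CACHE_LINE_SIZE,
 *                                      socket_id);
 *        if (mz == NULL)
 *                return -ENOMEM;
 *
 * Note that an existing memzone with the same name is returned as-is, so
 * the requested size is not re-checked on queue reconfiguration.
 */
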
3513 int __rte_experimental
3514 rte_eth_dev_create(struct rte_device *device, const char *name,
3515         size_t priv_data_size,
3516         ethdev_bus_specific_init ethdev_bus_specific_init,
3517         void *bus_init_params,
3518         ethdev_init_t ethdev_init, void *init_params)
3519 {
3520         struct rte_eth_dev *ethdev;
3521         int retval;
3522
3523         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3524
3525         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3526                 ethdev = rte_eth_dev_allocate(name);
3527                 if (!ethdev)
3528                         return -ENODEV;
3529
3530                 if (priv_data_size) {
3531                         ethdev->data->dev_private = rte_zmalloc_socket(
3532                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3533                                 device->numa_node);
3534
3535                         if (!ethdev->data->dev_private) {
3536                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3537                                 retval = -ENOMEM;
3538                                 goto probe_failed;
3539                         }
3540                 }
3541         } else {
3542                 ethdev = rte_eth_dev_attach_secondary(name);
3543                 if (!ethdev) {
3544                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3545                                 "ethdev doesn't exist\n");
3546                         return -ENODEV;
3547                 }
3548         }
3549
3550         ethdev->device = device;
3551
3552         if (ethdev_bus_specific_init) {
3553                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3554                 if (retval) {
3555                         RTE_LOG(ERR, EAL,
3556                                 "ethdev bus specific initialisation failed\n");
3557                         goto probe_failed;
3558                 }
3559         }
3560
3561         retval = ethdev_init(ethdev, init_params);
3562         if (retval) {
3563                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3564                 goto probe_failed;
3565         }
3566
3567         rte_eth_dev_probing_finish(ethdev);
3568
3569         return retval;
3570
3571 probe_failed:
3572         rte_eth_dev_release_port(ethdev);
3573         return retval;
3574 }
3575
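/*
 * Usage sketch (illustrative only): the probe function of a hypothetical
 * PCI PMD delegating port allocation to the helper above; "my_priv",
 * "my_dev_ops" and "my_ethdev_init" are stand-ins for driver code.
 *
 *        static int
 *        my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *        {
 *                RTE_SET_USED(init_params);
 *                ethdev->dev_ops = &my_dev_ops;
 *                return 0;
 *        }
 *
 *        ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *                                 sizeof(struct my_priv), NULL, NULL,
 *                                 my_ethdev_init, NULL);
 *
 * Any failure after allocation releases the port again through
 * rte_eth_dev_release_port().
 */
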
3576 int __rte_experimental
3577 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3578         ethdev_uninit_t ethdev_uninit)
3579 {
3580         int ret;
3581
3582         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3583         if (!ethdev)
3584                 return -ENODEV;
3585
3586         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3587
3588         ret = ethdev_uninit(ethdev);
3589         if (ret)
3590                 return ret;
3592
3593         return rte_eth_dev_release_port(ethdev);
3594 }
3595
3596 int
3597 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3598                           int epfd, int op, void *data)
3599 {
3600         uint32_t vec;
3601         struct rte_eth_dev *dev;
3602         struct rte_intr_handle *intr_handle;
3603         int rc;
3604
3605         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3606
3607         dev = &rte_eth_devices[port_id];
3608         if (queue_id >= dev->data->nb_rx_queues) {
3609                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3610                 return -EINVAL;
3611         }
3612
3613         if (!dev->intr_handle) {
3614                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3615                 return -ENOTSUP;
3616         }
3617
3618         intr_handle = dev->intr_handle;
3619         if (!intr_handle->intr_vec) {
3620                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3621                 return -EPERM;
3622         }
3623
3624         vec = intr_handle->intr_vec[queue_id];
3625         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3626         if (rc && rc != -EEXIST) {
3627                 RTE_ETHDEV_LOG(ERR,
3628                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3629                         port_id, queue_id, op, epfd, vec);
3630                 return rc;
3631         }
3632
3633         return 0;
3634 }
3635
3636 int
3637 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3638                            uint16_t queue_id)
3639 {
3640         struct rte_eth_dev *dev;
3641
3642         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3643
3644         dev = &rte_eth_devices[port_id];
3645
3646         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3647         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3648                                                                 queue_id));
3649 }
3650
3651 int
3652 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3653                             uint16_t queue_id)
3654 {
3655         struct rte_eth_dev *dev;
3656
3657         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3658
3659         dev = &rte_eth_devices[port_id];
3660
3661         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3662         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3663                                                                 queue_id));
3664 }
3665
3667 int
3668 rte_eth_dev_filter_supported(uint16_t port_id,
3669                              enum rte_filter_type filter_type)
3670 {
3671         struct rte_eth_dev *dev;
3672
3673         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3674
3675         dev = &rte_eth_devices[port_id];
3676         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3677         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3678                                 RTE_ETH_FILTER_NOP, NULL);
3679 }
3680
3681 int
3682 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3683                         enum rte_filter_op filter_op, void *arg)
3684 {
3685         struct rte_eth_dev *dev;
3686
3687         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3688
3689         dev = &rte_eth_devices[port_id];
3690         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3691         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3692                                                              filter_op, arg));
3693 }
3694
3695 const struct rte_eth_rxtx_callback *
3696 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3697                 rte_rx_callback_fn fn, void *user_param)
3698 {
3699 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3700         rte_errno = ENOTSUP;
3701         return NULL;
3702 #endif
3703         /* check input parameters */
3704         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3705                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3706                 rte_errno = EINVAL;
3707                 return NULL;
3708         }
3709         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3710
3711         if (cb == NULL) {
3712                 rte_errno = ENOMEM;
3713                 return NULL;
3714         }
3715
3716         cb->fn.rx = fn;
3717         cb->param = user_param;
3718
3719         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3720         /* Add the callbacks in fifo order. */
3721         struct rte_eth_rxtx_callback *tail =
3722                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3723
3724         if (!tail) {
3725                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3726
3727         } else {
3728                 while (tail->next)
3729                         tail = tail->next;
3730                 tail->next = cb;
3731         }
3732         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3733
3734         return cb;
3735 }
3736
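/*
 * Usage sketch (illustrative only): count received packets on queue 0
 * with a post-RX callback. The callback runs inside rte_eth_rx_burst()
 * on the datapath lcore, so it must be cheap and lock-free.
 *
 *        static uint16_t
 *        count_rx_cb(uint16_t port_id, uint16_t queue,
 *                    struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *                    uint16_t max_pkts, void *user_param)
 *        {
 *                uint64_t *rx_count = user_param;
 *
 *                RTE_SET_USED(port_id);
 *                RTE_SET_USED(queue);
 *                RTE_SET_USED(pkts);
 *                RTE_SET_USED(max_pkts);
 *                *rx_count += nb_pkts;
 *                return nb_pkts;
 *        }
 *
 *        static uint64_t rx_count;
 *        const struct rte_eth_rxtx_callback *cb =
 *                rte_eth_add_rx_callback(port_id, 0, count_rx_cb,
 *                                        &rx_count);
 *
 * rte_eth_remove_rx_callback() only unlinks the callback; the caller must
 * make sure no lcore is still inside the burst before freeing it.
 */
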
3737 const struct rte_eth_rxtx_callback *
3738 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3739                 rte_rx_callback_fn fn, void *user_param)
3740 {
3741 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3742         rte_errno = ENOTSUP;
3743         return NULL;
3744 #endif
3745         /* check input parameters */
3746         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3747                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3748                 rte_errno = EINVAL;
3749                 return NULL;
3750         }
3751
3752         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3753
3754         if (cb == NULL) {
3755                 rte_errno = ENOMEM;
3756                 return NULL;
3757         }
3758
3759         cb->fn.rx = fn;
3760         cb->param = user_param;
3761
3762         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3763         /* Add the callback at the first position */
3764         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3765         rte_smp_wmb();
3766         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3767         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3768
3769         return cb;
3770 }
3771
3772 const struct rte_eth_rxtx_callback *
3773 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3774                 rte_tx_callback_fn fn, void *user_param)
3775 {
3776 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3777         rte_errno = ENOTSUP;
3778         return NULL;
3779 #endif
3780         /* check input parameters */
3781         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3782                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3783                 rte_errno = EINVAL;
3784                 return NULL;
3785         }
3786
3787         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3788
3789         if (cb == NULL) {
3790                 rte_errno = ENOMEM;
3791                 return NULL;
3792         }
3793
3794         cb->fn.tx = fn;
3795         cb->param = user_param;
3796
3797         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3798         /* Add the callbacks in fifo order. */
3799         struct rte_eth_rxtx_callback *tail =
3800                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3801
3802         if (!tail) {
3803                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3804
3805         } else {
3806                 while (tail->next)
3807                         tail = tail->next;
3808                 tail->next = cb;
3809         }
3810         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3811
3812         return cb;
3813 }
3814
3815 int
3816 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3817                 const struct rte_eth_rxtx_callback *user_cb)
3818 {
3819 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3820         return -ENOTSUP;
3821 #endif
3822         /* Check input parameters. */
3823         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3824         if (user_cb == NULL ||
3825                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3826                 return -EINVAL;
3827
3828         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3829         struct rte_eth_rxtx_callback *cb;
3830         struct rte_eth_rxtx_callback **prev_cb;
3831         int ret = -EINVAL;
3832
3833         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3834         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3835         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3836                 cb = *prev_cb;
3837                 if (cb == user_cb) {
3838                         /* Remove the user cb from the callback list. */
3839                         *prev_cb = cb->next;
3840                         ret = 0;
3841                         break;
3842                 }
3843         }
3844         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3845
3846         return ret;
3847 }
3848
3849 int
3850 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3851                 const struct rte_eth_rxtx_callback *user_cb)
3852 {
3853 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3854         return -ENOTSUP;
3855 #endif
3856         /* Check input parameters. */
3857         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3858         if (user_cb == NULL ||
3859                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3860                 return -EINVAL;
3861
3862         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3863         int ret = -EINVAL;
3864         struct rte_eth_rxtx_callback *cb;
3865         struct rte_eth_rxtx_callback **prev_cb;
3866
3867         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3868         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3869         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3870                 cb = *prev_cb;
3871                 if (cb == user_cb) {
3872                         /* Remove the user cb from the callback list. */
3873                         *prev_cb = cb->next;
3874                         ret = 0;
3875                         break;
3876                 }
3877         }
3878         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3879
3880         return ret;
3881 }
3882
3883 int
3884 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3885         struct rte_eth_rxq_info *qinfo)
3886 {
3887         struct rte_eth_dev *dev;
3888
3889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3890
3891         if (qinfo == NULL)
3892                 return -EINVAL;
3893
3894         dev = &rte_eth_devices[port_id];
3895         if (queue_id >= dev->data->nb_rx_queues) {
3896                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3897                 return -EINVAL;
3898         }
3899
3900         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3901
3902         memset(qinfo, 0, sizeof(*qinfo));
3903         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3904         return 0;
3905 }
3906
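/*
 * Usage sketch (illustrative only): inspect the ring size and mempool a
 * driver reports for RX queue 0.
 *
 *        struct rte_eth_rxq_info qinfo;
 *
 *        if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *                printf("ring=%u mp=%s\n", qinfo.nb_desc,
 *                       qinfo.mp->name);
 */
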
3907 int
3908 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3909         struct rte_eth_txq_info *qinfo)
3910 {
3911         struct rte_eth_dev *dev;
3912
3913         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3914
3915         if (qinfo == NULL)
3916                 return -EINVAL;
3917
3918         dev = &rte_eth_devices[port_id];
3919         if (queue_id >= dev->data->nb_tx_queues) {
3920                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
3921                 return -EINVAL;
3922         }
3923
3924         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3925
3926         memset(qinfo, 0, sizeof(*qinfo));
3927         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3928
3929         return 0;
3930 }
3931
3932 int
3933 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3934                              struct ether_addr *mc_addr_set,
3935                              uint32_t nb_mc_addr)
3936 {
3937         struct rte_eth_dev *dev;
3938
3939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3940
3941         dev = &rte_eth_devices[port_id];
3942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3943         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3944                                                 mc_addr_set, nb_mc_addr));
3945 }
3946
3947 int
3948 rte_eth_timesync_enable(uint16_t port_id)
3949 {
3950         struct rte_eth_dev *dev;
3951
3952         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3953         dev = &rte_eth_devices[port_id];
3954
3955         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3956         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3957 }
3958
3959 int
3960 rte_eth_timesync_disable(uint16_t port_id)
3961 {
3962         struct rte_eth_dev *dev;
3963
3964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3965         dev = &rte_eth_devices[port_id];
3966
3967         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3968         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3969 }
3970
3971 int
3972 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3973                                    uint32_t flags)
3974 {
3975         struct rte_eth_dev *dev;
3976
3977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3978         dev = &rte_eth_devices[port_id];
3979
3980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3981         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3982                                 (dev, timestamp, flags));
3983 }
3984
3985 int
3986 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3987                                    struct timespec *timestamp)
3988 {
3989         struct rte_eth_dev *dev;
3990
3991         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3992         dev = &rte_eth_devices[port_id];
3993
3994         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3995         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3996                                 (dev, timestamp));
3997 }
3998
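/*
 * Usage sketch (illustrative only; assumes a PMD with IEEE 1588 support
 * and an mbuf flagged with PKT_RX_IEEE1588_TMST): read the RX timestamp
 * of the last received PTP event packet.
 *
 *        struct timespec ts;
 *
 *        rte_eth_timesync_enable(port_id);
 *        ... receive a PTP packet ...
 *        if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *                printf("rx ts %ld.%09ld\n", (long)ts.tv_sec,
 *                       ts.tv_nsec);
 */
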
3999 int
4000 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4001 {
4002         struct rte_eth_dev *dev;
4003
4004         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4005         dev = &rte_eth_devices[port_id];
4006
4007         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4008         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4009                                                                       delta));
4010 }
4011
4012 int
4013 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4014 {
4015         struct rte_eth_dev *dev;
4016
4017         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4018         dev = &rte_eth_devices[port_id];
4019
4020         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4021         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4022                                                                 timestamp));
4023 }
4024
4025 int
4026 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4027 {
4028         struct rte_eth_dev *dev;
4029
4030         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4031         dev = &rte_eth_devices[port_id];
4032
4033         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4034         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4035                                                                 timestamp));
4036 }
4037
4038 int
4039 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4040 {
4041         struct rte_eth_dev *dev;
4042
4043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4044
4045         dev = &rte_eth_devices[port_id];
4046         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4047         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4048 }
4049
4050 int
4051 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4052 {
4053         struct rte_eth_dev *dev;
4054
4055         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4056
4057         dev = &rte_eth_devices[port_id];
4058         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4059         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4060 }
4061
4062 int
4063 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4064 {
4065         struct rte_eth_dev *dev;
4066
4067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4068
4069         dev = &rte_eth_devices[port_id];
4070         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4071         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4072 }
4073
4074 int
4075 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4076 {
4077         struct rte_eth_dev *dev;
4078
4079         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4080
4081         dev = &rte_eth_devices[port_id];
4082         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4083         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4084 }
4085
4086 int __rte_experimental
4087 rte_eth_dev_get_module_info(uint16_t port_id,
4088                             struct rte_eth_dev_module_info *modinfo)
4089 {
4090         struct rte_eth_dev *dev;
4091
4092         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4093
4094         dev = &rte_eth_devices[port_id];
4095         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4096         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4097 }
4098
4099 int __rte_experimental
4100 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4101                               struct rte_dev_eeprom_info *info)
4102 {
4103         struct rte_eth_dev *dev;
4104
4105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4106
4107         dev = &rte_eth_devices[port_id];
4108         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4109         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4110 }
4111
4112 int
4113 rte_eth_dev_get_dcb_info(uint16_t port_id,
4114                              struct rte_eth_dcb_info *dcb_info)
4115 {
4116         struct rte_eth_dev *dev;
4117
4118         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4119
4120         dev = &rte_eth_devices[port_id];
4121         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4122
4123         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4124         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4125 }
4126
4127 int
4128 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4129                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4130 {
4131         struct rte_eth_dev *dev;
4132
4133         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4134         if (l2_tunnel == NULL) {
4135                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4136                 return -EINVAL;
4137         }
4138
4139         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4140                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4141                 return -EINVAL;
4142         }
4143
4144         dev = &rte_eth_devices[port_id];
4145         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4146                                 -ENOTSUP);
4147         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4148                                                                 l2_tunnel));
4149 }
4150
4151 int
4152 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4153                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4154                                   uint32_t mask,
4155                                   uint8_t en)
4156 {
4157         struct rte_eth_dev *dev;
4158
4159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4160
4161         if (l2_tunnel == NULL) {
4162                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4163                 return -EINVAL;
4164         }
4165
4166         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4167                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4168                 return -EINVAL;
4169         }
4170
4171         if (mask == 0) {
4172                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4173                 return -EINVAL;
4174         }
4175
4176         dev = &rte_eth_devices[port_id];
4177         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4178                                 -ENOTSUP);
4179         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4180                                                         l2_tunnel, mask, en));
4181 }
4182
4183 static void
4184 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4185                            const struct rte_eth_desc_lim *desc_lim)
4186 {
4187         if (desc_lim->nb_align != 0)
4188                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4189
4190         if (desc_lim->nb_max != 0)
4191                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4192
4193         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4194 }
4195
4196 int
4197 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4198                                  uint16_t *nb_rx_desc,
4199                                  uint16_t *nb_tx_desc)
4200 {
4201         struct rte_eth_dev *dev;
4202         struct rte_eth_dev_info dev_info;
4203
4204         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4205
4206         dev = &rte_eth_devices[port_id];
4207         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4208
4209         rte_eth_dev_info_get(port_id, &dev_info);
4210
4211         if (nb_rx_desc != NULL)
4212                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4213
4214         if (nb_tx_desc != NULL)
4215                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4216
4217         return 0;
4218 }
4219
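/*
 * Usage sketch (illustrative only; "mb_pool" is a stand-in for an
 * existing mbuf mempool): clamp the application's preferred ring sizes
 * to the device limits before setting up the queues.
 *
 *        uint16_t nb_rxd = 4096, nb_txd = 4096;
 *
 *        rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *        rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *                               NULL, mb_pool);
 *        rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(),
 *                               NULL);
 */
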
4220 int
4221 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4222 {
4223         struct rte_eth_dev *dev;
4224
4225         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4226
4227         if (pool == NULL)
4228                 return -EINVAL;
4229
4230         dev = &rte_eth_devices[port_id];
4231
4232         if (*dev->dev_ops->pool_ops_supported == NULL)
4233                 return 1; /* all pools are supported */
4234
4235         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4236 }
4237
4238 /**
4239  * A set of values to describe the possible states of a switch domain.
4240  */
4241 enum rte_eth_switch_domain_state {
4242         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4243         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4244 };
4245
4246 /**
4247  * Array of switch domains available for allocation. Array is sized to
4248  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4249  * ethdev ports in a single process.
4250  */
4251 struct rte_eth_dev_switch {
4252         enum rte_eth_switch_domain_state state;
4253 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4254
4255 int __rte_experimental
4256 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4257 {
4258         unsigned int i;
4259
4260         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4261
4262         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4263                 i < RTE_MAX_ETHPORTS; i++) {
4264                 if (rte_eth_switch_domains[i].state ==
4265                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4266                         rte_eth_switch_domains[i].state =
4267                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4268                         *domain_id = i;
4269                         return 0;
4270                 }
4271         }
4272
4273         return -ENOSPC;
4274 }
4275
4276 int __rte_experimental
4277 rte_eth_switch_domain_free(uint16_t domain_id)
4278 {
4279         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4280                 domain_id >= RTE_MAX_ETHPORTS)
4281                 return -EINVAL;
4282
4283         if (rte_eth_switch_domains[domain_id].state !=
4284                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4285                 return -EINVAL;
4286
4287         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4288
4289         return 0;
4290 }
4291
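/*
 * Usage sketch (illustrative only, driver-side; "priv" and its
 * switch_domain_id field are hypothetical driver state): group a PF and
 * its representors under one switch domain at probe time.
 *
 *        uint16_t domain_id;
 *
 *        if (rte_eth_switch_domain_alloc(&domain_id) == 0)
 *                priv->switch_domain_id = domain_id;
 *
 * The domain must be returned with rte_eth_switch_domain_free() when the
 * last port belonging to it is removed.
 */
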
4292 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4293
4294 static int
4295 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4296 {
4297         int state;
4298         struct rte_kvargs_pair *pair;
4299         char *letter;
4300
4301         arglist->str = strdup(str_in);
4302         if (arglist->str == NULL)
4303                 return -ENOMEM;
4304
4305         letter = arglist->str;
4306         state = 0;
4307         arglist->count = 0;
4308         pair = &arglist->pairs[0];
4309         while (1) {
4310                 switch (state) {
4311                 case 0: /* Initial */
4312                         if (*letter == '=')
4313                                 return -EINVAL;
4314                         else if (*letter == '\0')
4315                                 return 0;
4316
4317                         state = 1;
4318                         pair->key = letter;
4319                         /* fall-thru */
4320
4321                 case 1: /* Parsing key */
4322                         if (*letter == '=') {
4323                                 *letter = '\0';
4324                                 pair->value = letter + 1;
4325                                 state = 2;
4326                         } else if (*letter == ',' || *letter == '\0')
4327                                 return -EINVAL;
4328                         break;
4329
4331                 case 2: /* Parsing value */
4332                         if (*letter == '[')
4333                                 state = 3;
4334                         else if (*letter == ',') {
4335                                 *letter = '\0';
4336                                 arglist->count++;
4337                                 pair = &arglist->pairs[arglist->count];
4338                                 state = 0;
4339                         } else if (*letter == '\0') {
4340                                 letter--;
4341                                 arglist->count++;
4342                                 pair = &arglist->pairs[arglist->count];
4343                                 state = 0;
4344                         }
4345                         break;
4346
4347                 case 3: /* Parsing list */
4348                         if (*letter == ']')
4349                                 state = 2;
4350                         else if (*letter == '\0')
4351                                 return -EINVAL;
4352                         break;
4353                 }
4354                 letter++;
4355         }
4356 }
4357
4358 static int
4359 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4360         void *data)
4361 {
4362         char *str_start;
4363         int state;
4364         int result;
4365
4366         if (*str != '[')
4367                 /* Single element, not a list */
4368                 return callback(str, data);
4369
4370         /* Sanity check, then strip the brackets */
4371         str_start = &str[strlen(str) - 1];
4372         if (*str_start != ']') {
4373                 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'\n", str);
4374                 return -EINVAL;
4375         }
4376         str++;
4377         *str_start = '\0';
4378
4379         /* Process list elements */
4380         state = 0;
4381         while (1) {
4382                 if (state == 0) {
4383                         if (*str == '\0')
4384                                 break;
4385                         if (*str != ',') {
4386                                 str_start = str;
4387                                 state = 1;
4388                         }
4389                 } else if (state == 1) {
4390                         if (*str == ',' || *str == '\0') {
4391                                 if (str > str_start) {
4392                                         /* Non-empty string fragment */
4393                                         *str = '\0';
4394                                         result = callback(str_start, data);
4395                                         if (result < 0)
4396                                                 return result;
4397                                 }
4398                                 state = 0;
4399                         }
4400                 }
4401                 str++;
4402         }
4403         return 0;
4404 }
4405
4406 static int
4407 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4408         const uint16_t max_list)
4409 {
4410         uint16_t lo, hi, val;
4411         int result;
4412
4413         result = sscanf(str, "%hu-%hu", &lo, &hi);
4414         if (result == 1) {
4415                 if (*len_list >= max_list)
4416                         return -ENOMEM;
4417                 list[(*len_list)++] = lo;
4418         } else if (result == 2) {
4419                 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4420                         return -EINVAL;
4421                 for (val = lo; val <= hi; val++) {
4422                         if (*len_list >= max_list)
4423                                 return -ENOMEM;
4424                         list[(*len_list)++] = val;
4425                 }
4426         } else
4427                 return -EINVAL;
4428         return 0;
4429 }
4430
4432 static int
4433 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4434 {
4435         struct rte_eth_devargs *eth_da = data;
4436
4437         return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4438                 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
4439 }
4440
4441 int __rte_experimental
4442 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4443 {
4444         struct rte_kvargs args;
4445         struct rte_kvargs_pair *pair;
4446         unsigned int i;
4447         int result = 0;
4448
4449         memset(eth_da, 0, sizeof(*eth_da));
4450
4451         result = rte_eth_devargs_tokenise(&args, dargs);
4452         if (result < 0)
4453                 goto parse_cleanup;
4454
4455         for (i = 0; i < args.count; i++) {
4456                 pair = &args.pairs[i];
4457                 if (strcmp("representor", pair->key) == 0) {
4458                         result = rte_eth_devargs_parse_list(pair->value,
4459                                 rte_eth_devargs_parse_representor_ports,
4460                                 eth_da);
4461                         if (result < 0)
4462                                 goto parse_cleanup;
4463                 }
4464         }
4465
4466 parse_cleanup:
4467         if (args.str)
4468                 free(args.str);
4469
4470         return result;
4471 }
4472
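/*
 * Usage sketch (illustrative only): expand the device argument
 * "representor=[0-3]" and walk the resulting representor port list.
 *
 *        struct rte_eth_devargs da;
 *        uint16_t i;
 *
 *        if (rte_eth_devargs_parse("representor=[0-3]", &da) == 0)
 *                for (i = 0; i < da.nb_representor_ports; i++)
 *                        printf("representor %u\n",
 *                               da.representor_ports[i]);
 */
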
4473 RTE_INIT(ethdev_init_log)
4474 {
4475         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
4476         if (rte_eth_dev_logtype >= 0)
4477                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
4478 }