ethdev: remove release function for secondary process
[dpdk.git] lib/librte_ethdev/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <inttypes.h>
16 #include <netinet/in.h>
17
18 #include <rte_byteorder.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_common.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_errno.h>
36 #include <rte_spinlock.h>
37 #include <rte_string_fns.h>
38 #include <rte_kvargs.h>
39
40 #include "rte_ether.h"
41 #include "rte_ethdev.h"
42 #include "rte_ethdev_driver.h"
43 #include "ethdev_profile.h"
44
45 int rte_eth_dev_logtype;
46
47 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
48 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
49 static uint16_t eth_dev_last_created_port;
50
51 /* spinlock for eth device callbacks */
52 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
53
54 /* spinlock for add/remove rx callbacks */
55 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
56
57 /* spinlock for add/remove tx callbacks */
58 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
59
60 /* spinlock for shared data allocation */
61 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
62
63         /* store statistics names and their offsets in the stats structure */
64 struct rte_eth_xstats_name_off {
65         char name[RTE_ETH_XSTATS_NAME_SIZE];
66         unsigned offset;
67 };
68
69 /* Shared memory between primary and secondary processes. */
70 static struct {
71         uint64_t next_owner_id;
72         rte_spinlock_t ownership_lock;
73         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
74 } *rte_eth_dev_shared_data;
75
76 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
77         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
78         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
79         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
80         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
81         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
82         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
83         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
84         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
85                 rx_nombuf)},
86 };
87
88 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
89
90 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
91         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
92         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
93         {"errors", offsetof(struct rte_eth_stats, q_errors)},
94 };
95
96 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
97                 sizeof(rte_rxq_stats_strings[0]))
98
99 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
100         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
101         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
102 };
103 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
104                 sizeof(rte_txq_stats_strings[0]))
105
106 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
107         { DEV_RX_OFFLOAD_##_name, #_name }
108
109 static const struct {
110         uint64_t offload;
111         const char *name;
112 } rte_rx_offload_names[] = {
113         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
114         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
115         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
118         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
119         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
120         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
121         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
122         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
123         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
124         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
125         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
126         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
127         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
128         RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
129         RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
130         RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
131 };
132
133 #undef RTE_RX_OFFLOAD_BIT2STR
134
135 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
136         { DEV_TX_OFFLOAD_##_name, #_name }
137
138 static const struct {
139         uint64_t offload;
140         const char *name;
141 } rte_tx_offload_names[] = {
142         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
143         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
147         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
150         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
151         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
155         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
156         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
157         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
158         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
159         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
160         RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
161         RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
162         RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
163 };
164
165 #undef RTE_TX_OFFLOAD_BIT2STR
166
167 /**
168  * The user application callback description.
169  *
170  * It contains callback address to be registered by user application,
171  * the pointer to the parameters for callback, and the event type.
172  */
173 struct rte_eth_dev_callback {
174         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
175         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
176         void *cb_arg;                           /**< Parameter for callback */
177         void *ret_param;                        /**< Return parameter */
178         enum rte_eth_event_type event;          /**< Interrupt event type */
179         uint32_t active;                        /**< Callback is executing */
180 };
181
182 enum {
183         STAT_QMAP_TX = 0,
184         STAT_QMAP_RX
185 };
186
187 uint16_t
188 rte_eth_find_next(uint16_t port_id)
189 {
190         while (port_id < RTE_MAX_ETHPORTS &&
191                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
192                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
193                 port_id++;
194
195         if (port_id >= RTE_MAX_ETHPORTS)
196                 return RTE_MAX_ETHPORTS;
197
198         return port_id;
199 }
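
/*
 * Usage sketch (illustrative): rte_eth_find_next() is essentially the
 * iterator behind the RTE_ETH_FOREACH_DEV() family:
 *
 *   uint16_t pid;
 *   for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *        pid = rte_eth_find_next(pid + 1))
 *           printf("port %u is attached\n", pid);
 */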
200
201 static void
202 rte_eth_dev_shared_data_prepare(void)
203 {
204         const unsigned flags = 0;
205         const struct rte_memzone *mz;
206
207         rte_spinlock_lock(&rte_eth_shared_data_lock);
208
209         if (rte_eth_dev_shared_data == NULL) {
210                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
211                         /* Allocate port data and ownership shared memory. */
212                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
213                                         sizeof(*rte_eth_dev_shared_data),
214                                         rte_socket_id(), flags);
215                 } else
216                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
217                 if (mz == NULL)
218                         rte_panic("Cannot allocate ethdev shared data\n");
219
220                 rte_eth_dev_shared_data = mz->addr;
221                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
222                         rte_eth_dev_shared_data->next_owner_id =
223                                         RTE_ETH_DEV_NO_OWNER + 1;
224                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
225                         memset(rte_eth_dev_shared_data->data, 0,
226                                sizeof(rte_eth_dev_shared_data->data));
227                 }
228         }
229
230         rte_spinlock_unlock(&rte_eth_shared_data_lock);
231 }
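
/*
 * A minimal sketch of the rendezvous implemented above; the memzone name
 * is the only contract between the two process types:
 *
 *   primary:   mz = rte_memzone_reserve("rte_eth_dev_data", size, socket, 0);
 *   secondary: mz = rte_memzone_lookup("rte_eth_dev_data");
 *
 * Both then set rte_eth_dev_shared_data = mz->addr, so port data and
 * ownership records live in memory visible to every process.
 */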
232
233 static bool
234 is_allocated(const struct rte_eth_dev *ethdev)
235 {
236         return ethdev->data->name[0] != '\0';
237 }
238
239 static struct rte_eth_dev *
240 _rte_eth_dev_allocated(const char *name)
241 {
242         unsigned i;
243
244         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
245                 if (rte_eth_devices[i].data != NULL &&
246                     strcmp(rte_eth_devices[i].data->name, name) == 0)
247                         return &rte_eth_devices[i];
248         }
249         return NULL;
250 }
251
252 struct rte_eth_dev *
253 rte_eth_dev_allocated(const char *name)
254 {
255         struct rte_eth_dev *ethdev;
256
257         rte_eth_dev_shared_data_prepare();
258
259         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
260
261         ethdev = _rte_eth_dev_allocated(name);
262
263         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
264
265         return ethdev;
266 }
267
268 static uint16_t
269 rte_eth_dev_find_free_port(void)
270 {
271         unsigned i;
272
273         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
274                 /* Using shared name field to find a free port. */
275                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
276                         RTE_ASSERT(rte_eth_devices[i].state ==
277                                    RTE_ETH_DEV_UNUSED);
278                         return i;
279                 }
280         }
281         return RTE_MAX_ETHPORTS;
282 }
283
284 static struct rte_eth_dev *
285 eth_dev_get(uint16_t port_id)
286 {
287         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
288
289         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
290
291         eth_dev_last_created_port = port_id;
292
293         return eth_dev;
294 }
295
296 struct rte_eth_dev *
297 rte_eth_dev_allocate(const char *name)
298 {
299         uint16_t port_id;
300         struct rte_eth_dev *eth_dev = NULL;
301
302         rte_eth_dev_shared_data_prepare();
303
304         /* Synchronize port creation between primary and secondary processes. */
305         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
306
307         if (_rte_eth_dev_allocated(name) != NULL) {
308                 RTE_ETHDEV_LOG(ERR,
309                         "Ethernet device with name %s already allocated\n",
310                         name);
311                 goto unlock;
312         }
313
314         port_id = rte_eth_dev_find_free_port();
315         if (port_id == RTE_MAX_ETHPORTS) {
316                 RTE_ETHDEV_LOG(ERR,
317                         "Reached maximum number of Ethernet ports\n");
318                 goto unlock;
319         }
320
321         eth_dev = eth_dev_get(port_id);
322         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
323         eth_dev->data->port_id = port_id;
324         eth_dev->data->mtu = ETHER_MTU;
325
326 unlock:
327         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
328
329         return eth_dev;
330 }
331
332 /*
333  * Attach to a port already registered by the primary process, which
334  * guarantees that the same device gets the same port id in both the
335  * primary and secondary processes.
336  */
337 struct rte_eth_dev *
338 rte_eth_dev_attach_secondary(const char *name)
339 {
340         uint16_t i;
341         struct rte_eth_dev *eth_dev = NULL;
342
343         rte_eth_dev_shared_data_prepare();
344
345         /* Synchronize port attachment with primary-process port creation and release. */
346         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
347
348         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
349                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
350                         break;
351         }
352         if (i == RTE_MAX_ETHPORTS) {
353                 RTE_ETHDEV_LOG(ERR,
354                         "Device %s is not driven by the primary process\n",
355                         name);
356         } else {
357                 eth_dev = eth_dev_get(i);
358                 RTE_ASSERT(eth_dev->data->port_id == i);
359         }
360
361         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
362         return eth_dev;
363 }
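
/*
 * Typical probe-time usage in a PMD (a hedged sketch; my_pmd_probe and its
 * vdev argument are hypothetical names, not part of this file):
 *
 *   static int my_pmd_probe(struct rte_vdev_device *vdev)
 *   {
 *           const char *name = rte_vdev_device_name(vdev);
 *           struct rte_eth_dev *eth_dev;
 *
 *           if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *                   eth_dev = rte_eth_dev_allocate(name);
 *           else
 *                   eth_dev = rte_eth_dev_attach_secondary(name);
 *           if (eth_dev == NULL)
 *                   return -ENODEV;
 *           ... fill in dev_ops and rx/tx burst functions ...
 *           return 0;
 *   }
 */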
364
365 int
366 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
367 {
368         if (eth_dev == NULL)
369                 return -EINVAL;
370
371         rte_eth_dev_shared_data_prepare();
372
373         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
374
375         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
376
377         eth_dev->state = RTE_ETH_DEV_UNUSED;
378
379         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
380                 rte_free(eth_dev->data->rx_queues);
381                 rte_free(eth_dev->data->tx_queues);
382                 rte_free(eth_dev->data->mac_addrs);
383                 rte_free(eth_dev->data->hash_mac_addrs);
384                 rte_free(eth_dev->data->dev_private);
385                 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
386         }
387
388         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
389
390         return 0;
391 }
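
/*
 * Counterpart removal sketch (my_pmd_remove is a hypothetical name). Note
 * that this single release path serves both process types: per the branch
 * above, only the primary frees the shared port data.
 *
 *   static int my_pmd_remove(struct rte_vdev_device *vdev)
 *   {
 *           struct rte_eth_dev *eth_dev;
 *
 *           eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
 *           if (eth_dev == NULL)
 *                   return 0;
 *           return rte_eth_dev_release_port(eth_dev);
 *   }
 */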
392
393 int
394 rte_eth_dev_is_valid_port(uint16_t port_id)
395 {
396         if (port_id >= RTE_MAX_ETHPORTS ||
397             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
398                 return 0;
399         else
400                 return 1;
401 }
402
403 static int
404 rte_eth_is_valid_owner_id(uint64_t owner_id)
405 {
406         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
407             rte_eth_dev_shared_data->next_owner_id <= owner_id)
408                 return 0;
409         return 1;
410 }
411
412 uint64_t
413 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
414 {
415         while (port_id < RTE_MAX_ETHPORTS &&
416                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
417                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
418                rte_eth_devices[port_id].data->owner.id != owner_id))
419                 port_id++;
420
421         if (port_id >= RTE_MAX_ETHPORTS)
422                 return RTE_MAX_ETHPORTS;
423
424         return port_id;
425 }
426
427 int __rte_experimental
428 rte_eth_dev_owner_new(uint64_t *owner_id)
429 {
430         rte_eth_dev_shared_data_prepare();
431
432         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
433
434         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
435
436         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
437         return 0;
438 }
439
440 static int
441 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
442                        const struct rte_eth_dev_owner *new_owner)
443 {
444         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
445         struct rte_eth_dev_owner *port_owner;
446         int sret;
447
448         if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
449                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
450                         port_id);
451                 return -ENODEV;
452         }
453
454         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
455             !rte_eth_is_valid_owner_id(old_owner_id)) {
456                 RTE_ETHDEV_LOG(ERR,
457                         "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
458                        old_owner_id, new_owner->id);
459                 return -EINVAL;
460         }
461
462         port_owner = &rte_eth_devices[port_id].data->owner;
463         if (port_owner->id != old_owner_id) {
464                 RTE_ETHDEV_LOG(ERR,
465                         "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
466                         port_id, port_owner->name, port_owner->id);
467                 return -EPERM;
468         }
469
470         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
471                         new_owner->name);
472         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
473                 RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
474                         port_id);
475
476         port_owner->id = new_owner->id;
477
478         RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
479                 port_id, new_owner->name, new_owner->id);
480
481         return 0;
482 }
483
484 int __rte_experimental
485 rte_eth_dev_owner_set(const uint16_t port_id,
486                       const struct rte_eth_dev_owner *owner)
487 {
488         int ret;
489
490         rte_eth_dev_shared_data_prepare();
491
492         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
493
494         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
495
496         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
497         return ret;
498 }
499
500 int __rte_experimental
501 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
502 {
503         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
504                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
505         int ret;
506
507         rte_eth_dev_shared_data_prepare();
508
509         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
510
511         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
512
513         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
514         return ret;
515 }
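
/*
 * Ownership API usage sketch (the owner name is an example):
 *
 *   struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *   rte_eth_dev_owner_new(&owner.id);
 *   rte_eth_dev_owner_set(port_id, &owner);
 *   ... the port is now skipped by RTE_ETH_FOREACH_DEV elsewhere ...
 *   rte_eth_dev_owner_unset(port_id, owner.id);
 */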
516
517 void __rte_experimental
518 rte_eth_dev_owner_delete(const uint64_t owner_id)
519 {
520         uint16_t port_id;
521
522         rte_eth_dev_shared_data_prepare();
523
524         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
525
526         if (rte_eth_is_valid_owner_id(owner_id)) {
527                 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
528                         if (rte_eth_devices[port_id].data->owner.id == owner_id)
529                                 memset(&rte_eth_devices[port_id].data->owner, 0,
530                                        sizeof(struct rte_eth_dev_owner));
531                 RTE_ETHDEV_LOG(NOTICE,
532                         "All port owners owned by %016"PRIx64" identifier have been removed\n",
533                         owner_id);
534         } else {
535                 RTE_ETHDEV_LOG(ERR,
536                                "Invalid owner id=%016"PRIx64"\n",
537                                owner_id);
538         }
539
540         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
541 }
542
543 int __rte_experimental
544 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
545 {
546         int ret = 0;
547         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
548
549         rte_eth_dev_shared_data_prepare();
550
551         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
552
553         if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
554                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
555                         port_id);
556                 ret = -ENODEV;
557         } else {
558                 rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
559         }
560
561         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
562         return ret;
563 }
564
565 int
566 rte_eth_dev_socket_id(uint16_t port_id)
567 {
568         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
569         return rte_eth_devices[port_id].data->numa_node;
570 }
571
572 void *
573 rte_eth_dev_get_sec_ctx(uint16_t port_id)
574 {
575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
576         return rte_eth_devices[port_id].security_ctx;
577 }
578
579 uint16_t
580 rte_eth_dev_count(void)
581 {
582         return rte_eth_dev_count_avail();
583 }
584
585 uint16_t
586 rte_eth_dev_count_avail(void)
587 {
588         uint16_t p;
589         uint16_t count;
590
591         count = 0;
592
593         RTE_ETH_FOREACH_DEV(p)
594                 count++;
595
596         return count;
597 }
598
599 uint16_t __rte_experimental
600 rte_eth_dev_count_total(void)
601 {
602         uint16_t port, count = 0;
603
604         for (port = 0; port < RTE_MAX_ETHPORTS; port++)
605                 if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
606                         count++;
607
608         return count;
609 }
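
/*
 * Note on the two counters above: rte_eth_dev_count_avail() counts only the
 * ports iterated by RTE_ETH_FOREACH_DEV, i.e. unowned ports in the ATTACHED
 * or REMOVED state, while rte_eth_dev_count_total() counts every port whose
 * state is not RTE_ETH_DEV_UNUSED, owned or not.
 */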
610
611 int
612 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
613 {
614         char *tmp;
615
616         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
617
618         if (name == NULL) {
619                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
620                 return -EINVAL;
621         }
622
623         /* shouldn't check 'rte_eth_devices[i].data' here,
624          * because it might be overwritten by a VDEV PMD */
625         tmp = rte_eth_dev_shared_data->data[port_id].name;
626         strcpy(name, tmp);
627         return 0;
628 }
629
630 int
631 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
632 {
633         uint32_t pid;
634
635         if (name == NULL) {
636                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
637                 return -EINVAL;
638         }
639
640         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
641                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
642                     !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
643                         *port_id = pid;
644                         return 0;
645                 }
646         }
647
648         return -ENODEV;
649 }
650
651 static int
652 eth_err(uint16_t port_id, int ret)
653 {
654         if (ret == 0)
655                 return 0;
656         if (rte_eth_dev_is_removed(port_id))
657                 return -EIO;
658         return ret;
659 }
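
/*
 * eth_err() gives the wrappers below a uniform hot-unplug story: whatever
 * error a dev_ops callback returned, the application sees -EIO instead if
 * the port has meanwhile been physically removed. For example, -EINVAL
 * from a driver whose NIC was just unplugged is folded into -EIO.
 */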
660
661 /* attach the new device, then store the port_id of the device */
662 int
663 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
664 {
665         int current = rte_eth_dev_count_total();
666         struct rte_devargs da;
667         int ret = -1;
668
669         memset(&da, 0, sizeof(da));
670
671         if ((devargs == NULL) || (port_id == NULL)) {
672                 ret = -EINVAL;
673                 goto err;
674         }
675
676         /* parse devargs */
677         if (rte_devargs_parse(&da, devargs))
678                 goto err;
679
680         ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
681         if (ret < 0)
682                 goto err;
683
684         /* no point looking at the port count if no port exists */
685         if (!rte_eth_dev_count_total()) {
686                 RTE_ETHDEV_LOG(ERR, "No port found for device (%s)\n", da.name);
687                 ret = -1;
688                 goto err;
689         }
690
691         /* if nothing happened, there is a bug here, since some driver told us
692          * it did attach a device, but did not create a port.
693          * FIXME: race condition in case of plug-out of another device
694          */
695         if (current == rte_eth_dev_count_total()) {
696                 ret = -1;
697                 goto err;
698         }
699
700         *port_id = eth_dev_last_created_port;
701         ret = 0;
702
703 err:
704         free(da.args);
705         return ret;
706 }
707
708 /* detach the device; the name output parameter is no longer filled in */
709 int
710 rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
711 {
712         struct rte_device *dev;
713         struct rte_bus *bus;
714         uint32_t dev_flags;
715         int ret = -1;
716
717         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
718
719         dev_flags = rte_eth_devices[port_id].data->dev_flags;
720         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
721                 RTE_ETHDEV_LOG(ERR,
722                         "Port %"PRIu16" is bonded, cannot detach\n", port_id);
723                 return -ENOTSUP;
724         }
725
726         dev = rte_eth_devices[port_id].device;
727         if (dev == NULL)
728                 return -EINVAL;
729
730         bus = rte_bus_find_by_device(dev);
731         if (bus == NULL)
732                 return -ENOENT;
733
734         ret = rte_eal_hotplug_remove(bus->name, dev->name);
735         if (ret < 0)
736                 return ret;
737
738         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
739         return 0;
740 }
741
742 static int
743 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
744 {
745         uint16_t old_nb_queues = dev->data->nb_rx_queues;
746         void **rxq;
747         unsigned i;
748
749         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
750                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
751                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
752                                 RTE_CACHE_LINE_SIZE);
753                 if (dev->data->rx_queues == NULL) {
754                         dev->data->nb_rx_queues = 0;
755                         return -(ENOMEM);
756                 }
757         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
758                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
759
760                 rxq = dev->data->rx_queues;
761
762                 for (i = nb_queues; i < old_nb_queues; i++)
763                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
764                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
765                                 RTE_CACHE_LINE_SIZE);
766                 if (rxq == NULL)
767                         return -(ENOMEM);
768                 if (nb_queues > old_nb_queues) {
769                         uint16_t new_qs = nb_queues - old_nb_queues;
770
771                         memset(rxq + old_nb_queues, 0,
772                                 sizeof(rxq[0]) * new_qs);
773                 }
774
775                 dev->data->rx_queues = rxq;
776
777         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
778                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
779
780                 rxq = dev->data->rx_queues;
781
782                 for (i = nb_queues; i < old_nb_queues; i++)
783                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
784
785                 rte_free(dev->data->rx_queues);
786                 dev->data->rx_queues = NULL;
787         }
788         dev->data->nb_rx_queues = nb_queues;
789         return 0;
790 }
791
792 int
793 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
794 {
795         struct rte_eth_dev *dev;
796
797         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
798
799         dev = &rte_eth_devices[port_id];
800         if (!dev->data->dev_started) {
801                 RTE_ETHDEV_LOG(ERR,
802                         "Port %u must be started before start any queue\n",
803                         "Port %u must be started before starting any queue\n",
804                 return -EINVAL;
805         }
806
807         if (rx_queue_id >= dev->data->nb_rx_queues) {
808                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
809                 return -EINVAL;
810         }
811
812         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
813
814         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
815                 RTE_ETHDEV_LOG(INFO,
816                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
817                         rx_queue_id, port_id);
818                 return 0;
819         }
820
821         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
822                                                              rx_queue_id));
823
824 }
825
826 int
827 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
828 {
829         struct rte_eth_dev *dev;
830
831         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
832
833         dev = &rte_eth_devices[port_id];
834         if (rx_queue_id >= dev->data->nb_rx_queues) {
835                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
836                 return -EINVAL;
837         }
838
839         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
840
841         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
842                 RTE_ETHDEV_LOG(INFO,
843                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
844                         rx_queue_id, port_id);
845                 return 0;
846         }
847
848         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
849
850 }
851
852 int
853 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
854 {
855         struct rte_eth_dev *dev;
856
857         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
858
859         dev = &rte_eth_devices[port_id];
860         if (!dev->data->dev_started) {
861                 RTE_ETHDEV_LOG(ERR,
862                         "Port %u must be started before starting any queue\n",
863                         port_id);
864                 return -EINVAL;
865         }
866
867         if (tx_queue_id >= dev->data->nb_tx_queues) {
868                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
869                 return -EINVAL;
870         }
871
872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
873
874         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
875                 RTE_ETHDEV_LOG(INFO,
876                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
877                         tx_queue_id, port_id);
878                 return 0;
879         }
880
881         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
882 }
883
884 int
885 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
886 {
887         struct rte_eth_dev *dev;
888
889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
890
891         dev = &rte_eth_devices[port_id];
892         if (tx_queue_id >= dev->data->nb_tx_queues) {
893                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
894                 return -EINVAL;
895         }
896
897         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
898
899         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
900                 RTE_ETHDEV_LOG(INFO,
901                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
902                         tx_queue_id, port_id);
903                 return 0;
904         }
905
906         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
907
908 }
909
910 static int
911 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
912 {
913         uint16_t old_nb_queues = dev->data->nb_tx_queues;
914         void **txq;
915         unsigned i;
916
917         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
918                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
919                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
920                                                    RTE_CACHE_LINE_SIZE);
921                 if (dev->data->tx_queues == NULL) {
922                         dev->data->nb_tx_queues = 0;
923                         return -(ENOMEM);
924                 }
925         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
926                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
927
928                 txq = dev->data->tx_queues;
929
930                 for (i = nb_queues; i < old_nb_queues; i++)
931                         (*dev->dev_ops->tx_queue_release)(txq[i]);
932                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
933                                   RTE_CACHE_LINE_SIZE);
934                 if (txq == NULL)
935                         return -ENOMEM;
936                 if (nb_queues > old_nb_queues) {
937                         uint16_t new_qs = nb_queues - old_nb_queues;
938
939                         memset(txq + old_nb_queues, 0,
940                                sizeof(txq[0]) * new_qs);
941                 }
942
943                 dev->data->tx_queues = txq;
944
945         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
946                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
947
948                 txq = dev->data->tx_queues;
949
950                 for (i = nb_queues; i < old_nb_queues; i++)
951                         (*dev->dev_ops->tx_queue_release)(txq[i]);
952
953                 rte_free(dev->data->tx_queues);
954                 dev->data->tx_queues = NULL;
955         }
956         dev->data->nb_tx_queues = nb_queues;
957         return 0;
958 }
959
960 uint32_t
961 rte_eth_speed_bitflag(uint32_t speed, int duplex)
962 {
963         switch (speed) {
964         case ETH_SPEED_NUM_10M:
965                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
966         case ETH_SPEED_NUM_100M:
967                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
968         case ETH_SPEED_NUM_1G:
969                 return ETH_LINK_SPEED_1G;
970         case ETH_SPEED_NUM_2_5G:
971                 return ETH_LINK_SPEED_2_5G;
972         case ETH_SPEED_NUM_5G:
973                 return ETH_LINK_SPEED_5G;
974         case ETH_SPEED_NUM_10G:
975                 return ETH_LINK_SPEED_10G;
976         case ETH_SPEED_NUM_20G:
977                 return ETH_LINK_SPEED_20G;
978         case ETH_SPEED_NUM_25G:
979                 return ETH_LINK_SPEED_25G;
980         case ETH_SPEED_NUM_40G:
981                 return ETH_LINK_SPEED_40G;
982         case ETH_SPEED_NUM_50G:
983                 return ETH_LINK_SPEED_50G;
984         case ETH_SPEED_NUM_56G:
985                 return ETH_LINK_SPEED_56G;
986         case ETH_SPEED_NUM_100G:
987                 return ETH_LINK_SPEED_100G;
988         default:
989                 return 0;
990         }
991 }
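
/*
 * Worked example (illustrative): requesting fixed 10G full duplex in
 * struct rte_eth_conf:
 *
 *   conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                      rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                            ETH_LINK_FULL_DUPLEX);
 *
 * which evaluates to ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G.
 */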
992
993 const char * __rte_experimental
994 rte_eth_dev_rx_offload_name(uint64_t offload)
995 {
996         const char *name = "UNKNOWN";
997         unsigned int i;
998
999         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1000                 if (offload == rte_rx_offload_names[i].offload) {
1001                         name = rte_rx_offload_names[i].name;
1002                         break;
1003                 }
1004         }
1005
1006         return name;
1007 }
1008
1009 const char * __rte_experimental
1010 rte_eth_dev_tx_offload_name(uint64_t offload)
1011 {
1012         const char *name = "UNKNOWN";
1013         unsigned int i;
1014
1015         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1016                 if (offload == rte_tx_offload_names[i].offload) {
1017                         name = rte_tx_offload_names[i].name;
1018                         break;
1019                 }
1020         }
1021
1022         return name;
1023 }
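
/*
 * Both lookups above translate exactly one bit, so a capability mask is
 * printed bit by bit (a sketch):
 *
 *   uint64_t caps = dev_info.rx_offload_capa;
 *
 *   while (caps) {
 *           uint64_t bit = caps & ~(caps - 1);  (isolate the lowest set bit)
 *           printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *           caps &= ~bit;
 *   }
 */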
1024
1025 int
1026 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1027                       const struct rte_eth_conf *dev_conf)
1028 {
1029         struct rte_eth_dev *dev;
1030         struct rte_eth_dev_info dev_info;
1031         struct rte_eth_conf local_conf = *dev_conf;
1032         int diag;
1033
1034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1035
1036         dev = &rte_eth_devices[port_id];
1037
1038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1039         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1040
1041         rte_eth_dev_info_get(port_id, &dev_info);
1042
1043         /* If number of queues specified by application for both Rx and Tx is
1044          * zero, use driver preferred values. This cannot be done individually
1045          * as it is valid for either Tx or Rx (but not both) to be zero.
1046          * If the driver does not provide any preferred values, fall back on
1047          * EAL defaults.
1048          */
1049         if (nb_rx_q == 0 && nb_tx_q == 0) {
1050                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1051                 if (nb_rx_q == 0)
1052                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1053                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1054                 if (nb_tx_q == 0)
1055                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1056         }
1057
1058         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1059                 RTE_ETHDEV_LOG(ERR,
1060                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
1061                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1062                 return -EINVAL;
1063         }
1064
1065         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1066                 RTE_ETHDEV_LOG(ERR,
1067                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
1068                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1069                 return -EINVAL;
1070         }
1071
1072         if (dev->data->dev_started) {
1073                 RTE_ETHDEV_LOG(ERR,
1074                         "Port %u must be stopped to allow configuration\n",
1075                         port_id);
1076                 return -EBUSY;
1077         }
1078
1079         /* Copy the dev_conf parameter into the dev structure */
1080         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1081
1082         /*
1083          * Check that the numbers of RX and TX queues are not greater
1084          * than the maximum number of RX and TX queues supported by the
1085          * configured device.
1086          */
1087         if (nb_rx_q > dev_info.max_rx_queues) {
1088                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1089                         port_id, nb_rx_q, dev_info.max_rx_queues);
1090                 return -EINVAL;
1091         }
1092
1093         if (nb_tx_q > dev_info.max_tx_queues) {
1094                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1095                         port_id, nb_tx_q, dev_info.max_tx_queues);
1096                 return -EINVAL;
1097         }
1098
1099         /* Check that the device supports requested interrupts */
1100         if ((dev_conf->intr_conf.lsc == 1) &&
1101                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1102                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1103                         dev->device->driver->name);
1104                 return -EINVAL;
1105         }
1106         if ((dev_conf->intr_conf.rmv == 1) &&
1107                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1108                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1109                         dev->device->driver->name);
1110                 return -EINVAL;
1111         }
1112
1113         /*
1114          * If jumbo frames are enabled, check that the maximum RX packet
1115          * length is supported by the configured device.
1116          */
1117         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1118                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1119                         RTE_ETHDEV_LOG(ERR,
1120                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1121                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1122                                 dev_info.max_rx_pktlen);
1123                         return -EINVAL;
1124                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1125                         RTE_ETHDEV_LOG(ERR,
1126                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1127                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1128                                 (unsigned)ETHER_MIN_LEN);
1129                         return -EINVAL;
1130                 }
1131         } else {
1132                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1133                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1134                         /* Use default value */
1135                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1136                                                         ETHER_MAX_LEN;
1137         }
1138
1139         /* Any requested offload must be within the device's capabilities */
1140         if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1141              local_conf.rxmode.offloads) {
1142                 RTE_ETHDEV_LOG(ERR,
1143                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1144                         "capabilities 0x%"PRIx64" in %s()\n",
1145                         port_id, local_conf.rxmode.offloads,
1146                         dev_info.rx_offload_capa,
1147                         __func__);
1148                 return -EINVAL;
1149         }
1150         if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
1151              local_conf.txmode.offloads) {
1152                 RTE_ETHDEV_LOG(ERR,
1153                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1154                         "capabilities 0x%"PRIx64" in %s()\n",
1155                         port_id, local_conf.txmode.offloads,
1156                         dev_info.tx_offload_capa,
1157                         __func__);
1158                 return -EINVAL;
1159         }
1160
1161         /* Check that the device supports the requested RSS hash functions. */
1162         if ((dev_info.flow_type_rss_offloads |
1163              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1164             dev_info.flow_type_rss_offloads) {
1165                 RTE_ETHDEV_LOG(ERR,
1166                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1167                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1168                         dev_info.flow_type_rss_offloads);
1169                 return -EINVAL;
1170         }
1171
1172         /*
1173          * Setup new number of RX/TX queues and reconfigure device.
1174          */
1175         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1176         if (diag != 0) {
1177                 RTE_ETHDEV_LOG(ERR,
1178                         "Port%u rte_eth_dev_rx_queue_config = %d\n",
1179                         port_id, diag);
1180                 return diag;
1181         }
1182
1183         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1184         if (diag != 0) {
1185                 RTE_ETHDEV_LOG(ERR,
1186                         "Port%u rte_eth_dev_tx_queue_config = %d\n",
1187                         port_id, diag);
1188                 rte_eth_dev_rx_queue_config(dev, 0);
1189                 return diag;
1190         }
1191
1192         diag = (*dev->dev_ops->dev_configure)(dev);
1193         if (diag != 0) {
1194                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1195                         port_id, diag);
1196                 rte_eth_dev_rx_queue_config(dev, 0);
1197                 rte_eth_dev_tx_queue_config(dev, 0);
1198                 return eth_err(port_id, diag);
1199         }
1200
1201         /* Initialize Rx profiling if enabled at compilation time. */
1202         diag = __rte_eth_dev_profile_init(port_id, dev);
1203         if (diag != 0) {
1204                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1205                         port_id, diag);
1206                 rte_eth_dev_rx_queue_config(dev, 0);
1207                 rte_eth_dev_tx_queue_config(dev, 0);
1208                 return eth_err(port_id, diag);
1209         }
1210
1211         return 0;
1212 }
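
/*
 * Typical configuration sequence (a sketch; the queue counts and conf
 * values are examples only):
 *
 *   struct rte_eth_conf conf = { 0 };
 *   int ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *
 *   if (ret < 0)
 *           return ret;
 *   ... one rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() per queue,
 *   then rte_eth_dev_start() ...
 */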
1213
1214 void
1215 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1216 {
1217         if (dev->data->dev_started) {
1218                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1219                         dev->data->port_id);
1220                 return;
1221         }
1222
1223         rte_eth_dev_rx_queue_config(dev, 0);
1224         rte_eth_dev_tx_queue_config(dev, 0);
1225
1226         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1227 }
1228
1229 static void
1230 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1231                         struct rte_eth_dev_info *dev_info)
1232 {
1233         struct ether_addr *addr;
1234         uint16_t i;
1235         uint32_t pool = 0;
1236         uint64_t pool_mask;
1237
1238         /* replay MAC address configuration including default MAC */
1239         addr = &dev->data->mac_addrs[0];
1240         if (*dev->dev_ops->mac_addr_set != NULL)
1241                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1242         else if (*dev->dev_ops->mac_addr_add != NULL)
1243                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1244
1245         if (*dev->dev_ops->mac_addr_add != NULL) {
1246                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1247                         addr = &dev->data->mac_addrs[i];
1248
1249                         /* skip zero address */
1250                         if (is_zero_ether_addr(addr))
1251                                 continue;
1252
1253                         pool = 0;
1254                         pool_mask = dev->data->mac_pool_sel[i];
1255
1256                         do {
1257                                 if (pool_mask & 1ULL)
1258                                         (*dev->dev_ops->mac_addr_add)(dev,
1259                                                 addr, i, pool);
1260                                 pool_mask >>= 1;
1261                                 pool++;
1262                         } while (pool_mask);
1263                 }
1264         }
1265 }
1266
1267 static void
1268 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1269                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1270 {
1271         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1272                 rte_eth_dev_mac_restore(dev, dev_info);
1273
1274         /* replay promiscuous configuration */
1275         if (rte_eth_promiscuous_get(port_id) == 1)
1276                 rte_eth_promiscuous_enable(port_id);
1277         else if (rte_eth_promiscuous_get(port_id) == 0)
1278                 rte_eth_promiscuous_disable(port_id);
1279
1280         /* replay all multicast configuration */
1281         if (rte_eth_allmulticast_get(port_id) == 1)
1282                 rte_eth_allmulticast_enable(port_id);
1283         else if (rte_eth_allmulticast_get(port_id) == 0)
1284                 rte_eth_allmulticast_disable(port_id);
1285 }
1286
1287 int
1288 rte_eth_dev_start(uint16_t port_id)
1289 {
1290         struct rte_eth_dev *dev;
1291         struct rte_eth_dev_info dev_info;
1292         int diag;
1293
1294         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1295
1296         dev = &rte_eth_devices[port_id];
1297
1298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1299
1300         if (dev->data->dev_started != 0) {
1301                 RTE_ETHDEV_LOG(INFO,
1302                         "Device with port_id=%"PRIu16" already started\n",
1303                         port_id);
1304                 return 0;
1305         }
1306
1307         rte_eth_dev_info_get(port_id, &dev_info);
1308
1309         /* Let's restore the MAC now if the device does not support live change */
1310         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1311                 rte_eth_dev_mac_restore(dev, &dev_info);
1312
1313         diag = (*dev->dev_ops->dev_start)(dev);
1314         if (diag == 0)
1315                 dev->data->dev_started = 1;
1316         else
1317                 return eth_err(port_id, diag);
1318
1319         rte_eth_dev_config_restore(dev, &dev_info, port_id);
1320
1321         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1322                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1323                 (*dev->dev_ops->link_update)(dev, 0);
1324         }
1325         return 0;
1326 }
1327
1328 void
1329 rte_eth_dev_stop(uint16_t port_id)
1330 {
1331         struct rte_eth_dev *dev;
1332
1333         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1334         dev = &rte_eth_devices[port_id];
1335
1336         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1337
1338         if (dev->data->dev_started == 0) {
1339                 RTE_ETHDEV_LOG(INFO,
1340                         "Device with port_id=%"PRIu16" already stopped\n",
1341                         port_id);
1342                 return;
1343         }
1344
1345         dev->data->dev_started = 0;
1346         (*dev->dev_ops->dev_stop)(dev);
1347 }
1348
1349 int
1350 rte_eth_dev_set_link_up(uint16_t port_id)
1351 {
1352         struct rte_eth_dev *dev;
1353
1354         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1355
1356         dev = &rte_eth_devices[port_id];
1357
1358         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1359         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1360 }
1361
1362 int
1363 rte_eth_dev_set_link_down(uint16_t port_id)
1364 {
1365         struct rte_eth_dev *dev;
1366
1367         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1368
1369         dev = &rte_eth_devices[port_id];
1370
1371         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1372         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1373 }
1374
1375 void
1376 rte_eth_dev_close(uint16_t port_id)
1377 {
1378         struct rte_eth_dev *dev;
1379
1380         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1381         dev = &rte_eth_devices[port_id];
1382
1383         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1384         dev->data->dev_started = 0;
1385         (*dev->dev_ops->dev_close)(dev);
1386
1387         dev->data->nb_rx_queues = 0;
1388         rte_free(dev->data->rx_queues);
1389         dev->data->rx_queues = NULL;
1390         dev->data->nb_tx_queues = 0;
1391         rte_free(dev->data->tx_queues);
1392         dev->data->tx_queues = NULL;
1393 }
1394
1395 int
1396 rte_eth_dev_reset(uint16_t port_id)
1397 {
1398         struct rte_eth_dev *dev;
1399         int ret;
1400
1401         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1402         dev = &rte_eth_devices[port_id];
1403
1404         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1405
1406         rte_eth_dev_stop(port_id);
1407         ret = dev->dev_ops->dev_reset(dev);
1408
1409         return eth_err(port_id, ret);
1410 }
1411
1412 int __rte_experimental
1413 rte_eth_dev_is_removed(uint16_t port_id)
1414 {
1415         struct rte_eth_dev *dev;
1416         int ret;
1417
1418         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1419
1420         dev = &rte_eth_devices[port_id];
1421
1422         if (dev->state == RTE_ETH_DEV_REMOVED)
1423                 return 1;
1424
1425         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1426
1427         ret = dev->dev_ops->is_removed(dev);
1428         if (ret != 0)
1429                 /* Device is physically removed. */
1430                 dev->state = RTE_ETH_DEV_REMOVED;
1431
1432         return ret;
1433 }
1434
1435 int
1436 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1437                        uint16_t nb_rx_desc, unsigned int socket_id,
1438                        const struct rte_eth_rxconf *rx_conf,
1439                        struct rte_mempool *mp)
1440 {
1441         int ret;
1442         uint32_t mbp_buf_size;
1443         struct rte_eth_dev *dev;
1444         struct rte_eth_dev_info dev_info;
1445         struct rte_eth_rxconf local_conf;
1446         void **rxq;
1447
1448         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1449
1450         dev = &rte_eth_devices[port_id];
1451         if (rx_queue_id >= dev->data->nb_rx_queues) {
1452                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1453                 return -EINVAL;
1454         }
1455
1456         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1457         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1458
1459         /*
1460          * Check the size of the mbuf data buffer.
1461          * This value must be provided in the private data of the memory pool.
1462          * First check that the memory pool has valid private data.
1463          */
1464         rte_eth_dev_info_get(port_id, &dev_info);
1465         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1466                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1467                         mp->name, (int)mp->private_data_size,
1468                         (int)sizeof(struct rte_pktmbuf_pool_private));
1469                 return -ENOSPC;
1470         }
1471         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1472
1473         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1474                 RTE_ETHDEV_LOG(ERR,
1475                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1476                         mp->name, (int)mbp_buf_size,
1477                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1478                         (int)RTE_PKTMBUF_HEADROOM,
1479                         (int)dev_info.min_rx_bufsize);
1480                 return -EINVAL;
1481         }
1482
1483         /* Use the default specified by the driver if nb_rx_desc is zero */
1484         if (nb_rx_desc == 0) {
1485                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1486                 /* If driver default is also zero, fall back on EAL default */
1487                 if (nb_rx_desc == 0)
1488                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1489         }
1490
1491         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1492                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1493                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1494
1495                 RTE_ETHDEV_LOG(ERR,
1496                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1497                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1498                         dev_info.rx_desc_lim.nb_min,
1499                         dev_info.rx_desc_lim.nb_align);
1500                 return -EINVAL;
1501         }
1502
1503         if (dev->data->dev_started &&
1504                 !(dev_info.dev_capa &
1505                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1506                 return -EBUSY;
1507
1508         if (dev->data->dev_started &&
1509                 (dev->data->rx_queue_state[rx_queue_id] !=
1510                         RTE_ETH_QUEUE_STATE_STOPPED))
1511                 return -EBUSY;
1512
1513         rxq = dev->data->rx_queues;
1514         if (rxq[rx_queue_id]) {
1515                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1516                                         -ENOTSUP);
1517                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1518                 rxq[rx_queue_id] = NULL;
1519         }
1520
1521         if (rx_conf == NULL)
1522                 rx_conf = &dev_info.default_rxconf;
1523
1524         local_conf = *rx_conf;
1525
1526         /*
1527          * If an offload has already been enabled in
1528          * rte_eth_dev_configure(), it is enabled on all queues,
1529          * so there is no need to re-enable it for this queue.
1530          * The local_conf.offloads passed to the underlying PMD
1531          * therefore carries only the offloads enabled on this
1532          * queue but not on all queues.
1533          */
1534         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1535
1536         /*
1537          * Offloads newly added for this queue are those not already
1538          * enabled in rte_eth_dev_configure(), and they must be of the
1539          * per-queue type. A pure per-port offload cannot be enabled on
1540          * one queue while disabled on another, so it can never be
1541          * newly added here: it must have been enabled (or not) for all
1542          * queues in rte_eth_dev_configure().
1543          */
1544         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1545              local_conf.offloads) {
1546                 RTE_ETHDEV_LOG(ERR,
1547                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1548                         "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
1549                         port_id, rx_queue_id, local_conf.offloads,
1550                         dev_info.rx_queue_offload_capa,
1551                         __func__);
1552                 return -EINVAL;
1553         }
1554
1555         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1556                                               socket_id, &local_conf, mp);
1557         if (!ret) {
1558                 if (!dev->data->min_rx_buf_size ||
1559                     dev->data->min_rx_buf_size > mbp_buf_size)
1560                         dev->data->min_rx_buf_size = mbp_buf_size;
1561         }
1562
1563         return eth_err(port_id, ret);
1564 }
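
/*
 * Usage sketch (illustrative only, not part of this library): a typical
 * application call to the function above, assuming port_id has already
 * been configured via rte_eth_dev_configure() and "mbuf_pool" was
 * created with rte_pktmbuf_pool_create(). Passing 0 descriptors and a
 * NULL rx_conf selects the driver/EAL defaults handled above:
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 0,
 *                                  rte_eth_dev_socket_id(port_id),
 *                                  NULL, mbuf_pool);
 *     if (ret < 0)
 *         rte_exit(EXIT_FAILURE, "RX queue setup failed: %d\n", ret);
 */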
1565
1566 int
1567 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1568                        uint16_t nb_tx_desc, unsigned int socket_id,
1569                        const struct rte_eth_txconf *tx_conf)
1570 {
1571         struct rte_eth_dev *dev;
1572         struct rte_eth_dev_info dev_info;
1573         struct rte_eth_txconf local_conf;
1574         void **txq;
1575
1576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1577
1578         dev = &rte_eth_devices[port_id];
1579         if (tx_queue_id >= dev->data->nb_tx_queues) {
1580                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1581                 return -EINVAL;
1582         }
1583
1584         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1586
1587         rte_eth_dev_info_get(port_id, &dev_info);
1588
1589         /* Use the driver-specified default if nb_tx_desc is zero */
1590         if (nb_tx_desc == 0) {
1591                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1592                 /* If driver default is zero, fall back on EAL default */
1593                 if (nb_tx_desc == 0)
1594                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1595         }
1596         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1597             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1598             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1599                 RTE_ETHDEV_LOG(ERR,
1600                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n",
1601                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1602                         dev_info.tx_desc_lim.nb_min,
1603                         dev_info.tx_desc_lim.nb_align);
1604                 return -EINVAL;
1605         }
1606
1607         if (dev->data->dev_started &&
1608                 !(dev_info.dev_capa &
1609                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1610                 return -EBUSY;
1611
1612         if (dev->data->dev_started &&
1613                 (dev->data->tx_queue_state[tx_queue_id] !=
1614                         RTE_ETH_QUEUE_STATE_STOPPED))
1615                 return -EBUSY;
1616
1617         txq = dev->data->tx_queues;
1618         if (txq[tx_queue_id]) {
1619                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1620                                         -ENOTSUP);
1621                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1622                 txq[tx_queue_id] = NULL;
1623         }
1624
1625         if (tx_conf == NULL)
1626                 tx_conf = &dev_info.default_txconf;
1627
1628         local_conf = *tx_conf;
1629
1630         /*
1631          * If an offload has already been enabled in
1632          * rte_eth_dev_configure(), it is enabled on all queues,
1633          * so there is no need to re-enable it for this queue.
1634          * The local_conf.offloads passed to the underlying PMD
1635          * therefore carries only the offloads enabled on this
1636          * queue but not on all queues.
1637          */
1638         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1639
1640         /*
1641          * Offloads newly added for this queue are those not already
1642          * enabled in rte_eth_dev_configure(), and they must be of the
1643          * per-queue type. A pure per-port offload cannot be enabled on
1644          * one queue while disabled on another, so it can never be
1645          * newly added here: it must have been enabled (or not) for all
1646          * queues in rte_eth_dev_configure().
1647          */
1648         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1649              local_conf.offloads) {
1650                 RTE_ETHDEV_LOG(ERR,
1651                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1652                         "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
1653                         port_id, tx_queue_id, local_conf.offloads,
1654                         dev_info.tx_queue_offload_capa,
1655                         __func__);
1656                 return -EINVAL;
1657         }
1658
1659         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1660                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1661 }
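
/*
 * Usage sketch: the TX-side counterpart of the RX example above, again
 * deferring to the driver defaults (a single-queue port is assumed):
 *
 *     ret = rte_eth_tx_queue_setup(port_id, 0, 0,
 *                                  rte_eth_dev_socket_id(port_id),
 *                                  NULL);
 */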
1662
1663 void
1664 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1665                 void *userdata __rte_unused)
1666 {
1667         unsigned i;
1668
1669         for (i = 0; i < unsent; i++)
1670                 rte_pktmbuf_free(pkts[i]);
1671 }
1672
1673 void
1674 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1675                 void *userdata)
1676 {
1677         uint64_t *count = userdata;
1678         unsigned i;
1679
1680         for (i = 0; i < unsent; i++)
1681                 rte_pktmbuf_free(pkts[i]);
1682
1683         *count += unsent;
1684 }
1685
1686 int
1687 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1688                 buffer_tx_error_fn cbfn, void *userdata)
1689 {
1690         buffer->error_callback = cbfn;
1691         buffer->error_userdata = userdata;
1692         return 0;
1693 }
1694
1695 int
1696 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1697 {
1698         int ret = 0;
1699
1700         if (buffer == NULL)
1701                 return -EINVAL;
1702
1703         buffer->size = size;
1704         if (buffer->error_callback == NULL) {
1705                 ret = rte_eth_tx_buffer_set_err_callback(
1706                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1707         }
1708
1709         return ret;
1710 }
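
/*
 * Usage sketch tying the helpers above together: allocate a zeroed TX
 * buffer, install the counting callback, then buffer and flush packets.
 * RTE_ETH_TX_BUFFER_SIZE(), rte_eth_tx_buffer() and
 * rte_eth_tx_buffer_flush() come from rte_ethdev.h; "mbuf" and the
 * port/queue ids are assumptions:
 *
 *     static uint64_t dropped;
 *     struct rte_eth_dev_tx_buffer *buf;
 *
 *     buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32),
 *                              0, rte_eth_dev_socket_id(port_id));
 *     rte_eth_tx_buffer_init(buf, 32);
 *     rte_eth_tx_buffer_set_err_callback(buf,
 *                     rte_eth_tx_buffer_count_callback, &dropped);
 *     rte_eth_tx_buffer(port_id, queue_id, buf, mbuf);
 *     rte_eth_tx_buffer_flush(port_id, queue_id, buf);
 */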
1711
1712 int
1713 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1714 {
1715         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1716         int ret;
1717
1718         /* Validate Input Data. Bail if not valid or not supported. */
1719         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1720         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1721
1722         /* Call driver to free pending mbufs. */
1723         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1724                                                free_cnt);
1725         return eth_err(port_id, ret);
1726 }
1727
1728 void
1729 rte_eth_promiscuous_enable(uint16_t port_id)
1730 {
1731         struct rte_eth_dev *dev;
1732
1733         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1734         dev = &rte_eth_devices[port_id];
1735
1736         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1737         (*dev->dev_ops->promiscuous_enable)(dev);
1738         dev->data->promiscuous = 1;
1739 }
1740
1741 void
1742 rte_eth_promiscuous_disable(uint16_t port_id)
1743 {
1744         struct rte_eth_dev *dev;
1745
1746         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1747         dev = &rte_eth_devices[port_id];
1748
1749         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1750         dev->data->promiscuous = 0;
1751         (*dev->dev_ops->promiscuous_disable)(dev);
1752 }
1753
1754 int
1755 rte_eth_promiscuous_get(uint16_t port_id)
1756 {
1757         struct rte_eth_dev *dev;
1758
1759         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1760
1761         dev = &rte_eth_devices[port_id];
1762         return dev->data->promiscuous;
1763 }
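
/*
 * Usage sketch: promiscuous mode is tracked as a simple flag in
 * dev->data, so the getter is cheap and reflects the last successful
 * enable/disable:
 *
 *     rte_eth_promiscuous_enable(port_id);
 *     if (rte_eth_promiscuous_get(port_id) == 1)
 *         printf("port %u is now promiscuous\n", port_id);
 */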
1764
1765 void
1766 rte_eth_allmulticast_enable(uint16_t port_id)
1767 {
1768         struct rte_eth_dev *dev;
1769
1770         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1771         dev = &rte_eth_devices[port_id];
1772
1773         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1774         (*dev->dev_ops->allmulticast_enable)(dev);
1775         dev->data->all_multicast = 1;
1776 }
1777
1778 void
1779 rte_eth_allmulticast_disable(uint16_t port_id)
1780 {
1781         struct rte_eth_dev *dev;
1782
1783         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1784         dev = &rte_eth_devices[port_id];
1785
1786         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1787         dev->data->all_multicast = 0;
1788         (*dev->dev_ops->allmulticast_disable)(dev);
1789 }
1790
1791 int
1792 rte_eth_allmulticast_get(uint16_t port_id)
1793 {
1794         struct rte_eth_dev *dev;
1795
1796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1797
1798         dev = &rte_eth_devices[port_id];
1799         return dev->data->all_multicast;
1800 }
1801
1802 void
1803 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1804 {
1805         struct rte_eth_dev *dev;
1806
1807         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1808         dev = &rte_eth_devices[port_id];
1809
1810         if (dev->data->dev_conf.intr_conf.lsc &&
1811             dev->data->dev_started)
1812                 rte_eth_linkstatus_get(dev, eth_link);
1813         else {
1814                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1815                 (*dev->dev_ops->link_update)(dev, 1);
1816                 *eth_link = dev->data->dev_link;
1817         }
1818 }
1819
1820 void
1821 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1822 {
1823         struct rte_eth_dev *dev;
1824
1825         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1826         dev = &rte_eth_devices[port_id];
1827
1828         if (dev->data->dev_conf.intr_conf.lsc &&
1829             dev->data->dev_started)
1830                 rte_eth_linkstatus_get(dev, eth_link);
1831         else {
1832                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1833                 (*dev->dev_ops->link_update)(dev, 0);
1834                 *eth_link = dev->data->dev_link;
1835         }
1836 }
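
/*
 * Usage sketch: a non-blocking poll for link-up after
 * rte_eth_dev_start(); the retry budget is an arbitrary assumption:
 *
 *     struct rte_eth_link link;
 *     int retries = 90;
 *
 *     do {
 *         rte_eth_link_get_nowait(port_id, &link);
 *         if (link.link_status == ETH_LINK_UP)
 *             break;
 *         rte_delay_ms(100);
 *     } while (--retries > 0);
 */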
1837
1838 int
1839 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1840 {
1841         struct rte_eth_dev *dev;
1842
1843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1844
1845         dev = &rte_eth_devices[port_id];
1846         memset(stats, 0, sizeof(*stats));
1847
1848         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1849         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1850         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1851 }
1852
1853 int
1854 rte_eth_stats_reset(uint16_t port_id)
1855 {
1856         struct rte_eth_dev *dev;
1857
1858         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1859         dev = &rte_eth_devices[port_id];
1860
1861         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1862         (*dev->dev_ops->stats_reset)(dev);
1863         dev->data->rx_mbuf_alloc_failed = 0;
1864
1865         return 0;
1866 }
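
/*
 * Usage sketch: sampling and then clearing the basic counters. Note
 * that rte_eth_stats_get() zeroes *stats up front, so a failing call
 * does not leave stale values from a previous read:
 *
 *     struct rte_eth_stats st;
 *
 *     if (rte_eth_stats_get(port_id, &st) == 0)
 *         printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *                st.ipackets, st.opackets, st.imissed);
 *     rte_eth_stats_reset(port_id);
 */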
1867
1868 static inline int
1869 get_xstats_basic_count(struct rte_eth_dev *dev)
1870 {
1871         uint16_t nb_rxqs, nb_txqs;
1872         int count;
1873
1874         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1875         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1876
1877         count = RTE_NB_STATS;
1878         count += nb_rxqs * RTE_NB_RXQ_STATS;
1879         count += nb_txqs * RTE_NB_TXQ_STATS;
1880
1881         return count;
1882 }
1883
1884 static int
1885 get_xstats_count(uint16_t port_id)
1886 {
1887         struct rte_eth_dev *dev;
1888         int count;
1889
1890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1891         dev = &rte_eth_devices[port_id];
1892         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1893                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1894                                 NULL, 0);
1895                 if (count < 0)
1896                         return eth_err(port_id, count);
1897         }
1898         if (dev->dev_ops->xstats_get_names != NULL) {
1899                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1900                 if (count < 0)
1901                         return eth_err(port_id, count);
1902         } else
1903                 count = 0;
1904
1905
1906         count += get_xstats_basic_count(dev);
1907
1908         return count;
1909 }
1910
1911 int
1912 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1913                 uint64_t *id)
1914 {
1915         int cnt_xstats, idx_xstat;
1916
1917         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1918
1919         if (!id) {
1920                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
1921                 return -ENOMEM;
1922         }
1923
1924         if (!xstat_name) {
1925                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
1926                 return -ENOMEM;
1927         }
1928
1929         /* Get count */
1930         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1931         if (cnt_xstats < 0) {
1932                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
1933                 return -ENODEV;
1934         }
1935
1936         /* Get id-name lookup table */
1937         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1938
1939         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1940                         port_id, xstats_names, cnt_xstats, NULL)) {
1941                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
1942                 return -1;
1943         }
1944
1945         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1946                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1947                         *id = idx_xstat;
1948                         return 0;
1949                 }
1950         }
1951
1952         return -EINVAL;
1953 }
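
/*
 * Usage sketch: resolving a single extended statistic by name and then
 * reading it by id ("rx_good_packets" is one of the generic ethdev
 * stats names, so it exists on every port):
 *
 *     uint64_t id, value;
 *
 *     if (rte_eth_xstats_get_id_by_name(port_id,
 *                     "rx_good_packets", &id) == 0 &&
 *         rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *             printf("rx_good_packets = %" PRIu64 "\n", value);
 */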
1954
1955 /* retrieve basic stats names */
1956 static int
1957 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1958         struct rte_eth_xstat_name *xstats_names)
1959 {
1960         int cnt_used_entries = 0;
1961         uint32_t idx, id_queue;
1962         uint16_t num_q;
1963
1964         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1965                 snprintf(xstats_names[cnt_used_entries].name,
1966                         sizeof(xstats_names[0].name),
1967                         "%s", rte_stats_strings[idx].name);
1968                 cnt_used_entries++;
1969         }
1970         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1971         for (id_queue = 0; id_queue < num_q; id_queue++) {
1972                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1973                         snprintf(xstats_names[cnt_used_entries].name,
1974                                 sizeof(xstats_names[0].name),
1975                                 "rx_q%u%s",
1976                                 id_queue, rte_rxq_stats_strings[idx].name);
1977                         cnt_used_entries++;
1978                 }
1979
1980         }
1981         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1982         for (id_queue = 0; id_queue < num_q; id_queue++) {
1983                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1984                         snprintf(xstats_names[cnt_used_entries].name,
1985                                 sizeof(xstats_names[0].name),
1986                                 "tx_q%u%s",
1987                                 id_queue, rte_txq_stats_strings[idx].name);
1988                         cnt_used_entries++;
1989                 }
1990         }
1991         return cnt_used_entries;
1992 }
1993
1994 /* retrieve ethdev extended statistics names */
1995 int
1996 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1997         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1998         uint64_t *ids)
1999 {
2000         struct rte_eth_xstat_name *xstats_names_copy;
2001         unsigned int no_basic_stat_requested = 1;
2002         unsigned int no_ext_stat_requested = 1;
2003         unsigned int expected_entries;
2004         unsigned int basic_count;
2005         struct rte_eth_dev *dev;
2006         unsigned int i;
2007         int ret;
2008
2009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2010         dev = &rte_eth_devices[port_id];
2011
2012         basic_count = get_xstats_basic_count(dev);
2013         ret = get_xstats_count(port_id);
2014         if (ret < 0)
2015                 return ret;
2016         expected_entries = (unsigned int)ret;
2017
2018         /* Return max number of stats if no ids given */
2019         if (!ids) {
2020                 if (!xstats_names)
2021                         return expected_entries;
2022                 else if (xstats_names && size < expected_entries)
2023                         return expected_entries;
2024         }
2025
2026         if (ids && !xstats_names)
2027                 return -EINVAL;
2028
2029         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2030                 uint64_t ids_copy[size];
2031
2032                 for (i = 0; i < size; i++) {
2033                         if (ids[i] < basic_count) {
2034                                 no_basic_stat_requested = 0;
2035                                 break;
2036                         }
2037
2038                         /*
2039                          * Convert ids to xstats ids that PMD knows.
2040                          * ids known by user are basic + extended stats.
2041                          */
2042                         ids_copy[i] = ids[i] - basic_count;
2043                 }
2044
2045                 if (no_basic_stat_requested)
2046                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2047                                         xstats_names, ids_copy, size);
2048         }
2049
2050         /* Retrieve all stats */
2051         if (!ids) {
2052                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2053                                 expected_entries);
2054                 if (num_stats < 0 || num_stats > (int)expected_entries)
2055                         return num_stats;
2056                 else
2057                         return expected_entries;
2058         }
2059
2060         xstats_names_copy = calloc(expected_entries,
2061                 sizeof(struct rte_eth_xstat_name));
2062
2063         if (!xstats_names_copy) {
2064                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2065                 return -ENOMEM;
2066         }
2067
2068         if (ids) {
2069                 for (i = 0; i < size; i++) {
2070                         if (ids[i] >= basic_count) {
2071                                 no_ext_stat_requested = 0;
2072                                 break;
2073                         }
2074                 }
2075         }
2076
2077         /* Fill xstats_names_copy structure */
2078         if (ids && no_ext_stat_requested) {
2079                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2080         } else {
2081                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2082                         expected_entries);
2083                 if (ret < 0) {
2084                         free(xstats_names_copy);
2085                         return ret;
2086                 }
2087         }
2088
2089         /* Filter stats */
2090         for (i = 0; i < size; i++) {
2091                 if (ids[i] >= expected_entries) {
2092                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2093                         free(xstats_names_copy);
2094                         return -1;
2095                 }
2096                 xstats_names[i] = xstats_names_copy[ids[i]];
2097         }
2098
2099         free(xstats_names_copy);
2100         return size;
2101 }
2102
2103 int
2104 rte_eth_xstats_get_names(uint16_t port_id,
2105         struct rte_eth_xstat_name *xstats_names,
2106         unsigned int size)
2107 {
2108         struct rte_eth_dev *dev;
2109         int cnt_used_entries;
2110         int cnt_expected_entries;
2111         int cnt_driver_entries;
2112
2113         cnt_expected_entries = get_xstats_count(port_id);
2114         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2115                         (int)size < cnt_expected_entries)
2116                 return cnt_expected_entries;
2117
2118         /* port_id checked in get_xstats_count() */
2119         dev = &rte_eth_devices[port_id];
2120
2121         cnt_used_entries = rte_eth_basic_stats_get_names(
2122                 dev, xstats_names);
2123
2124         if (dev->dev_ops->xstats_get_names != NULL) {
2125                 /* If there are any driver-specific xstats, append them
2126                  * to end of list.
2127                  */
2128                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2129                         dev,
2130                         xstats_names + cnt_used_entries,
2131                         size - cnt_used_entries);
2132                 if (cnt_driver_entries < 0)
2133                         return eth_err(port_id, cnt_driver_entries);
2134                 cnt_used_entries += cnt_driver_entries;
2135         }
2136
2137         return cnt_used_entries;
2138 }
2139
2140
2141 static int
2142 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2143 {
2144         struct rte_eth_dev *dev;
2145         struct rte_eth_stats eth_stats;
2146         unsigned int count = 0, i, q;
2147         uint64_t val, *stats_ptr;
2148         uint16_t nb_rxqs, nb_txqs;
2149         int ret;
2150
2151         ret = rte_eth_stats_get(port_id, &eth_stats);
2152         if (ret < 0)
2153                 return ret;
2154
2155         dev = &rte_eth_devices[port_id];
2156
2157         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2158         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2159
2160         /* global stats */
2161         for (i = 0; i < RTE_NB_STATS; i++) {
2162                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2163                                         rte_stats_strings[i].offset);
2164                 val = *stats_ptr;
2165                 xstats[count++].value = val;
2166         }
2167
2168         /* per-rxq stats */
2169         for (q = 0; q < nb_rxqs; q++) {
2170                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2171                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2172                                         rte_rxq_stats_strings[i].offset +
2173                                         q * sizeof(uint64_t));
2174                         val = *stats_ptr;
2175                         xstats[count++].value = val;
2176                 }
2177         }
2178
2179         /* per-txq stats */
2180         for (q = 0; q < nb_txqs; q++) {
2181                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2182                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2183                                         rte_txq_stats_strings[i].offset +
2184                                         q * sizeof(uint64_t));
2185                         val = *stats_ptr;
2186                         xstats[count++].value = val;
2187                 }
2188         }
2189         return count;
2190 }
2191
2192 /* retrieve ethdev extended statistics */
2193 int
2194 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2195                          uint64_t *values, unsigned int size)
2196 {
2197         unsigned int no_basic_stat_requested = 1;
2198         unsigned int no_ext_stat_requested = 1;
2199         unsigned int num_xstats_filled;
2200         unsigned int basic_count;
2201         uint16_t expected_entries;
2202         struct rte_eth_dev *dev;
2203         unsigned int i;
2204         int ret;
2205
2206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2207         ret = get_xstats_count(port_id);
2208         if (ret < 0)
2209                 return ret;
2210         expected_entries = (uint16_t)ret;
2211         struct rte_eth_xstat xstats[expected_entries];
2212         dev = &rte_eth_devices[port_id];
2213         basic_count = get_xstats_basic_count(dev);
2214
2215         /* Return max number of stats if no ids given */
2216         if (!ids) {
2217                 if (!values)
2218                         return expected_entries;
2219                 else if (values && size < expected_entries)
2220                         return expected_entries;
2221         }
2222
2223         if (ids && !values)
2224                 return -EINVAL;
2225
2226         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2227                 /* basic_count was already computed above, reuse it */
2228                 uint64_t ids_copy[size];
2229
2230                 for (i = 0; i < size; i++) {
2231                         if (ids[i] < basic_count) {
2232                                 no_basic_stat_requested = 0;
2233                                 break;
2234                         }
2235
2236                         /*
2237                          * Convert ids to xstats ids that PMD knows.
2238                          * ids known by user are basic + extended stats.
2239                          */
2240                         ids_copy[i] = ids[i] - basic_count;
2241                 }
2242
2243                 if (no_basic_stat_requested)
2244                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2245                                         values, size);
2246         }
2247
2248         if (ids) {
2249                 for (i = 0; i < size; i++) {
2250                         if (ids[i] >= basic_count) {
2251                                 no_ext_stat_requested = 0;
2252                                 break;
2253                         }
2254                 }
2255         }
2256
2257         /* Fill the xstats structure */
2258         if (ids && no_ext_stat_requested)
2259                 ret = rte_eth_basic_stats_get(port_id, xstats);
2260         else
2261                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2262
2263         if (ret < 0)
2264                 return ret;
2265         num_xstats_filled = (unsigned int)ret;
2266
2267         /* Return all stats */
2268         if (!ids) {
2269                 for (i = 0; i < num_xstats_filled; i++)
2270                         values[i] = xstats[i].value;
2271                 return expected_entries;
2272         }
2273
2274         /* Filter stats */
2275         for (i = 0; i < size; i++) {
2276                 if (ids[i] >= expected_entries) {
2277                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2278                         return -1;
2279                 }
2280                 values[i] = xstats[ids[i]].value;
2281         }
2282         return size;
2283 }
2284
2285 int
2286 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2287         unsigned int n)
2288 {
2289         struct rte_eth_dev *dev;
2290         unsigned int count = 0, i;
2291         signed int xcount = 0;
2292         uint16_t nb_rxqs, nb_txqs;
2293         int ret;
2294
2295         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2296
2297         dev = &rte_eth_devices[port_id];
2298
2299         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2300         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2301
2302         /* Return generic statistics */
2303         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2304                 (nb_txqs * RTE_NB_TXQ_STATS);
2305
2306         /* implemented by the driver */
2307         if (dev->dev_ops->xstats_get != NULL) {
2308                 /* Retrieve the xstats from the driver at the end of the
2309                  * xstats struct.
2310                  */
2311                 xcount = (*dev->dev_ops->xstats_get)(dev,
2312                                      xstats ? xstats + count : NULL,
2313                                      (n > count) ? n - count : 0);
2314
2315                 if (xcount < 0)
2316                         return eth_err(port_id, xcount);
2317         }
2318
2319         if (n < count + xcount || xstats == NULL)
2320                 return count + xcount;
2321
2322         /* now fill the xstats structure */
2323         ret = rte_eth_basic_stats_get(port_id, xstats);
2324         if (ret < 0)
2325                 return ret;
2326         count = ret;
2327
2328         for (i = 0; i < count; i++)
2329                 xstats[i].id = i;
2330         /* add an offset to driver-specific stats */
2331         for ( ; i < count + xcount; i++)
2332                 xstats[i].id += count;
2333
2334         return count + xcount;
2335 }
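
/*
 * Usage sketch: the usual two-call pattern for dumping all xstats. A
 * first call with a NULL array returns the required count; names and
 * values are then fetched into caller-allocated arrays (error checks
 * elided for brevity):
 *
 *     int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *     struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *     if (rte_eth_xstats_get(port_id, xs, n) == n &&
 *         rte_eth_xstats_get_names(port_id, names, n) == n)
 *             for (i = 0; i < n; i++)
 *                     printf("%s: %" PRIu64 "\n",
 *                            names[xs[i].id].name, xs[i].value);
 *     free(xs);
 *     free(names);
 */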
2336
2337 /* reset ethdev extended statistics */
2338 void
2339 rte_eth_xstats_reset(uint16_t port_id)
2340 {
2341         struct rte_eth_dev *dev;
2342
2343         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2344         dev = &rte_eth_devices[port_id];
2345
2346         /* implemented by the driver */
2347         if (dev->dev_ops->xstats_reset != NULL) {
2348                 (*dev->dev_ops->xstats_reset)(dev);
2349                 return;
2350         }
2351
2352         /* fallback to default */
2353         rte_eth_stats_reset(port_id);
2354 }
2355
2356 static int
2357 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2358                 uint8_t is_rx)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363
2364         dev = &rte_eth_devices[port_id];
2365
2366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2367
2368         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2369                 return -EINVAL;
2370
2371         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2372                 return -EINVAL;
2373
2374         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2375                 return -EINVAL;
2376
2377         return (*dev->dev_ops->queue_stats_mapping_set)
2378                         (dev, queue_id, stat_idx, is_rx);
2379 }
2380
2381
2382 int
2383 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2384                 uint8_t stat_idx)
2385 {
2386         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2387                                                 stat_idx, STAT_QMAP_TX));
2388 }
2389
2390
2391 int
2392 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2393                 uint8_t stat_idx)
2394 {
2395         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2396                                                 stat_idx, STAT_QMAP_RX));
2397 }
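
/*
 * Usage sketch: on NICs whose PMD implements queue_stats_mapping_set
 * (useful when queues outnumber the RTE_ETHDEV_QUEUE_STAT_CNTRS
 * counters), steer RX queue 4 into stats slot 0 so its traffic shows
 * up as q_ipackets[0]/q_ibytes[0]:
 *
 *     if (rte_eth_dev_set_rx_queue_stats_mapping(port_id, 4, 0) != 0)
 *         printf("per-queue stats mapping not supported\n");
 */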
2398
2399 int
2400 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2401 {
2402         struct rte_eth_dev *dev;
2403
2404         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2405         dev = &rte_eth_devices[port_id];
2406
2407         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2408         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2409                                                         fw_version, fw_size));
2410 }
2411
2412 void
2413 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2414 {
2415         struct rte_eth_dev *dev;
2416         const struct rte_eth_desc_lim lim = {
2417                 .nb_max = UINT16_MAX,
2418                 .nb_min = 0,
2419                 .nb_align = 1,
2420         };
2421
2422         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2423         dev = &rte_eth_devices[port_id];
2424
2425         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2426         dev_info->rx_desc_lim = lim;
2427         dev_info->tx_desc_lim = lim;
2428         dev_info->device = dev->device;
2429
2430         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2431         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2432         dev_info->driver_name = dev->device->driver->name;
2433         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2434         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2435
2436         dev_info->dev_flags = &dev->data->dev_flags;
2437 }
2438
2439 int
2440 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2441                                  uint32_t *ptypes, int num)
2442 {
2443         int i, j;
2444         struct rte_eth_dev *dev;
2445         const uint32_t *all_ptypes;
2446
2447         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2448         dev = &rte_eth_devices[port_id];
2449         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2450         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2451
2452         if (!all_ptypes)
2453                 return 0;
2454
2455         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2456                 if (all_ptypes[i] & ptype_mask) {
2457                         if (j < num)
2458                                 ptypes[j] = all_ptypes[i];
2459                         j++;
2460                 }
2461
2462         return j;
2463 }
2464
2465 void
2466 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2467 {
2468         struct rte_eth_dev *dev;
2469
2470         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2471         dev = &rte_eth_devices[port_id];
2472         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2473 }
2474
2475
2476 int
2477 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2478 {
2479         struct rte_eth_dev *dev;
2480
2481         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2482
2483         dev = &rte_eth_devices[port_id];
2484         *mtu = dev->data->mtu;
2485         return 0;
2486 }
2487
2488 int
2489 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2490 {
2491         int ret;
2492         struct rte_eth_dev *dev;
2493
2494         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2495         dev = &rte_eth_devices[port_id];
2496         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2497
2498         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2499         if (!ret)
2500                 dev->data->mtu = mtu;
2501
2502         return eth_err(port_id, ret);
2503 }
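
/*
 * Usage sketch: raising the MTU for jumbo frames; on success the new
 * value is cached in dev->data->mtu, so the getter reads back 9000:
 *
 *     uint16_t mtu;
 *
 *     if (rte_eth_dev_set_mtu(port_id, 9000) == 0)
 *         rte_eth_dev_get_mtu(port_id, &mtu);
 */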
2504
2505 int
2506 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2507 {
2508         struct rte_eth_dev *dev;
2509         int ret;
2510
2511         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2512         dev = &rte_eth_devices[port_id];
2513         if (!(dev->data->dev_conf.rxmode.offloads &
2514               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2515                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2516                         port_id);
2517                 return -ENOSYS;
2518         }
2519
2520         if (vlan_id > 4095) {
2521                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2522                         port_id, vlan_id);
2523                 return -EINVAL;
2524         }
2525         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2526
2527         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2528         if (ret == 0) {
2529                 struct rte_vlan_filter_conf *vfc;
2530                 int vidx;
2531                 int vbit;
2532
2533                 vfc = &dev->data->vlan_filter_conf;
2534                 vidx = vlan_id / 64;
2535                 vbit = vlan_id % 64;
2536
2537                 if (on)
2538                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2539                 else
2540                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2541         }
2542
2543         return eth_err(port_id, ret);
2544 }
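
/*
 * Usage sketch: DEV_RX_OFFLOAD_VLAN_FILTER must be requested at
 * configure time before an id such as VLAN 100 can be accepted here
 * (queue setup and start steps elided):
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);
 *     ...
 *     rte_eth_dev_vlan_filter(port_id, 100, 1);
 */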
2545
2546 int
2547 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2548                                     int on)
2549 {
2550         struct rte_eth_dev *dev;
2551
2552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2553         dev = &rte_eth_devices[port_id];
2554         if (rx_queue_id >= dev->data->nb_rx_queues) {
2555                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
2556                 return -EINVAL;
2557         }
2558
2559         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2560         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2561
2562         return 0;
2563 }
2564
2565 int
2566 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2567                                 enum rte_vlan_type vlan_type,
2568                                 uint16_t tpid)
2569 {
2570         struct rte_eth_dev *dev;
2571
2572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2573         dev = &rte_eth_devices[port_id];
2574         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2575
2576         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2577                                                                tpid));
2578 }
2579
2580 int
2581 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2582 {
2583         struct rte_eth_dev *dev;
2584         int ret = 0;
2585         int mask = 0;
2586         int cur, org = 0;
2587         uint64_t orig_offloads;
2588
2589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2590         dev = &rte_eth_devices[port_id];
2591
2592         /* save original values in case of failure */
2593         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2594
2595         /* check which options were changed by the application */
2596         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2597         org = !!(dev->data->dev_conf.rxmode.offloads &
2598                  DEV_RX_OFFLOAD_VLAN_STRIP);
2599         if (cur != org) {
2600                 if (cur)
2601                         dev->data->dev_conf.rxmode.offloads |=
2602                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2603                 else
2604                         dev->data->dev_conf.rxmode.offloads &=
2605                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2606                 mask |= ETH_VLAN_STRIP_MASK;
2607         }
2608
2609         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2610         org = !!(dev->data->dev_conf.rxmode.offloads &
2611                  DEV_RX_OFFLOAD_VLAN_FILTER);
2612         if (cur != org) {
2613                 if (cur)
2614                         dev->data->dev_conf.rxmode.offloads |=
2615                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2616                 else
2617                         dev->data->dev_conf.rxmode.offloads &=
2618                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2619                 mask |= ETH_VLAN_FILTER_MASK;
2620         }
2621
2622         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2623         org = !!(dev->data->dev_conf.rxmode.offloads &
2624                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2625         if (cur != org) {
2626                 if (cur)
2627                         dev->data->dev_conf.rxmode.offloads |=
2628                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2629                 else
2630                         dev->data->dev_conf.rxmode.offloads &=
2631                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2632                 mask |= ETH_VLAN_EXTEND_MASK;
2633         }
2634
2635         /* no change */
2636         if (mask == 0)
2637                 return ret;
2638
2639         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2640         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2641         if (ret) {
2642                 /* hit an error, restore the original values */
2643                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2644         }
2645
2646         return eth_err(port_id, ret);
2647 }
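
/*
 * Usage sketch: offload_mask is the full desired VLAN offload state,
 * not a delta, so read-modify-write with the getter below to flip a
 * single flag:
 *
 *     int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     mask |= ETH_VLAN_STRIP_OFFLOAD;
 *     rte_eth_dev_set_vlan_offload(port_id, mask);
 */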
2648
2649 int
2650 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2651 {
2652         struct rte_eth_dev *dev;
2653         int ret = 0;
2654
2655         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2656         dev = &rte_eth_devices[port_id];
2657
2658         if (dev->data->dev_conf.rxmode.offloads &
2659             DEV_RX_OFFLOAD_VLAN_STRIP)
2660                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2661
2662         if (dev->data->dev_conf.rxmode.offloads &
2663             DEV_RX_OFFLOAD_VLAN_FILTER)
2664                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2665
2666         if (dev->data->dev_conf.rxmode.offloads &
2667             DEV_RX_OFFLOAD_VLAN_EXTEND)
2668                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2669
2670         return ret;
2671 }
2672
2673 int
2674 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2675 {
2676         struct rte_eth_dev *dev;
2677
2678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2679         dev = &rte_eth_devices[port_id];
2680         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2681
2682         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2683 }
2684
2685 int
2686 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2687 {
2688         struct rte_eth_dev *dev;
2689
2690         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2691         dev = &rte_eth_devices[port_id];
2692         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2693         memset(fc_conf, 0, sizeof(*fc_conf));
2694         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2695 }
2696
2697 int
2698 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2699 {
2700         struct rte_eth_dev *dev;
2701
2702         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2703         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2704                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
2705                 return -EINVAL;
2706         }
2707
2708         dev = &rte_eth_devices[port_id];
2709         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2710         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2711 }
2712
2713 int
2714 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2715                                    struct rte_eth_pfc_conf *pfc_conf)
2716 {
2717         struct rte_eth_dev *dev;
2718
2719         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2720         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2721                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
2722                 return -EINVAL;
2723         }
2724
2725         dev = &rte_eth_devices[port_id];
2726         /* High water / low water validation is device-specific */
2727         if (*dev->dev_ops->priority_flow_ctrl_set)
2728                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2729                                         (dev, pfc_conf));
2730         return -ENOTSUP;
2731 }
2732
2733 static int
2734 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2735                         uint16_t reta_size)
2736 {
2737         uint16_t i, num;
2738
2739         if (!reta_conf)
2740                 return -EINVAL;
2741
2742         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2743         for (i = 0; i < num; i++) {
2744                 if (reta_conf[i].mask)
2745                         return 0;
2746         }
2747
2748         return -EINVAL;
2749 }
2750
2751 static int
2752 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2753                          uint16_t reta_size,
2754                          uint16_t max_rxq)
2755 {
2756         uint16_t i, idx, shift;
2757
2758         if (!reta_conf)
2759                 return -EINVAL;
2760
2761         if (max_rxq == 0) {
2762                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
2763                 return -EINVAL;
2764         }
2765
2766         for (i = 0; i < reta_size; i++) {
2767                 idx = i / RTE_RETA_GROUP_SIZE;
2768                 shift = i % RTE_RETA_GROUP_SIZE;
2769                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2770                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2771                         RTE_ETHDEV_LOG(ERR,
2772                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
2773                                 idx, shift,
2774                                 reta_conf[idx].reta[shift], max_rxq);
2775                         return -EINVAL;
2776                 }
2777         }
2778
2779         return 0;
2780 }
2781
2782 int
2783 rte_eth_dev_rss_reta_update(uint16_t port_id,
2784                             struct rte_eth_rss_reta_entry64 *reta_conf,
2785                             uint16_t reta_size)
2786 {
2787         struct rte_eth_dev *dev;
2788         int ret;
2789
2790         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2791         /* Check mask bits */
2792         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2793         if (ret < 0)
2794                 return ret;
2795
2796         dev = &rte_eth_devices[port_id];
2797
2798         /* Check entry value */
2799         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2800                                 dev->data->nb_rx_queues);
2801         if (ret < 0)
2802                 return ret;
2803
2804         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2805         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2806                                                              reta_size));
2807 }
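
/*
 * Usage sketch: spreading a 128-entry redirection table round-robin
 * over nb_queues RX queues. Each rte_eth_rss_reta_entry64 covers
 * RTE_RETA_GROUP_SIZE (64) entries and its mask selects which of them
 * to update; real code should take the table size from
 * dev_info.reta_size:
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     uint16_t i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < 128; i++) {
 *         reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                 1ULL << (i % RTE_RETA_GROUP_SIZE);
 *         reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                 i % nb_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */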
2808
2809 int
2810 rte_eth_dev_rss_reta_query(uint16_t port_id,
2811                            struct rte_eth_rss_reta_entry64 *reta_conf,
2812                            uint16_t reta_size)
2813 {
2814         struct rte_eth_dev *dev;
2815         int ret;
2816
2817         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2818
2819         /* Check mask bits */
2820         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2821         if (ret < 0)
2822                 return ret;
2823
2824         dev = &rte_eth_devices[port_id];
2825         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2826         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2827                                                             reta_size));
2828 }
2829
2830 int
2831 rte_eth_dev_rss_hash_update(uint16_t port_id,
2832                             struct rte_eth_rss_conf *rss_conf)
2833 {
2834         struct rte_eth_dev *dev;
2835         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2836
2837         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2838         dev = &rte_eth_devices[port_id];
2839         rte_eth_dev_info_get(port_id, &dev_info);
2840         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2841             dev_info.flow_type_rss_offloads) {
2842                 RTE_ETHDEV_LOG(ERR,
2843                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2844                         port_id, rss_conf->rss_hf,
2845                         dev_info.flow_type_rss_offloads);
2846                 return -EINVAL;
2847         }
2848         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2849         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2850                                                                  rss_conf));
2851 }
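
/*
 * Usage sketch: restricting the RSS hash to IPv4 TCP/UDP flows; the
 * requested rss_hf must be a subset of dev_info.flow_type_rss_offloads
 * (enforced above), and a NULL rss_key conventionally keeps the
 * current key:
 *
 *     struct rte_eth_rss_conf rss = { 0 };
 *
 *     rss.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
 *     rte_eth_dev_rss_hash_update(port_id, &rss);
 */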
2852
2853 int
2854 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2855                               struct rte_eth_rss_conf *rss_conf)
2856 {
2857         struct rte_eth_dev *dev;
2858
2859         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2860         dev = &rte_eth_devices[port_id];
2861         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2862         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2863                                                                    rss_conf));
2864 }
2865
2866 int
2867 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2868                                 struct rte_eth_udp_tunnel *udp_tunnel)
2869 {
2870         struct rte_eth_dev *dev;
2871
2872         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2873         if (udp_tunnel == NULL) {
2874                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2875                 return -EINVAL;
2876         }
2877
2878         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2879                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2880                 return -EINVAL;
2881         }
2882
2883         dev = &rte_eth_devices[port_id];
2884         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2885         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2886                                                                 udp_tunnel));
2887 }
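
/*
 * Usage sketch: registering the IANA VXLAN port (4789) so the NIC can
 * recognize VXLAN-encapsulated traffic:
 *
 *     struct rte_eth_udp_tunnel tun = {
 *         .udp_port = 4789,
 *         .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     rte_eth_dev_udp_tunnel_port_add(port_id, &tun);
 */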
2888
2889 int
2890 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2891                                    struct rte_eth_udp_tunnel *udp_tunnel)
2892 {
2893         struct rte_eth_dev *dev;
2894
2895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2896         dev = &rte_eth_devices[port_id];
2897
2898         if (udp_tunnel == NULL) {
2899                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2900                 return -EINVAL;
2901         }
2902
2903         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2904                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2905                 return -EINVAL;
2906         }
2907
2908         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2909         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2910                                                                 udp_tunnel));
2911 }
2912
2913 int
2914 rte_eth_led_on(uint16_t port_id)
2915 {
2916         struct rte_eth_dev *dev;
2917
2918         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2919         dev = &rte_eth_devices[port_id];
2920         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2921         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2922 }
2923
2924 int
2925 rte_eth_led_off(uint16_t port_id)
2926 {
2927         struct rte_eth_dev *dev;
2928
2929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2930         dev = &rte_eth_devices[port_id];
2931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2932         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2933 }
2934
2935 /*
2936  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2937  * an empty spot.
2938  */
2939 static int
2940 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2941 {
2942         struct rte_eth_dev_info dev_info;
2943         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2944         unsigned i;
2945
2946         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2947         rte_eth_dev_info_get(port_id, &dev_info);
2948
2949         for (i = 0; i < dev_info.max_mac_addrs; i++)
2950                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2951                         return i;
2952
2953         return -1;
2954 }
2955
2956 static const struct ether_addr null_mac_addr;
2957
2958 int
2959 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2960                         uint32_t pool)
2961 {
2962         struct rte_eth_dev *dev;
2963         int index;
2964         uint64_t pool_mask;
2965         int ret;
2966
2967         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2968         dev = &rte_eth_devices[port_id];
2969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2970
2971         if (is_zero_ether_addr(addr)) {
2972                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
2973                         port_id);
2974                 return -EINVAL;
2975         }
2976         if (pool >= ETH_64_POOLS) {
2977                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
2978                 return -EINVAL;
2979         }
2980
2981         index = get_mac_addr_index(port_id, addr);
2982         if (index < 0) {
2983                 index = get_mac_addr_index(port_id, &null_mac_addr);
2984                 if (index < 0) {
2985                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
2986                                 port_id);
2987                         return -ENOSPC;
2988                 }
2989         } else {
2990                 pool_mask = dev->data->mac_pool_sel[index];
2991
2992                 /* if both the MAC address and pool are already set, do nothing */
2993                 if (pool_mask & (1ULL << pool))
2994                         return 0;
2995         }
2996
2997         /* Update NIC */
2998         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2999
3000         if (ret == 0) {
3001                 /* Update address in NIC data structure */
3002                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3003
3004                 /* Update pool bitmap in NIC data structure */
3005                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3006         }
3007
3008         return eth_err(port_id, ret);
3009 }
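
/*
 * Usage sketch: accepting frames for an extra, locally administered
 * unicast address on the default pool (the address bytes are an
 * arbitrary example):
 *
 *     struct ether_addr extra = {
 *         .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *     if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) == 0)
 *         printf("secondary MAC installed\n");
 */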
3010
3011 int
3012 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3013 {
3014         struct rte_eth_dev *dev;
3015         int index;
3016
3017         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3018         dev = &rte_eth_devices[port_id];
3019         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3020
3021         index = get_mac_addr_index(port_id, addr);
3022         if (index == 0) {
3023                 RTE_ETHDEV_LOG(ERR,
3024                         "Port %u: Cannot remove default MAC address\n",
3025                         port_id);
3026                 return -EADDRINUSE;
3027         } else if (index < 0)
3028                 return 0;  /* Do nothing if address wasn't found */
3029
3030         /* Update NIC */
3031         (*dev->dev_ops->mac_addr_remove)(dev, index);
3032
3033         /* Update address in NIC data structure */
3034         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3035
3036         /* reset pool bitmap */
3037         dev->data->mac_pool_sel[index] = 0;
3038
3039         return 0;
3040 }
3041
3042 int
3043 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3044 {
3045         struct rte_eth_dev *dev;
3046         int ret;
3047
3048         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3049
3050         if (!is_valid_assigned_ether_addr(addr))
3051                 return -EINVAL;
3052
3053         dev = &rte_eth_devices[port_id];
3054         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3055
3056         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3057         if (ret < 0)
3058                 return ret;
3059
3060         /* Update default address in NIC data structure */
3061         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3062
3063         return 0;
3064 }
3065
3066
3067 /*
3068  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3069  * an empty spot.
3070  */
3071 static int
3072 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3073 {
3074         struct rte_eth_dev_info dev_info;
3075         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3076         unsigned i;
3077
3078         rte_eth_dev_info_get(port_id, &dev_info);
3079         if (!dev->data->hash_mac_addrs)
3080                 return -1;
3081
3082         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3083                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3084                         ETHER_ADDR_LEN) == 0)
3085                         return i;
3086
3087         return -1;
3088 }
3089
3090 int
3091 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3092                                 uint8_t on)
3093 {
3094         int index;
3095         int ret;
3096         struct rte_eth_dev *dev;
3097
3098         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3099
3100         dev = &rte_eth_devices[port_id];
3101         if (is_zero_ether_addr(addr)) {
3102                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3103                         port_id);
3104                 return -EINVAL;
3105         }
3106
3107         index = get_hash_mac_addr_index(port_id, addr);
3108         /* Check if it's already there, and do nothing */
3109         if ((index >= 0) && on)
3110                 return 0;
3111
3112         if (index < 0) {
3113                 if (!on) {
3114                         RTE_ETHDEV_LOG(ERR,
3115                                 "Port %u: the MAC address was not set in UTA\n",
3116                                 port_id);
3117                         return -EINVAL;
3118                 }
3119
3120                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3121                 if (index < 0) {
3122                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3123                                 port_id);
3124                         return -ENOSPC;
3125                 }
3126         }
3127
3128         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3129         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3130         if (ret == 0) {
3131                 /* Update address in NIC data structure */
3132                 if (on)
3133                         ether_addr_copy(addr,
3134                                         &dev->data->hash_mac_addrs[index]);
3135                 else
3136                         ether_addr_copy(&null_mac_addr,
3137                                         &dev->data->hash_mac_addrs[index]);
3138         }
3139
3140         return eth_err(port_id, ret);
3141 }
3142
3143 int
3144 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3145 {
3146         struct rte_eth_dev *dev;
3147
3148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3149
3150         dev = &rte_eth_devices[port_id];
3151
3152         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3153         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3154                                                                        on));
3155 }
3156
3157 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3158                                         uint16_t tx_rate)
3159 {
3160         struct rte_eth_dev *dev;
3161         struct rte_eth_dev_info dev_info;
3162         struct rte_eth_link link;
3163
3164         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3165
3166         dev = &rte_eth_devices[port_id];
3167         rte_eth_dev_info_get(port_id, &dev_info);
3168         link = dev->data->dev_link;
3169
3170         if (queue_idx >= dev_info.max_tx_queues) {
3171                 RTE_ETHDEV_LOG(ERR,
3172                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3173                         port_id, queue_idx);
3174                 return -EINVAL;
3175         }
3176
3177         if (tx_rate > link.link_speed) {
3178                 RTE_ETHDEV_LOG(ERR,
3179                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3180                         tx_rate, link.link_speed);
3181                 return -EINVAL;
3182         }
3183
3184         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3185         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3186                                                         queue_idx, tx_rate));
3187 }
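
/*
 * Usage sketch (illustrative only): capping Tx queue 0 to roughly half of
 * the negotiated link rate; tx_rate uses the same unit as link_speed
 * (Mbps). Assumes the port is started and the PMD implements the op.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status)
 *		rte_eth_set_queue_rate_limit(port_id, 0, link.link_speed / 2);
 */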
3188
3189 int
3190 rte_eth_mirror_rule_set(uint16_t port_id,
3191                         struct rte_eth_mirror_conf *mirror_conf,
3192                         uint8_t rule_id, uint8_t on)
3193 {
3194         struct rte_eth_dev *dev;
3195
3196         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3197         if (mirror_conf->rule_type == 0) {
3198                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3199                 return -EINVAL;
3200         }
3201
3202         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3203                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3204                         ETH_64_POOLS - 1);
3205                 return -EINVAL;
3206         }
3207
3208         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3209              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3210             (mirror_conf->pool_mask == 0)) {
3211                 RTE_ETHDEV_LOG(ERR,
3212                         "Invalid mirror pool, pool mask cannot be 0\n");
3213                 return -EINVAL;
3214         }
3215
3216         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3217             mirror_conf->vlan.vlan_mask == 0) {
3218                 RTE_ETHDEV_LOG(ERR,
3219                         "Invalid vlan mask, vlan mask cannot be 0\n");
3220                 return -EINVAL;
3221         }
3222
3223         dev = &rte_eth_devices[port_id];
3224         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3225
3226         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3227                                                 mirror_conf, rule_id, on));
3228 }
3229
3230 int
3231 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3232 {
3233         struct rte_eth_dev *dev;
3234
3235         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3236
3237         dev = &rte_eth_devices[port_id];
3238         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3239
3240         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3241                                                                    rule_id));
3242 }
3243
3244 RTE_INIT(eth_dev_init_cb_lists)
3245 {
3246         int i;
3247
3248         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3249                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3250 }
3251
3252 int
3253 rte_eth_dev_callback_register(uint16_t port_id,
3254                         enum rte_eth_event_type event,
3255                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3256 {
3257         struct rte_eth_dev *dev;
3258         struct rte_eth_dev_callback *user_cb;
3259         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3260         uint16_t last_port;
3261
3262         if (!cb_fn)
3263                 return -EINVAL;
3264
3265         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3266                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3267                 return -EINVAL;
3268         }
3269
3270         if (port_id == RTE_ETH_ALL) {
3271                 next_port = 0;
3272                 last_port = RTE_MAX_ETHPORTS - 1;
3273         } else {
3274                 next_port = last_port = port_id;
3275         }
3276
3277         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3278
3279         do {
3280                 dev = &rte_eth_devices[next_port];
3281
3282                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3283                         if (user_cb->cb_fn == cb_fn &&
3284                                 user_cb->cb_arg == cb_arg &&
3285                                 user_cb->event == event) {
3286                                 break;
3287                         }
3288                 }
3289
3290                 /* create a new callback. */
3291                 if (user_cb == NULL) {
3292                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3293                                 sizeof(struct rte_eth_dev_callback), 0);
3294                         if (user_cb != NULL) {
3295                                 user_cb->cb_fn = cb_fn;
3296                                 user_cb->cb_arg = cb_arg;
3297                                 user_cb->event = event;
3298                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3299                                                   user_cb, next);
3300                         } else {
3301                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3302                                 rte_eth_dev_callback_unregister(port_id, event,
3303                                                                 cb_fn, cb_arg);
3304                                 return -ENOMEM;
3305                         }
3306
3307                 }
3308         } while (++next_port <= last_port);
3309
3310         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3311         return 0;
3312 }
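
/*
 * Usage sketch (illustrative only): a minimal link-status callback
 * registered on all ports with RTE_ETH_ALL. The function name "link_cb"
 * is hypothetical; its signature must match rte_eth_dev_cb_fn.
 *
 *	static int
 *	link_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      link_cb, NULL);
 */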
3313
3314 int
3315 rte_eth_dev_callback_unregister(uint16_t port_id,
3316                         enum rte_eth_event_type event,
3317                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3318 {
3319         int ret;
3320         struct rte_eth_dev *dev;
3321         struct rte_eth_dev_callback *cb, *next;
3322         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3323         uint16_t last_port;
3324
3325         if (!cb_fn)
3326                 return -EINVAL;
3327
3328         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3329                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3330                 return -EINVAL;
3331         }
3332
3333         if (port_id == RTE_ETH_ALL) {
3334                 next_port = 0;
3335                 last_port = RTE_MAX_ETHPORTS - 1;
3336         } else {
3337                 next_port = last_port = port_id;
3338         }
3339
3340         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3341
3342         do {
3343                 dev = &rte_eth_devices[next_port];
3344                 ret = 0;
3345                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3346                      cb = next) {
3347
3348                         next = TAILQ_NEXT(cb, next);
3349
3350                         if (cb->cb_fn != cb_fn || cb->event != event ||
3351                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3352                                 continue;
3353
3354                         /*
3355                          * if this callback is not executing right now,
3356                          * then remove it.
3357                          */
3358                         if (cb->active == 0) {
3359                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3360                                 rte_free(cb);
3361                         } else {
3362                                 ret = -EAGAIN;
3363                         }
3364                 }
3365         } while (++next_port <= last_port);
3366
3367         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3368         return ret;
3369 }
3370
3371 int
3372 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3373         enum rte_eth_event_type event, void *ret_param)
3374 {
3375         struct rte_eth_dev_callback *cb_lst;
3376         struct rte_eth_dev_callback dev_cb;
3377         int rc = 0;
3378
3379         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3380         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3381                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3382                         continue;
3383                 dev_cb = *cb_lst;
3384                 cb_lst->active = 1;
3385                 if (ret_param != NULL)
3386                         dev_cb.ret_param = ret_param;
3387
3388                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3389                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3390                                 dev_cb.cb_arg, dev_cb.ret_param);
3391                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3392                 cb_lst->active = 0;
3393         }
3394         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3395         return rc;
3396 }
3397
3398 void
3399 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3400 {
3401         if (dev == NULL)
3402                 return;
3403
3404         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3405
3406         dev->state = RTE_ETH_DEV_ATTACHED;
3407 }
3408
3409 int
3410 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3411 {
3412         uint32_t vec;
3413         struct rte_eth_dev *dev;
3414         struct rte_intr_handle *intr_handle;
3415         uint16_t qid;
3416         int rc;
3417
3418         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3419
3420         dev = &rte_eth_devices[port_id];
3421
3422         if (!dev->intr_handle) {
3423                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3424                 return -ENOTSUP;
3425         }
3426
3427         intr_handle = dev->intr_handle;
3428         if (!intr_handle->intr_vec) {
3429                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3430                 return -EPERM;
3431         }
3432
3433         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3434                 vec = intr_handle->intr_vec[qid];
3435                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3436                 if (rc && rc != -EEXIST) {
3437                         RTE_ETHDEV_LOG(ERR,
3438                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3439                                 port_id, qid, op, epfd, vec);
3440                 }
3441         }
3442
3443         return 0;
3444 }
3445
3446 int __rte_experimental
3447 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3448 {
3449         struct rte_intr_handle *intr_handle;
3450         struct rte_eth_dev *dev;
3451         unsigned int efd_idx;
3452         uint32_t vec;
3453         int fd;
3454
3455         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3456
3457         dev = &rte_eth_devices[port_id];
3458
3459         if (queue_id >= dev->data->nb_rx_queues) {
3460                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3461                 return -1;
3462         }
3463
3464         if (!dev->intr_handle) {
3465                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3466                 return -1;
3467         }
3468
3469         intr_handle = dev->intr_handle;
3470         if (!intr_handle->intr_vec) {
3471                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3472                 return -1;
3473         }
3474
3475         vec = intr_handle->intr_vec[queue_id];
3476         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3477                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3478         fd = intr_handle->efds[efd_idx];
3479
3480         return fd;
3481 }
3482
3483 const struct rte_memzone *
3484 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3485                          uint16_t queue_id, size_t size, unsigned align,
3486                          int socket_id)
3487 {
3488         char z_name[RTE_MEMZONE_NAMESIZE];
3489         const struct rte_memzone *mz;
3490
3491         snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3492                  dev->data->port_id, queue_id, ring_name);
3493
3494         mz = rte_memzone_lookup(z_name);
3495         if (mz)
3496                 return mz;
3497
3498         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3499                         RTE_MEMZONE_IOVA_CONTIG, align);
3500 }
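
/*
 * Usage sketch (illustrative only): how a PMD's queue-setup path typically
 * obtains DMA-able descriptor memory through this helper. "ring_size",
 * "queue_idx" and "socket_id" are hypothetical local variables; the
 * returned memzone's addr/iova are then programmed into the NIC.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */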
3501
3502 int __rte_experimental
3503 rte_eth_dev_create(struct rte_device *device, const char *name,
3504         size_t priv_data_size,
3505         ethdev_bus_specific_init ethdev_bus_specific_init,
3506         void *bus_init_params,
3507         ethdev_init_t ethdev_init, void *init_params)
3508 {
3509         struct rte_eth_dev *ethdev;
3510         int retval;
3511
3512         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3513
3514         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3515                 ethdev = rte_eth_dev_allocate(name);
3516                 if (!ethdev)
3517                         return -ENODEV;
3518
3519                 if (priv_data_size) {
3520                         ethdev->data->dev_private = rte_zmalloc_socket(
3521                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3522                                 device->numa_node);
3523
3524                         if (!ethdev->data->dev_private) {
3525                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3526                                 retval = -ENOMEM;
3527                                 goto probe_failed;
3528                         }
3529                 }
3530         } else {
3531                 ethdev = rte_eth_dev_attach_secondary(name);
3532                 if (!ethdev) {
3533                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3534                                 "ethdev doesn't exist\n");
3535                         return -ENODEV;
3536                 }
3537         }
3538
3539         ethdev->device = device;
3540
3541         if (ethdev_bus_specific_init) {
3542                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3543                 if (retval) {
3544                         RTE_LOG(ERR, EAL,
3545                                 "ethdev bus specific initialisation failed\n");
3546                         goto probe_failed;
3547                 }
3548         }
3549
3550         retval = ethdev_init(ethdev, init_params);
3551         if (retval) {
3552                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3553                 goto probe_failed;
3554         }
3555
3556         rte_eth_dev_probing_finish(ethdev);
3557
3558         return retval;
3559
3560 probe_failed:
3561         rte_eth_dev_release_port(ethdev);
3562         return retval;
3563 }
3564
3565 int  __rte_experimental
3566 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3567         ethdev_uninit_t ethdev_uninit)
3568 {
3569         int ret;
3570
3571         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3572         if (!ethdev)
3573                 return -ENODEV;
3574
3575         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3576
3577         /* ethdev_uninit is guaranteed non-NULL by the macro check above */
3578         ret = ethdev_uninit(ethdev);
3579         if (ret)
3580                 return ret;
3581
3582         return rte_eth_dev_release_port(ethdev);
3583 }
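
/*
 * Usage sketch (illustrative only): a bus probe callback built on
 * rte_eth_dev_create() so that primary/secondary allocation stays inside
 * ethdev. "my_priv", "my_dev_ops", "my_ethdev_init" and "my_probe" are
 * hypothetical driver symbols.
 *
 *	static int
 *	my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		RTE_SET_USED(init_params);
 *		ethdev->dev_ops = &my_dev_ops;
 *		return 0;
 *	}
 *
 *	static int
 *	my_probe(struct rte_device *dev)
 *	{
 *		return rte_eth_dev_create(dev, dev->name,
 *				sizeof(struct my_priv), NULL, NULL,
 *				my_ethdev_init, NULL);
 *	}
 */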
3584
3585 int
3586 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3587                           int epfd, int op, void *data)
3588 {
3589         uint32_t vec;
3590         struct rte_eth_dev *dev;
3591         struct rte_intr_handle *intr_handle;
3592         int rc;
3593
3594         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3595
3596         dev = &rte_eth_devices[port_id];
3597         if (queue_id >= dev->data->nb_rx_queues) {
3598                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3599                 return -EINVAL;
3600         }
3601
3602         if (!dev->intr_handle) {
3603                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3604                 return -ENOTSUP;
3605         }
3606
3607         intr_handle = dev->intr_handle;
3608         if (!intr_handle->intr_vec) {
3609                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3610                 return -EPERM;
3611         }
3612
3613         vec = intr_handle->intr_vec[queue_id];
3614         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3615         if (rc && rc != -EEXIST) {
3616                 RTE_ETHDEV_LOG(ERR,
3617                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3618                         port_id, queue_id, op, epfd, vec);
3619                 return rc;
3620         }
3621
3622         return 0;
3623 }
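
/*
 * Usage sketch (illustrative only): arming one Rx queue interrupt on the
 * calling thread's epoll instance and sleeping until traffic arrives, in
 * the style of the l3fwd-power example. Error handling omitted.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */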
3624
3625 int
3626 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3627                            uint16_t queue_id)
3628 {
3629         struct rte_eth_dev *dev;
3630
3631         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3632
3633         dev = &rte_eth_devices[port_id];
3634
3635         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3636         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3637                                                                 queue_id));
3638 }
3639
3640 int
3641 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3642                             uint16_t queue_id)
3643 {
3644         struct rte_eth_dev *dev;
3645
3646         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3647
3648         dev = &rte_eth_devices[port_id];
3649
3650         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3651         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3652                                                                 queue_id));
3653 }
3654
3655
3656 int
3657 rte_eth_dev_filter_supported(uint16_t port_id,
3658                              enum rte_filter_type filter_type)
3659 {
3660         struct rte_eth_dev *dev;
3661
3662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3663
3664         dev = &rte_eth_devices[port_id];
3665         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3666         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3667                                 RTE_ETH_FILTER_NOP, NULL);
3668 }
3669
3670 int
3671 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3672                         enum rte_filter_op filter_op, void *arg)
3673 {
3674         struct rte_eth_dev *dev;
3675
3676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3677
3678         dev = &rte_eth_devices[port_id];
3679         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3680         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3681                                                              filter_op, arg));
3682 }
3683
3684 const struct rte_eth_rxtx_callback *
3685 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3686                 rte_rx_callback_fn fn, void *user_param)
3687 {
3688 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3689         rte_errno = ENOTSUP;
3690         return NULL;
3691 #endif
3692         /* check input parameters */
3693         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3694                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3695                 rte_errno = EINVAL;
3696                 return NULL;
3697         }
3698         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3699
3700         if (cb == NULL) {
3701                 rte_errno = ENOMEM;
3702                 return NULL;
3703         }
3704
3705         cb->fn.rx = fn;
3706         cb->param = user_param;
3707
3708         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3709         /* Add the callbacks in fifo order. */
3710         struct rte_eth_rxtx_callback *tail =
3711                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3712
3713         if (!tail) {
3714                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3715
3716         } else {
3717                 while (tail->next)
3718                         tail = tail->next;
3719                 tail->next = cb;
3720         }
3721         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3722
3723         return cb;
3724 }
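
/*
 * Usage sketch (illustrative only): a post-Rx callback that counts
 * delivered packets. "count_cb" and "rx_count" are hypothetical; the
 * function must match rte_rx_callback_fn and return the (possibly
 * modified) number of packets handed on to the application.
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count);
 */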
3725
3726 const struct rte_eth_rxtx_callback *
3727 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3728                 rte_rx_callback_fn fn, void *user_param)
3729 {
3730 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3731         rte_errno = ENOTSUP;
3732         return NULL;
3733 #endif
3734         /* check input parameters */
3735         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3736                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3737                 rte_errno = EINVAL;
3738                 return NULL;
3739         }
3740
3741         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3742
3743         if (cb == NULL) {
3744                 rte_errno = ENOMEM;
3745                 return NULL;
3746         }
3747
3748         cb->fn.rx = fn;
3749         cb->param = user_param;
3750
3751         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3752         /* Add the callback at the first position */
3753         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3754         rte_smp_wmb();
3755         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3756         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3757
3758         return cb;
3759 }
3760
3761 const struct rte_eth_rxtx_callback *
3762 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3763                 rte_tx_callback_fn fn, void *user_param)
3764 {
3765 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3766         rte_errno = ENOTSUP;
3767         return NULL;
3768 #endif
3769         /* check input parameters */
3770         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3771                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3772                 rte_errno = EINVAL;
3773                 return NULL;
3774         }
3775
3776         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3777
3778         if (cb == NULL) {
3779                 rte_errno = ENOMEM;
3780                 return NULL;
3781         }
3782
3783         cb->fn.tx = fn;
3784         cb->param = user_param;
3785
3786         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3787         /* Add the callbacks in fifo order. */
3788         struct rte_eth_rxtx_callback *tail =
3789                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3790
3791         if (!tail) {
3792                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3793
3794         } else {
3795                 while (tail->next)
3796                         tail = tail->next;
3797                 tail->next = cb;
3798         }
3799         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3800
3801         return cb;
3802 }
3803
3804 int
3805 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3806                 const struct rte_eth_rxtx_callback *user_cb)
3807 {
3808 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3809         return -ENOTSUP;
3810 #endif
3811         /* Check input parameters. */
3812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3813         if (user_cb == NULL ||
3814                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3815                 return -EINVAL;
3816
3817         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3818         struct rte_eth_rxtx_callback *cb;
3819         struct rte_eth_rxtx_callback **prev_cb;
3820         int ret = -EINVAL;
3821
3822         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3823         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3824         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3825                 cb = *prev_cb;
3826                 if (cb == user_cb) {
3827                         /* Remove the user cb from the callback list. */
3828                         *prev_cb = cb->next;
3829                         ret = 0;
3830                         break;
3831                 }
3832         }
3833         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3834
3835         return ret;
3836 }
3837
3838 int
3839 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3840                 const struct rte_eth_rxtx_callback *user_cb)
3841 {
3842 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3843         return -ENOTSUP;
3844 #endif
3845         /* Check input parameters. */
3846         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3847         if (user_cb == NULL ||
3848                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3849                 return -EINVAL;
3850
3851         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3852         int ret = -EINVAL;
3853         struct rte_eth_rxtx_callback *cb;
3854         struct rte_eth_rxtx_callback **prev_cb;
3855
3856         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3857         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3858         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3859                 cb = *prev_cb;
3860                 if (cb == user_cb) {
3861                         /* Remove the user cb from the callback list. */
3862                         *prev_cb = cb->next;
3863                         ret = 0;
3864                         break;
3865                 }
3866         }
3867         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3868
3869         return ret;
3870 }
3871
3872 int
3873 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3874         struct rte_eth_rxq_info *qinfo)
3875 {
3876         struct rte_eth_dev *dev;
3877
3878         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3879
3880         if (qinfo == NULL)
3881                 return -EINVAL;
3882
3883         dev = &rte_eth_devices[port_id];
3884         if (queue_id >= dev->data->nb_rx_queues) {
3885                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3886                 return -EINVAL;
3887         }
3888
3889         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3890
3891         memset(qinfo, 0, sizeof(*qinfo));
3892         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3893         return 0;
3894 }
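
/*
 * Usage sketch (illustrative only): reading back the descriptor count and
 * mempool that were chosen when Rx queue 0 was set up.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("rxq0: %u descriptors, mempool %s\n",
 *		       qinfo.nb_desc, qinfo.mp->name);
 */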
3895
3896 int
3897 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3898         struct rte_eth_txq_info *qinfo)
3899 {
3900         struct rte_eth_dev *dev;
3901
3902         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3903
3904         if (qinfo == NULL)
3905                 return -EINVAL;
3906
3907         dev = &rte_eth_devices[port_id];
3908         if (queue_id >= dev->data->nb_tx_queues) {
3909                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
3910                 return -EINVAL;
3911         }
3912
3913         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3914
3915         memset(qinfo, 0, sizeof(*qinfo));
3916         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3917
3918         return 0;
3919 }
3920
3921 int
3922 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3923                              struct ether_addr *mc_addr_set,
3924                              uint32_t nb_mc_addr)
3925 {
3926         struct rte_eth_dev *dev;
3927
3928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3929
3930         dev = &rte_eth_devices[port_id];
3931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3932         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3933                                                 mc_addr_set, nb_mc_addr));
3934 }
3935
3936 int
3937 rte_eth_timesync_enable(uint16_t port_id)
3938 {
3939         struct rte_eth_dev *dev;
3940
3941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3942         dev = &rte_eth_devices[port_id];
3943
3944         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3945         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3946 }
3947
3948 int
3949 rte_eth_timesync_disable(uint16_t port_id)
3950 {
3951         struct rte_eth_dev *dev;
3952
3953         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3954         dev = &rte_eth_devices[port_id];
3955
3956         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3957         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3958 }
3959
3960 int
3961 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3962                                    uint32_t flags)
3963 {
3964         struct rte_eth_dev *dev;
3965
3966         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3967         dev = &rte_eth_devices[port_id];
3968
3969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3970         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3971                                 (dev, timestamp, flags));
3972 }
3973
3974 int
3975 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3976                                    struct timespec *timestamp)
3977 {
3978         struct rte_eth_dev *dev;
3979
3980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3981         dev = &rte_eth_devices[port_id];
3982
3983         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3984         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3985                                 (dev, timestamp));
3986 }
3987
3988 int
3989 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3990 {
3991         struct rte_eth_dev *dev;
3992
3993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3994         dev = &rte_eth_devices[port_id];
3995
3996         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3997         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3998                                                                       delta));
3999 }
4000
4001 int
4002 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4003 {
4004         struct rte_eth_dev *dev;
4005
4006         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4007         dev = &rte_eth_devices[port_id];
4008
4009         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4010         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4011                                                                 timestamp));
4012 }
4013
4014 int
4015 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4016 {
4017         struct rte_eth_dev *dev;
4018
4019         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4020         dev = &rte_eth_devices[port_id];
4021
4022         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4023         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4024                                                                 timestamp));
4025 }
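
/*
 * Usage sketch (illustrative only): enabling IEEE 1588 clock access and
 * sampling the NIC time on a PMD that implements the timesync ops.
 *
 *	struct timespec ts;
 *
 *	if (rte_eth_timesync_enable(port_id) == 0 &&
 *	    rte_eth_timesync_read_time(port_id, &ts) == 0)
 *		printf("NIC time: %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */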
4026
4027 int
4028 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4029 {
4030         struct rte_eth_dev *dev;
4031
4032         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4033
4034         dev = &rte_eth_devices[port_id];
4035         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4036         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4037 }
4038
4039 int
4040 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4041 {
4042         struct rte_eth_dev *dev;
4043
4044         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4045
4046         dev = &rte_eth_devices[port_id];
4047         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4048         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4049 }
4050
4051 int
4052 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4053 {
4054         struct rte_eth_dev *dev;
4055
4056         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4057
4058         dev = &rte_eth_devices[port_id];
4059         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4060         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4061 }
4062
4063 int
4064 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4065 {
4066         struct rte_eth_dev *dev;
4067
4068         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4069
4070         dev = &rte_eth_devices[port_id];
4071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4072         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4073 }
4074
4075 int __rte_experimental
4076 rte_eth_dev_get_module_info(uint16_t port_id,
4077                             struct rte_eth_dev_module_info *modinfo)
4078 {
4079         struct rte_eth_dev *dev;
4080
4081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4082
4083         dev = &rte_eth_devices[port_id];
4084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4085         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4086 }
4087
4088 int __rte_experimental
4089 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4090                               struct rte_dev_eeprom_info *info)
4091 {
4092         struct rte_eth_dev *dev;
4093
4094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4095
4096         dev = &rte_eth_devices[port_id];
4097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4098         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4099 }
4100
4101 int
4102 rte_eth_dev_get_dcb_info(uint16_t port_id,
4103                              struct rte_eth_dcb_info *dcb_info)
4104 {
4105         struct rte_eth_dev *dev;
4106
4107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4108
4109         dev = &rte_eth_devices[port_id];
4110         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4111
4112         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4113         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4114 }
4115
4116 int
4117 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4118                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4119 {
4120         struct rte_eth_dev *dev;
4121
4122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4123         if (l2_tunnel == NULL) {
4124                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4125                 return -EINVAL;
4126         }
4127
4128         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4129                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4130                 return -EINVAL;
4131         }
4132
4133         dev = &rte_eth_devices[port_id];
4134         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4135                                 -ENOTSUP);
4136         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4137                                                                 l2_tunnel));
4138 }
4139
4140 int
4141 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4142                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4143                                   uint32_t mask,
4144                                   uint8_t en)
4145 {
4146         struct rte_eth_dev *dev;
4147
4148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4149
4150         if (l2_tunnel == NULL) {
4151                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4152                 return -EINVAL;
4153         }
4154
4155         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4156                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4157                 return -EINVAL;
4158         }
4159
4160         if (mask == 0) {
4161                 RTE_ETHDEV_LOG(ERR, "Mask must be non-zero\n");
4162                 return -EINVAL;
4163         }
4164
4165         dev = &rte_eth_devices[port_id];
4166         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4167                                 -ENOTSUP);
4168         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4169                                                         l2_tunnel, mask, en));
4170 }
4171
4172 static void
4173 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4174                            const struct rte_eth_desc_lim *desc_lim)
4175 {
4176         if (desc_lim->nb_align != 0)
4177                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4178
4179         if (desc_lim->nb_max != 0)
4180                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4181
4182         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4183 }
4184
4185 int
4186 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4187                                  uint16_t *nb_rx_desc,
4188                                  uint16_t *nb_tx_desc)
4189 {
4190         struct rte_eth_dev *dev;
4191         struct rte_eth_dev_info dev_info;
4192
4193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4194
4195         dev = &rte_eth_devices[port_id];
4196         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4197
4198         rte_eth_dev_info_get(port_id, &dev_info);
4199
4200         if (nb_rx_desc != NULL)
4201                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4202
4203         if (nb_tx_desc != NULL)
4204                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4205
4206         return 0;
4207 }
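
/*
 * Usage sketch (illustrative only): clamping the requested ring sizes to
 * the device limits before queue setup, as most DPDK examples do.
 * "mb_pool" is a hypothetical, already-created mempool.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *			       NULL, mb_pool);
 */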
4208
4209 int
4210 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4211 {
4212         struct rte_eth_dev *dev;
4213
4214         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4215
4216         if (pool == NULL)
4217                 return -EINVAL;
4218
4219         dev = &rte_eth_devices[port_id];
4220
4221         if (*dev->dev_ops->pool_ops_supported == NULL)
4222                 return 1; /* all pools are supported */
4223
4224         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4225 }
4226
4227 /**
4228  * A set of values to describe the possible states of a switch domain.
4229  */
4230 enum rte_eth_switch_domain_state {
4231         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4232         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4233 };
4234
4235 /**
4236  * Array of switch domains available for allocation. Array is sized to
4237  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4238  * ethdev ports in a single process.
4239  */
4240 static struct rte_eth_dev_switch {
4241         enum rte_eth_switch_domain_state state;
4242 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4243
4244 int __rte_experimental
4245 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4246 {
4247         unsigned int i;
4248
4249         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4250
4251         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4252                 i < RTE_MAX_ETHPORTS; i++) {
4253                 if (rte_eth_switch_domains[i].state ==
4254                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4255                         rte_eth_switch_domains[i].state =
4256                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4257                         *domain_id = i;
4258                         return 0;
4259                 }
4260         }
4261
4262         return -ENOSPC;
4263 }
4264
4265 int __rte_experimental
4266 rte_eth_switch_domain_free(uint16_t domain_id)
4267 {
4268         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4269                 domain_id >= RTE_MAX_ETHPORTS)
4270                 return -EINVAL;
4271
4272         if (rte_eth_switch_domains[domain_id].state !=
4273                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4274                 return -EINVAL;
4275
4276         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4277
4278         return 0;
4279 }
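
/*
 * Usage sketch (illustrative only): a PF driver allocating one switch
 * domain to be shared by its representor ports, and releasing it again
 * when the last port of the domain is closed.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *
 * and later, on teardown:
 *
 *	rte_eth_switch_domain_free(domain_id);
 */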
4280
4281 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4282
4283 static int
4284 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4285 {
4286         int state;
4287         struct rte_kvargs_pair *pair;
4288         char *letter;
4289
4290         arglist->str = strdup(str_in);
4291         if (arglist->str == NULL)
4292                 return -ENOMEM;
4293
4294         letter = arglist->str;
4295         state = 0;
4296         arglist->count = 0;
4297         pair = &arglist->pairs[0];
4298         while (1) {
4299                 switch (state) {
4300                 case 0: /* Initial */
4301                         if (*letter == '=')
4302                                 return -EINVAL;
4303                         else if (*letter == '\0')
4304                                 return 0;
4305
4306                         state = 1;
4307                         pair->key = letter;
4308                         /* fall-thru */
4309
4310                 case 1: /* Parsing key */
4311                         if (*letter == '=') {
4312                                 *letter = '\0';
4313                                 pair->value = letter + 1;
4314                                 state = 2;
4315                         } else if (*letter == ',' || *letter == '\0')
4316                                 return -EINVAL;
4317                         break;
4318
4319
4320                 case 2: /* Parsing value */
4321                         if (*letter == '[')
4322                                 state = 3;
4323                         else if (*letter == ',') {
4324                                 *letter = '\0';
4325                                 arglist->count++;
4326                                 pair = &arglist->pairs[arglist->count];
4327                                 state = 0;
4328                         } else if (*letter == '\0') {
4329                                 letter--;
4330                                 arglist->count++;
4331                                 pair = &arglist->pairs[arglist->count];
4332                                 state = 0;
4333                         }
4334                         break;
4335
4336                 case 3: /* Parsing list */
4337                         if (*letter == ']')
4338                                 state = 2;
4339                         else if (*letter == '\0')
4340                                 return -EINVAL;
4341                         break;
4342                 }
4343                 letter++;
4344         }
4345 }
4346
4347 static int
4348 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4349         void *data)
4350 {
4351         char *str_start;
4352         int state;
4353         int result;
4354
4355         if (*str != '[')
4356                 /* Single element, not a list */
4357                 return callback(str, data);
4358
4359         /* Sanity check, then strip the brackets */
4360         str_start = &str[strlen(str) - 1];
4361         if (*str_start != ']') {
4362                 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'\n", str);
4363                 return -EINVAL;
4364         }
4365         str++;
4366         *str_start = '\0';
4367
4368         /* Process list elements */
4369         state = 0;
4370         while (1) {
4371                 if (state == 0) {
4372                         if (*str == '\0')
4373                                 break;
4374                         if (*str != ',') {
4375                                 str_start = str;
4376                                 state = 1;
4377                         }
4378                 } else if (state == 1) {
4379                         if (*str == ',' || *str == '\0') {
4380                                 if (str > str_start) {
4381                                         /* Non-empty string fragment */
4382                                         *str = '\0';
4383                                         result = callback(str_start, data);
4384                                         if (result < 0)
4385                                                 return result;
4386                                 }
4387                                 state = 0;
4388                         }
4389                 }
4390                 str++;
4391         }
4392         return 0;
4393 }
4394
4395 static int
4396 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4397         const uint16_t max_list)
4398 {
4399         uint16_t lo, hi, val;
4400         int result;
4401
4402         result = sscanf(str, "%hu-%hu", &lo, &hi);
4403         if (result == 1) {
4404                 if (*len_list >= max_list)
4405                         return -ENOMEM;
4406                 list[(*len_list)++] = lo;
4407         } else if (result == 2) {
4408                 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4409                         return -EINVAL;
4410                 for (val = lo; val <= hi; val++) {
4411                         if (*len_list >= max_list)
4412                                 return -ENOMEM;
4413                         list[(*len_list)++] = val;
4414                 }
4415         } else
4416                 return -EINVAL;
4417         return 0;
4418 }
4419
4420
4421 static int
4422 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4423 {
4424         struct rte_eth_devargs *eth_da = data;
4425
4426         return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4427                 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
4428 }
4429
4430 int __rte_experimental
4431 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4432 {
4433         struct rte_kvargs args;
4434         struct rte_kvargs_pair *pair;
4435         unsigned int i;
4436         int result = 0;
4437
4438         memset(eth_da, 0, sizeof(*eth_da));
4439
4440         result = rte_eth_devargs_tokenise(&args, dargs);
4441         if (result < 0)
4442                 goto parse_cleanup;
4443
4444         for (i = 0; i < args.count; i++) {
4445                 pair = &args.pairs[i];
4446                 if (strcmp("representor", pair->key) == 0) {
4447                         result = rte_eth_devargs_parse_list(pair->value,
4448                                 rte_eth_devargs_parse_representor_ports,
4449                                 eth_da);
4450                         if (result < 0)
4451                                 goto parse_cleanup;
4452                 }
4453         }
4454
4455 parse_cleanup:
4456         if (args.str)
4457                 free(args.str);
4458
4459         return result;
4460 }
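
/*
 * Usage sketch (illustrative only): parsing the "representor" device
 * argument. For "representor=[0,2-3]" the parser leaves
 * representor_ports = {0, 2, 3} and nb_representor_ports = 3 in eth_da.
 *
 *	struct rte_eth_devargs eth_da;
 *
 *	if (rte_eth_devargs_parse("representor=[0,2-3]", &eth_da) == 0)
 *		printf("%u representor ports\n",
 *		       (unsigned int)eth_da.nb_representor_ports);
 */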
4461
4462 RTE_INIT(ethdev_init_log)
4463 {
4464         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
4465         if (rte_eth_dev_logtype >= 0)
4466                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
4467 }