ethdev: convert static log type usage to dynamic
lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint16_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}
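
/*
 * Usage sketch (illustrative, not part of this file): applications usually
 * walk the valid ports either with rte_eth_find_next() directly or via the
 * RTE_ETH_FOREACH_DEV() convenience macro. Assumes EAL is initialized.
 *
 *      uint16_t pid;
 *
 *      for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *           pid = rte_eth_find_next(pid + 1))
 *              printf("port %u is attached\n", pid);
 */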

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        eth_dev_last_created_port = port_id;

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
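
/*
 * Usage sketch (illustrative, not part of this file): in the primary
 * process a PMD calls rte_eth_dev_allocate() from its probe path; the
 * names below are hypothetical driver symbols.
 *
 *      struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *      if (eth_dev == NULL)
 *              return -ENOMEM;
 *      eth_dev->dev_ops = &example_dev_ops;        // driver's ops table
 *      eth_dev->data->dev_private = example_priv;  // driver-private data
 */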

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id) {
                RTE_ETHDEV_LOG(ERR, "Invalid owner_id=%016"PRIx64"\n",
                        owner_id);
                return 0;
        }
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
               rte_eth_devices[port_id].data->owner.id != owner_id))
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;
        int sret;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id))
                return -EINVAL;

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
                        new_owner->name);
        if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
                RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
                        port_id);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}
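
/*
 * Usage sketch (illustrative, not part of this file): a component such as a
 * bonding or failsafe layer claims a port so other components skip it.
 * Error handling is omitted for brevity.
 *
 *      struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 *      rte_eth_dev_owner_new(&owner.id);
 *      rte_eth_dev_owner_set(port_id, &owner);
 *      // ... RTE_ETH_FOREACH_DEV() in other components now skips the port
 *      rte_eth_dev_owner_unset(port_id, owner.id);
 */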

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
        return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        for (port = 0; port < RTE_MAX_ETHPORTS; port++)
                if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
                        count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
                if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
                    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }
        }

        return -ENODEV;
}
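
/*
 * Usage sketch (illustrative, not part of this file): round-tripping
 * between a port id and its device name. The buffer must hold at least
 * RTE_ETH_NAME_MAX_LEN bytes.
 *
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *      uint16_t pid;
 *
 *      if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *          rte_eth_dev_get_port_by_name(name, &pid) == 0)
 *              assert(pid == 0);
 */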

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
        int current = rte_eth_dev_count_total();
        struct rte_devargs da;
        int ret = -1;

        memset(&da, 0, sizeof(da));

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs */
        if (rte_devargs_parse(&da, "%s", devargs))
                goto err;

        ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count_total()) {
                RTE_ETHDEV_LOG(ERR, "No port found for device (%s)\n", da.name);
                ret = -1;
                goto err;
        }

        /* if nothing happened, there is a bug here, since some driver told us
         * it did attach a device, but did not create a port.
         * FIXME: race condition in case of plug-out of another device
         */
        if (current == rte_eth_dev_count_total()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(da.args);
        return ret;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
        struct rte_device *dev;
        struct rte_bus *bus;
        uint32_t dev_flags;
        int ret = -1;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %"PRIu16" is bonded, cannot detach\n", port_id);
                return -ENOTSUP;
        }

        dev = rte_eth_devices[port_id].device;
        if (dev == NULL)
                return -EINVAL;

        bus = rte_bus_find_by_device(dev);
        if (bus == NULL)
                return -ENOENT;

        ret = rte_eal_hotplug_remove(bus->name, dev->name);
        if (ret < 0)
                return ret;

        rte_eth_dev_release_port(&rte_eth_devices[port_id]);
        return 0;
}
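
/*
 * Usage sketch (illustrative, not part of this file): hot-plugging a port
 * at runtime with the attach/detach pair above. "net_null0" is a
 * hypothetical vdev devargs string.
 *
 *      uint16_t pid;
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *
 *      if (rte_eth_dev_attach("net_null0", &pid) == 0) {
 *              // ... configure, start and use the port ...
 *              rte_eth_dev_detach(pid, name);
 *      }
 */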

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(ERR,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(ERR,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(ERR,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(ERR,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
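
/*
 * Usage sketch (illustrative, not part of this file): building a
 * link_speeds mask for struct rte_eth_conf, e.g. to advertise only 10G
 * and 25G full duplex:
 *
 *      uint32_t speeds;
 *
 *      speeds = rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX) |
 *               rte_eth_speed_bitflag(ETH_SPEED_NUM_25G, ETH_LINK_FULL_DUPLEX);
 *      conf.link_speeds = speeds;  // conf is a struct rte_eth_conf
 */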

/**
 * A conversion function from rxmode bitfield API.
 */
static void
rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
                                    uint64_t *rx_offloads)
{
        uint64_t offloads = 0;

        if (rxmode->header_split == 1)
                offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
        if (rxmode->hw_ip_checksum == 1)
                offloads |= DEV_RX_OFFLOAD_CHECKSUM;
        if (rxmode->hw_vlan_filter == 1)
                offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
        if (rxmode->hw_vlan_strip == 1)
                offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
        if (rxmode->hw_vlan_extend == 1)
                offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
        if (rxmode->jumbo_frame == 1)
                offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        if (rxmode->hw_strip_crc == 1)
                offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
        if (rxmode->enable_scatter == 1)
                offloads |= DEV_RX_OFFLOAD_SCATTER;
        if (rxmode->enable_lro == 1)
                offloads |= DEV_RX_OFFLOAD_TCP_LRO;
        if (rxmode->hw_timestamp == 1)
                offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
        if (rxmode->security == 1)
                offloads |= DEV_RX_OFFLOAD_SECURITY;

        *rx_offloads = offloads;
}

const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
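
/*
 * Usage sketch (illustrative, not part of this file): the two helpers above
 * name a single offload bit, so a capability mask is printed by walking its
 * set bits:
 *
 *      uint64_t capa = dev_info.rx_offload_capa;  // from dev_infos_get
 *      uint64_t bit;
 *
 *      for (bit = 1; capa != 0; bit <<= 1)
 *              if (capa & bit) {
 *                      printf("%s ", rte_eth_dev_rx_offload_name(bit));
 *                      capa &= ~bit;
 *              }
 */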

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf local_conf = *dev_conf;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If the driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /*
         * Convert between the offloads API to enable PMDs to support
         * only one of them.
         */
        if (dev_conf->rxmode.ignore_offload_bitfield == 0)
                rte_eth_convert_rx_offload_bitfield(
                                &dev_conf->rxmode, &local_conf.rxmode.offloads);

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                return -EINVAL;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
             local_conf.rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, local_conf.rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                return -EINVAL;
        }
        if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
             local_conf.txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, local_conf.txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                return -EINVAL;
        }

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                return -EINVAL;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_profile_rx_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_profile_rx_init = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        return 0;
}
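
/*
 * Usage sketch (illustrative, not part of this file): a minimal single
 * Rx/Tx queue configuration using the offloads API; the values are
 * placeholders.
 *
 *      struct rte_eth_conf conf;
 *
 *      memset(&conf, 0, sizeof(conf));
 *      conf.rxmode.ignore_offload_bitfield = 1;
 *      conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
 *      if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *              rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */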

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info.max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return eth_err(port_id, diag);

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}
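
/*
 * Usage sketch (illustrative, not part of this file): the expected port
 * lifecycle around the calls above, with error handling omitted:
 *
 *      rte_eth_dev_configure(port_id, 1, 1, &conf);
 *      rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 *      rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *      rte_eth_dev_start(port_id);
 *      // ... rte_eth_rx_burst()/rte_eth_tx_burst() on the queues ...
 *      rte_eth_dev_stop(port_id);
 *      rte_eth_dev_close(port_id);
 */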

void
rte_eth_dev_stop(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Device with port_id=%"PRIu16" already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

void
rte_eth_dev_close(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        dev->data->nb_rx_queues = 0;
        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        dev->data->nb_tx_queues = 0;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

        rte_eth_dev_stop(port_id);
        ret = dev->dev_ops->dev_reset(dev);

        return eth_err(port_id, ret);
}

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

        dev = &rte_eth_devices[port_id];

        if (dev->state == RTE_ETH_DEV_REMOVED)
                return 1;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

        ret = dev->dev_ops->is_removed(dev);
        if (ret != 0)
                /* Device is physically removed. */
                dev->state = RTE_ETH_DEV_REMOVED;

        return ret;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf local_conf;
        void **rxq;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
                        mp->name, (int)mp->private_data_size,
                        (int)sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_ETHDEV_LOG(ERR,
                        "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
                        mp->name, (int)mbp_buf_size,
                        (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
                        (int)RTE_PKTMBUF_HEADROOM,
                        (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        /* Use default specified by driver, if nb_rx_desc is zero */
        if (nb_rx_desc == 0) {
                nb_rx_desc = dev_info.default_rxportconf.ring_size;
                /* If driver default is also zero, fall back on EAL default */
                if (nb_rx_desc == 0)
                        nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

                RTE_ETHDEV_LOG(ERR,
                        "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1513                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1514                         dev_info.rx_desc_lim.nb_min,
1515                         dev_info.rx_desc_lim.nb_align);
1516                 return -EINVAL;
1517         }
1518
1519         if (dev->data->dev_started &&
1520                 !(dev_info.dev_capa &
1521                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1522                 return -EBUSY;
1523
1524         if (dev->data->dev_started &&
1525                 (dev->data->rx_queue_state[rx_queue_id] !=
1526                         RTE_ETH_QUEUE_STATE_STOPPED))
1527                 return -EBUSY;
1528
1529         rxq = dev->data->rx_queues;
1530         if (rxq[rx_queue_id]) {
1531                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1532                                         -ENOTSUP);
1533                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1534                 rxq[rx_queue_id] = NULL;
1535         }
1536
1537         if (rx_conf == NULL)
1538                 rx_conf = &dev_info.default_rxconf;
1539
1540         local_conf = *rx_conf;
1541         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1542                 /*
1543                  * Reflect port offloads to queue offloads so that
1544                  * they are not discarded.
1545                  */
1546                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1547                                                     &local_conf.offloads);
1548         }
1549
1550         /*
1551          * If an offload has already been enabled in
1552          * rte_eth_dev_configure(), it is enabled on all queues,
1553          * so there is no need to enable it on this queue again.
1554          * The local_conf.offloads passed to the underlying PMD
1555          * thus carries only those offloads that are enabled on
1556          * this queue alone and not enabled on all queues.
1557          */
1558         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1559
1560         /*
1561          * Offloads newly added for this queue are those not enabled in
1562          * rte_eth_dev_configure(), and they must be of the per-queue
1563          * type. A pure per-port offload can't be enabled on one queue
1564          * while disabled on another queue. Nor can a pure per-port
1565          * offload be newly added on any queue if it hasn't been
1566          * enabled in rte_eth_dev_configure().
1567          */
1568         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1569              local_conf.offloads) {
1570                 RTE_ETHDEV_LOG(ERR,
1571                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1572                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1573                         port_id, rx_queue_id, local_conf.offloads,
1574                         dev_info.rx_queue_offload_capa,
1575                         __func__);
1576                 return -EINVAL;
1577         }
1578
1579         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1580                                               socket_id, &local_conf, mp);
1581         if (!ret) {
1582                 if (!dev->data->min_rx_buf_size ||
1583                     dev->data->min_rx_buf_size > mbp_buf_size)
1584                         dev->data->min_rx_buf_size = mbp_buf_size;
1585         }
1586
1587         return eth_err(port_id, ret);
1588 }
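
/*
 * Editor's illustrative sketch, not part of the library: a minimal Rx
 * bring-up path as a caller would drive it. The guard macro, the
 * function name and all pool/queue/descriptor parameters below are
 * hypothetical values chosen for the example.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_rx_setup(uint16_t port_id)
{
        struct rte_eth_conf port_conf;
        struct rte_mempool *mp;
        int ret;

        memset(&port_conf, 0, sizeof(port_conf));
        /* one Rx queue and one Tx queue */
        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret < 0)
                return ret;

        /* 8192 mbufs, 256 per-lcore cache, default data room size */
        mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (mp == NULL)
                return -ENOMEM;

        /* nb_rx_desc == 0 picks the driver (or EAL fallback) default;
         * rx_conf == NULL picks the driver's default_rxconf.
         */
        return rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(),
                        NULL, mp);
}
#endif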
1589
1590 /**
1591  * Convert from tx offloads to txq_flags.
1592  */
1593 static void
1594 rte_eth_convert_tx_offload(const uint64_t tx_offloads, uint32_t *txq_flags)
1595 {
1596         uint32_t flags = 0;
1597
1598         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1599                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1600         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1601                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1602         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1603                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1604         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1605                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1606         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1607                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1608         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1609                 flags |= ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP;
1610
1611         *txq_flags = flags;
1612 }
1613
1614 /**
1615  * Convert from the legacy txq_flags API to Tx offloads.
1616  */
1617 static void
1618 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1619 {
1620         uint64_t offloads = 0;
1621
1622         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1623                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1624         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1625                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1626         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1627                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1628         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1629                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1630         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1631                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1632         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1633             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1634                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1635
1636         *tx_offloads = offloads;
1637 }
1638
1639 int
1640 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1641                        uint16_t nb_tx_desc, unsigned int socket_id,
1642                        const struct rte_eth_txconf *tx_conf)
1643 {
1644         struct rte_eth_dev *dev;
1645         struct rte_eth_dev_info dev_info;
1646         struct rte_eth_txconf local_conf;
1647         void **txq;
1648
1649         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1650
1651         dev = &rte_eth_devices[port_id];
1652         if (tx_queue_id >= dev->data->nb_tx_queues) {
1653                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1654                 return -EINVAL;
1655         }
1656
1657         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1658         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1659
1660         rte_eth_dev_info_get(port_id, &dev_info);
1661
1662         /* Use the default specified by the driver if nb_tx_desc is zero */
1663         if (nb_tx_desc == 0) {
1664                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1665                 /* If driver default is also zero, fall back on EAL default */
1666                 if (nb_tx_desc == 0)
1667                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1668         }
1669         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1670             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1671             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1672                 RTE_ETHDEV_LOG(ERR,
1673                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1674                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1675                         dev_info.tx_desc_lim.nb_min,
1676                         dev_info.tx_desc_lim.nb_align);
1677                 return -EINVAL;
1678         }
1679
1680         if (dev->data->dev_started &&
1681                 !(dev_info.dev_capa &
1682                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1683                 return -EBUSY;
1684
1685         if (dev->data->dev_started &&
1686                 (dev->data->tx_queue_state[tx_queue_id] !=
1687                         RTE_ETH_QUEUE_STATE_STOPPED))
1688                 return -EBUSY;
1689
1690         txq = dev->data->tx_queues;
1691         if (txq[tx_queue_id]) {
1692                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1693                                         -ENOTSUP);
1694                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1695                 txq[tx_queue_id] = NULL;
1696         }
1697
1698         if (tx_conf == NULL)
1699                 tx_conf = &dev_info.default_txconf;
1700
1701         /*
1702          * Convert between the two offloads APIs (txq_flags and
1703          * offloads) so that PMDs need to support only one of them.
1704          */
1705         local_conf = *tx_conf;
1706         if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
1707                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1708                                           &local_conf.offloads);
1709         }
1710
1711         /*
1712          * If an offload has already been enabled in
1713          * rte_eth_dev_configure(), it is enabled on all queues,
1714          * so there is no need to enable it on this queue again.
1715          * The local_conf.offloads passed to the underlying PMD
1716          * thus carries only those offloads that are enabled on
1717          * this queue alone and not enabled on all queues.
1718          */
1719         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1720
1721         /*
1722          * Offloads newly added for this queue are those not enabled in
1723          * rte_eth_dev_configure(), and they must be of the per-queue
1724          * type. A pure per-port offload can't be enabled on one queue
1725          * while disabled on another queue. Nor can a pure per-port
1726          * offload be newly added on any queue if it hasn't been
1727          * enabled in rte_eth_dev_configure().
1728          */
1729         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1730              local_conf.offloads) {
1731                 RTE_ETHDEV_LOG(ERR,
1732                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1733                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1734                         port_id, tx_queue_id, local_conf.offloads,
1735                         dev_info.tx_queue_offload_capa,
1736                         __func__);
1737                 return -EINVAL;
1738         }
1739
1740         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1741                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1742 }
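
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): Tx queue setup using the new offloads API. Setting
 * ETH_TXQ_FLAGS_IGNORE makes the library take local_conf.offloads
 * as-is instead of converting legacy txq_flags.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_tx_setup(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;

        rte_eth_dev_info_get(port_id, &dev_info);
        txconf = dev_info.default_txconf;
        txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
        /* request fast-free only if the port supports it */
        txconf.offloads = dev_info.tx_offload_capa &
                        DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        /* queue 0, driver-default ring size (nb_tx_desc == 0) */
        return rte_eth_tx_queue_setup(port_id, 0, 0, rte_socket_id(),
                        &txconf);
}
#endif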
1743
1744 void
1745 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1746                 void *userdata __rte_unused)
1747 {
1748         unsigned i;
1749
1750         for (i = 0; i < unsent; i++)
1751                 rte_pktmbuf_free(pkts[i]);
1752 }
1753
1754 void
1755 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1756                 void *userdata)
1757 {
1758         uint64_t *count = userdata;
1759         unsigned i;
1760
1761         for (i = 0; i < unsent; i++)
1762                 rte_pktmbuf_free(pkts[i]);
1763
1764         *count += unsent;
1765 }
1766
1767 int
1768 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1769                 buffer_tx_error_fn cbfn, void *userdata)
1770 {
1771         buffer->error_callback = cbfn;
1772         buffer->error_userdata = userdata;
1773         return 0;
1774 }
1775
1776 int
1777 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1778 {
1779         int ret = 0;
1780
1781         if (buffer == NULL)
1782                 return -EINVAL;
1783
1784         buffer->size = size;
1785         if (buffer->error_callback == NULL) {
1786                 ret = rte_eth_tx_buffer_set_err_callback(
1787                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1788         }
1789
1790         return ret;
1791 }
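
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): the usual tx-buffer pattern. The buffer must be
 * allocated with room for `size` mbuf pointers, which is what
 * RTE_ETH_TX_BUFFER_SIZE() accounts for; the buffer is kept for the
 * port's lifetime. Sizes and names are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_tx_buffering(uint16_t port_id, struct rte_mbuf *pkt)
{
        static uint64_t dropped;
        struct rte_eth_dev_tx_buffer *buffer;
        int ret;

        buffer = rte_zmalloc_socket("example_tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
        if (buffer == NULL)
                return -ENOMEM;

        ret = rte_eth_tx_buffer_init(buffer, 32);
        if (ret != 0)
                return ret;
        /* count unsent packets instead of silently dropping them */
        rte_eth_tx_buffer_set_err_callback(buffer,
                        rte_eth_tx_buffer_count_callback, &dropped);

        /* queues pkt; transmits automatically once 32 are pending */
        rte_eth_tx_buffer(port_id, 0, buffer, pkt);
        /* at the end of a polling iteration, push out any leftovers */
        rte_eth_tx_buffer_flush(port_id, 0, buffer);
        return 0;
}
#endif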
1792
1793 int
1794 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1795 {
1796         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1797         int ret;
1798
1799         /* Validate input data. Bail if not valid or not supported. */
1800         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1801         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

             if (queue_id >= dev->data->nb_tx_queues) {
                     RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                     return -EINVAL;
             }
1802
1803         /* Call driver to free pending mbufs. */
1804         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1805                                                free_cnt);
1806         return eth_err(port_id, ret);
1807 }
1808
1809 void
1810 rte_eth_promiscuous_enable(uint16_t port_id)
1811 {
1812         struct rte_eth_dev *dev;
1813
1814         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1815         dev = &rte_eth_devices[port_id];
1816
1817         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1818         (*dev->dev_ops->promiscuous_enable)(dev);
1819         dev->data->promiscuous = 1;
1820 }
1821
1822 void
1823 rte_eth_promiscuous_disable(uint16_t port_id)
1824 {
1825         struct rte_eth_dev *dev;
1826
1827         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1828         dev = &rte_eth_devices[port_id];
1829
1830         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1831         dev->data->promiscuous = 0;
1832         (*dev->dev_ops->promiscuous_disable)(dev);
1833 }
1834
1835 int
1836 rte_eth_promiscuous_get(uint16_t port_id)
1837 {
1838         struct rte_eth_dev *dev;
1839
1840         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1841
1842         dev = &rte_eth_devices[port_id];
1843         return dev->data->promiscuous;
1844 }
1845
1846 void
1847 rte_eth_allmulticast_enable(uint16_t port_id)
1848 {
1849         struct rte_eth_dev *dev;
1850
1851         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1852         dev = &rte_eth_devices[port_id];
1853
1854         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1855         (*dev->dev_ops->allmulticast_enable)(dev);
1856         dev->data->all_multicast = 1;
1857 }
1858
1859 void
1860 rte_eth_allmulticast_disable(uint16_t port_id)
1861 {
1862         struct rte_eth_dev *dev;
1863
1864         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1865         dev = &rte_eth_devices[port_id];
1866
1867         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1868         dev->data->all_multicast = 0;
1869         (*dev->dev_ops->allmulticast_disable)(dev);
1870 }
1871
1872 int
1873 rte_eth_allmulticast_get(uint16_t port_id)
1874 {
1875         struct rte_eth_dev *dev;
1876
1877         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1878
1879         dev = &rte_eth_devices[port_id];
1880         return dev->data->all_multicast;
1881 }
1882
1883 void
1884 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1885 {
1886         struct rte_eth_dev *dev;
1887
1888         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1889         dev = &rte_eth_devices[port_id];
1890
1891         if (dev->data->dev_conf.intr_conf.lsc &&
1892             dev->data->dev_started)
1893                 rte_eth_linkstatus_get(dev, eth_link);
1894         else {
1895                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1896                 (*dev->dev_ops->link_update)(dev, 1);
1897                 *eth_link = dev->data->dev_link;
1898         }
1899 }
1900
1901 void
1902 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1903 {
1904         struct rte_eth_dev *dev;
1905
1906         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1907         dev = &rte_eth_devices[port_id];
1908
1909         if (dev->data->dev_conf.intr_conf.lsc &&
1910             dev->data->dev_started)
1911                 rte_eth_linkstatus_get(dev, eth_link);
1912         else {
1913                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1914                 (*dev->dev_ops->link_update)(dev, 0);
1915                 *eth_link = dev->data->dev_link;
1916         }
1917 }
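
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): a non-blocking link-status poll, as a periodic
 * status printer would use it.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static void
example_check_link(uint16_t port_id)
{
        struct rte_eth_link link;

        rte_eth_link_get_nowait(port_id, &link);
        if (link.link_status == ETH_LINK_UP)
                printf("Port %u up at %u Mbps (%s)\n", port_id,
                        link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                        "full-duplex" : "half-duplex");
        else
                printf("Port %u down\n", port_id);
}
#endif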
1918
1919 int
1920 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1921 {
1922         struct rte_eth_dev *dev;
1923
1924         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1925
1926         dev = &rte_eth_devices[port_id];
1927         memset(stats, 0, sizeof(*stats));
1928
1929         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1930         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1931         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1932 }
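
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): fetch and print the basic per-port counters held
 * in struct rte_eth_stats.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static void
example_print_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) != 0)
                return;
        printf("rx %"PRIu64" tx %"PRIu64" missed %"PRIu64
                " rx-errors %"PRIu64" no-mbuf %"PRIu64"\n",
                stats.ipackets, stats.opackets, stats.imissed,
                stats.ierrors, stats.rx_nombuf);
}
#endif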
1933
1934 int
1935 rte_eth_stats_reset(uint16_t port_id)
1936 {
1937         struct rte_eth_dev *dev;
1938
1939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1940         dev = &rte_eth_devices[port_id];
1941
1942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1943         (*dev->dev_ops->stats_reset)(dev);
1944         dev->data->rx_mbuf_alloc_failed = 0;
1945
1946         return 0;
1947 }
1948
1949 static inline int
1950 get_xstats_basic_count(struct rte_eth_dev *dev)
1951 {
1952         uint16_t nb_rxqs, nb_txqs;
1953         int count;
1954
1955         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1956         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1957
1958         count = RTE_NB_STATS;
1959         count += nb_rxqs * RTE_NB_RXQ_STATS;
1960         count += nb_txqs * RTE_NB_TXQ_STATS;
1961
1962         return count;
1963 }
1964
1965 static int
1966 get_xstats_count(uint16_t port_id)
1967 {
1968         struct rte_eth_dev *dev;
1969         int count;
1970
1971         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1972         dev = &rte_eth_devices[port_id];
1973         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1974                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1975                                 NULL, 0);
1976                 if (count < 0)
1977                         return eth_err(port_id, count);
1978         }
1979         if (dev->dev_ops->xstats_get_names != NULL) {
1980                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1981                 if (count < 0)
1982                         return eth_err(port_id, count);
1983         } else
1984                 count = 0;
1985
1987         count += get_xstats_basic_count(dev);
1988
1989         return count;
1990 }
1991
1992 int
1993 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1994                 uint64_t *id)
1995 {
1996         int cnt_xstats, idx_xstat;
1997
1998         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1999
2000         if (!id) {
2001                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2002                 return -ENOMEM;
2003         }
2004
2005         if (!xstat_name) {
2006                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2007                 return -ENOMEM;
2008         }
2009
2010         /* Get count */
2011         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2012         if (cnt_xstats < 0) {
2013                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2014                 return -ENODEV;
2015         }
2016
2017         /* Get id-name lookup table */
2018         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2019
2020         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2021                         port_id, xstats_names, cnt_xstats, NULL)) {
2022                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2023                 return -1;
2024         }
2025
2026         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2027                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2028                         *id = idx_xstat;
2029                         return 0;
2030                 }
2031         }
2032
2033         return -EINVAL;
2034 }
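
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): resolve a single xstat by name, then read just
 * that counter through the by-id path. "rx_good_packets" is one of
 * the basic stats registered in rte_stats_strings.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_read_one_xstat(uint16_t port_id, uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
                        &id);
        if (ret != 0)
                return ret;
        /* one id in, one value out */
        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        return ret == 1 ? 0 : ret;
}
#endif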
2035
2036 /* retrieve basic stats names */
2037 static int
2038 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2039         struct rte_eth_xstat_name *xstats_names)
2040 {
2041         int cnt_used_entries = 0;
2042         uint32_t idx, id_queue;
2043         uint16_t num_q;
2044
2045         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2046                 snprintf(xstats_names[cnt_used_entries].name,
2047                         sizeof(xstats_names[0].name),
2048                         "%s", rte_stats_strings[idx].name);
2049                 cnt_used_entries++;
2050         }
2051         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2052         for (id_queue = 0; id_queue < num_q; id_queue++) {
2053                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2054                         snprintf(xstats_names[cnt_used_entries].name,
2055                                 sizeof(xstats_names[0].name),
2056                                 "rx_q%u%s",
2057                                 id_queue, rte_rxq_stats_strings[idx].name);
2058                         cnt_used_entries++;
2059                 }
2061         }
2062         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2063         for (id_queue = 0; id_queue < num_q; id_queue++) {
2064                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2065                         snprintf(xstats_names[cnt_used_entries].name,
2066                                 sizeof(xstats_names[0].name),
2067                                 "tx_q%u%s",
2068                                 id_queue, rte_txq_stats_strings[idx].name);
2069                         cnt_used_entries++;
2070                 }
2071         }
2072         return cnt_used_entries;
2073 }
2074
2075 /* retrieve ethdev extended statistics names */
2076 int
2077 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2078         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2079         uint64_t *ids)
2080 {
2081         struct rte_eth_xstat_name *xstats_names_copy;
2082         unsigned int no_basic_stat_requested = 1;
2083         unsigned int no_ext_stat_requested = 1;
2084         unsigned int expected_entries;
2085         unsigned int basic_count;
2086         struct rte_eth_dev *dev;
2087         unsigned int i;
2088         int ret;
2089
2090         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2091         dev = &rte_eth_devices[port_id];
2092
2093         basic_count = get_xstats_basic_count(dev);
2094         ret = get_xstats_count(port_id);
2095         if (ret < 0)
2096                 return ret;
2097         expected_entries = (unsigned int)ret;
2098
2099         /* Return max number of stats if no ids given */
2100         if (!ids) {
2101                 if (!xstats_names)
2102                         return expected_entries;
2103                 else if (size < expected_entries)
2104                         return expected_entries;
2105         }
2106
2107         if (ids && !xstats_names)
2108                 return -EINVAL;
2109
2110         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2111                 uint64_t ids_copy[size];
2112
2113                 for (i = 0; i < size; i++) {
2114                         if (ids[i] < basic_count) {
2115                                 no_basic_stat_requested = 0;
2116                                 break;
2117                         }
2118
2119                         /*
2120                          * Convert ids to xstats ids that PMD knows.
2121                          * ids known by user are basic + extended stats.
2122                          */
2123                         ids_copy[i] = ids[i] - basic_count;
2124                 }
2125
2126                 if (no_basic_stat_requested)
2127                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2128                                         xstats_names, ids_copy, size);
2129         }
2130
2131         /* Retrieve all stats */
2132         if (!ids) {
2133                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2134                                 expected_entries);
2135                 if (num_stats < 0 || num_stats > (int)expected_entries)
2136                         return num_stats;
2137                 else
2138                         return expected_entries;
2139         }
2140
2141         xstats_names_copy = calloc(expected_entries,
2142                 sizeof(struct rte_eth_xstat_name));
2143
2144         if (!xstats_names_copy) {
2145                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2146                 return -ENOMEM;
2147         }
2148
2149         if (ids) {
2150                 for (i = 0; i < size; i++) {
2151                         if (ids[i] >= basic_count) {
2152                                 no_ext_stat_requested = 0;
2153                                 break;
2154                         }
2155                 }
2156         }
2157
2158         /* Fill xstats_names_copy structure */
2159         if (ids && no_ext_stat_requested) {
2160                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2161         } else {
2162                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2163                         expected_entries);
2164                 if (ret < 0) {
2165                         free(xstats_names_copy);
2166                         return ret;
2167                 }
2168         }
2169
2170         /* Filter stats */
2171         for (i = 0; i < size; i++) {
2172                 if (ids[i] >= expected_entries) {
2173                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2174                         free(xstats_names_copy);
2175                         return -1;
2176                 }
2177                 xstats_names[i] = xstats_names_copy[ids[i]];
2178         }
2179
2180         free(xstats_names_copy);
2181         return size;
2182 }
2183
2184 int
2185 rte_eth_xstats_get_names(uint16_t port_id,
2186         struct rte_eth_xstat_name *xstats_names,
2187         unsigned int size)
2188 {
2189         struct rte_eth_dev *dev;
2190         int cnt_used_entries;
2191         int cnt_expected_entries;
2192         int cnt_driver_entries;
2193
2194         cnt_expected_entries = get_xstats_count(port_id);
2195         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2196                         (int)size < cnt_expected_entries)
2197                 return cnt_expected_entries;
2198
2199         /* port_id checked in get_xstats_count() */
2200         dev = &rte_eth_devices[port_id];
2201
2202         cnt_used_entries = rte_eth_basic_stats_get_names(
2203                 dev, xstats_names);
2204
2205         if (dev->dev_ops->xstats_get_names != NULL) {
2206                 /* If there are any driver-specific xstats, append them
2207                  * to end of list.
2208                  */
2209                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2210                         dev,
2211                         xstats_names + cnt_used_entries,
2212                         size - cnt_used_entries);
2213                 if (cnt_driver_entries < 0)
2214                         return eth_err(port_id, cnt_driver_entries);
2215                 cnt_used_entries += cnt_driver_entries;
2216         }
2217
2218         return cnt_used_entries;
2219 }
2220
2222 static int
2223 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2224 {
2225         struct rte_eth_dev *dev;
2226         struct rte_eth_stats eth_stats;
2227         unsigned int count = 0, i, q;
2228         uint64_t val, *stats_ptr;
2229         uint16_t nb_rxqs, nb_txqs;
2230         int ret;
2231
2232         ret = rte_eth_stats_get(port_id, &eth_stats);
2233         if (ret < 0)
2234                 return ret;
2235
2236         dev = &rte_eth_devices[port_id];
2237
2238         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2239         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2240
2241         /* global stats */
2242         for (i = 0; i < RTE_NB_STATS; i++) {
2243                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2244                                         rte_stats_strings[i].offset);
2245                 val = *stats_ptr;
2246                 xstats[count++].value = val;
2247         }
2248
2249         /* per-rxq stats */
2250         for (q = 0; q < nb_rxqs; q++) {
2251                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2252                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2253                                         rte_rxq_stats_strings[i].offset +
2254                                         q * sizeof(uint64_t));
2255                         val = *stats_ptr;
2256                         xstats[count++].value = val;
2257                 }
2258         }
2259
2260         /* per-txq stats */
2261         for (q = 0; q < nb_txqs; q++) {
2262                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2263                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2264                                         rte_txq_stats_strings[i].offset +
2265                                         q * sizeof(uint64_t));
2266                         val = *stats_ptr;
2267                         xstats[count++].value = val;
2268                 }
2269         }
2270         return count;
2271 }
2272
2273 /* retrieve ethdev extended statistics */
2274 int
2275 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2276                          uint64_t *values, unsigned int size)
2277 {
2278         unsigned int no_basic_stat_requested = 1;
2279         unsigned int no_ext_stat_requested = 1;
2280         unsigned int num_xstats_filled;
2281         unsigned int basic_count;
2282         uint16_t expected_entries;
2283         struct rte_eth_dev *dev;
2284         unsigned int i;
2285         int ret;
2286
2287         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2288         ret = get_xstats_count(port_id);
2289         if (ret < 0)
2290                 return ret;
2291         expected_entries = (uint16_t)ret;
2292         struct rte_eth_xstat xstats[expected_entries];
2293         dev = &rte_eth_devices[port_id];
2294         basic_count = get_xstats_basic_count(dev);
2295
2296         /* Return max number of stats if no ids given */
2297         if (!ids) {
2298                 if (!values)
2299                         return expected_entries;
2300                 else if (size < expected_entries)
2301                         return expected_entries;
2302         }
2303
2304         if (ids && !values)
2305                 return -EINVAL;
2306
2307         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2309                 uint64_t ids_copy[size];
2310
2311                 for (i = 0; i < size; i++) {
2312                         if (ids[i] < basic_count) {
2313                                 no_basic_stat_requested = 0;
2314                                 break;
2315                         }
2316
2317                         /*
2318                          * Convert ids to xstats ids that PMD knows.
2319                          * ids known by user are basic + extended stats.
2320                          */
2321                         ids_copy[i] = ids[i] - basic_count;
2322                 }
2323
2324                 if (no_basic_stat_requested)
2325                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2326                                         values, size);
2327         }
2328
2329         if (ids) {
2330                 for (i = 0; i < size; i++) {
2331                         if (ids[i] >= basic_count) {
2332                                 no_ext_stat_requested = 0;
2333                                 break;
2334                         }
2335                 }
2336         }
2337
2338         /* Fill the xstats structure */
2339         if (ids && no_ext_stat_requested)
2340                 ret = rte_eth_basic_stats_get(port_id, xstats);
2341         else
2342                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2343
2344         if (ret < 0)
2345                 return ret;
2346         num_xstats_filled = (unsigned int)ret;
2347
2348         /* Return all stats */
2349         if (!ids) {
2350                 for (i = 0; i < num_xstats_filled; i++)
2351                         values[i] = xstats[i].value;
2352                 return expected_entries;
2353         }
2354
2355         /* Filter stats */
2356         for (i = 0; i < size; i++) {
2357                 if (ids[i] >= expected_entries) {
2358                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2359                         return -1;
2360                 }
2361                 values[i] = xstats[ids[i]].value;
2362         }
2363         return size;
2364 }
2365
2366 int
2367 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2368         unsigned int n)
2369 {
2370         struct rte_eth_dev *dev;
2371         unsigned int count = 0, i;
2372         signed int xcount = 0;
2373         uint16_t nb_rxqs, nb_txqs;
2374         int ret;
2375
2376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2377
2378         dev = &rte_eth_devices[port_id];
2379
2380         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2381         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2382
2383         /* Return generic statistics */
2384         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2385                 (nb_txqs * RTE_NB_TXQ_STATS);
2386
2387         /* implemented by the driver */
2388         if (dev->dev_ops->xstats_get != NULL) {
2389                 /* Retrieve the xstats from the driver at the end of the
2390                  * xstats struct.
2391                  */
2392                 xcount = (*dev->dev_ops->xstats_get)(dev,
2393                                      xstats ? xstats + count : NULL,
2394                                      (n > count) ? n - count : 0);
2395
2396                 if (xcount < 0)
2397                         return eth_err(port_id, xcount);
2398         }
2399
2400         if (n < count + xcount || xstats == NULL)
2401                 return count + xcount;
2402
2403         /* now fill the xstats structure */
2404         ret = rte_eth_basic_stats_get(port_id, xstats);
2405         if (ret < 0)
2406                 return ret;
2407         count = ret;
2408
2409         for (i = 0; i < count; i++)
2410                 xstats[i].id = i;
2411         /* add an offset to driver-specific stats */
2412         for ( ; i < count + xcount; i++)
2413                 xstats[i].id += count;
2414
2415         return count + xcount;
2416 }
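
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): the usual two-pass pattern for dumping all xstats.
 * Query the count with NULL arrays, allocate, then fetch names and
 * values; the id in each value indexes the name table.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *vals;
        int i, n;

        n = rte_eth_xstats_get(port_id, NULL, 0);
        if (n <= 0)
                return n;

        names = calloc(n, sizeof(*names));
        vals = calloc(n, sizeof(*vals));
        if (names != NULL && vals != NULL &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, vals, n) == n)
                for (i = 0; i < n; i++)
                        printf("%s: %"PRIu64"\n",
                                names[vals[i].id].name, vals[i].value);
        free(names);
        free(vals);
        return 0;
}
#endif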
2417
2418 /* reset ethdev extended statistics */
2419 void
2420 rte_eth_xstats_reset(uint16_t port_id)
2421 {
2422         struct rte_eth_dev *dev;
2423
2424         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2425         dev = &rte_eth_devices[port_id];
2426
2427         /* implemented by the driver */
2428         if (dev->dev_ops->xstats_reset != NULL) {
2429                 (*dev->dev_ops->xstats_reset)(dev);
2430                 return;
2431         }
2432
2433         /* fallback to default */
2434         rte_eth_stats_reset(port_id);
2435 }
2436
2437 static int
2438 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2439                 uint8_t is_rx)
2440 {
2441         struct rte_eth_dev *dev;
2442
2443         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2444
2445         dev = &rte_eth_devices[port_id];
2446
2447         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2448         return (*dev->dev_ops->queue_stats_mapping_set)
2449                         (dev, queue_id, stat_idx, is_rx);
2450 }
2451
2453 int
2454 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2455                 uint8_t stat_idx)
2456 {
2457         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2458                                                 stat_idx, STAT_QMAP_TX));
2459 }
2460
2462 int
2463 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2464                 uint8_t stat_idx)
2465 {
2466         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2467                                                 stat_idx, STAT_QMAP_RX));
2468 }
2469
2470 int
2471 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2472 {
2473         struct rte_eth_dev *dev;
2474
2475         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2476         dev = &rte_eth_devices[port_id];
2477
2478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2479         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2480                                                         fw_version, fw_size));
2481 }
2482
2483 void
2484 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2485 {
2486         struct rte_eth_dev *dev;
2487         struct rte_eth_txconf *txconf;
2488         const struct rte_eth_desc_lim lim = {
2489                 .nb_max = UINT16_MAX,
2490                 .nb_min = 0,
2491                 .nb_align = 1,
2492         };
2493
2494         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2495         dev = &rte_eth_devices[port_id];
2496
2497         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2498         dev_info->rx_desc_lim = lim;
2499         dev_info->tx_desc_lim = lim;
2500         dev_info->device = dev->device;
2501
2502         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2503         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2504         dev_info->driver_name = dev->device->driver->name;
2505         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2506         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2507
2508         dev_info->dev_flags = &dev->data->dev_flags;
2509         txconf = &dev_info->default_txconf;
2510         /* convert offloads to txq_flags to support legacy apps */
2511         rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
2512 }
2513
2514 int
2515 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2516                                  uint32_t *ptypes, int num)
2517 {
2518         int i, j;
2519         struct rte_eth_dev *dev;
2520         const uint32_t *all_ptypes;
2521
2522         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2523         dev = &rte_eth_devices[port_id];
2524         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2525         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2526
2527         if (!all_ptypes)
2528                 return 0;
2529
2530         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2531                 if (all_ptypes[i] & ptype_mask) {
2532                         if (j < num)
2533                                 ptypes[j] = all_ptypes[i];
2534                         j++;
2535                 }
2536
2537         return j;
2538 }
2539
2540 void
2541 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2542 {
2543         struct rte_eth_dev *dev;
2544
2545         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2546         dev = &rte_eth_devices[port_id];
2547         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2548 }
2549
2551 int
2552 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2553 {
2554         struct rte_eth_dev *dev;
2555
2556         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2557
2558         dev = &rte_eth_devices[port_id];
2559         *mtu = dev->data->mtu;
2560         return 0;
2561 }
2562
2563 int
2564 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2565 {
2566         int ret;
2567         struct rte_eth_dev *dev;
2568
2569         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2570         dev = &rte_eth_devices[port_id];
2571         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2572
2573         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2574         if (!ret)
2575                 dev->data->mtu = mtu;
2576
2577         return eth_err(port_id, ret);
2578 }
2579
2580 int
2581 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2582 {
2583         struct rte_eth_dev *dev;
2584         int ret;
2585
2586         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2587         dev = &rte_eth_devices[port_id];
2588         if (!(dev->data->dev_conf.rxmode.offloads &
2589               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2590                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2591                         port_id);
2592                 return -ENOSYS;
2593         }
2594
2595         if (vlan_id > 4095) {
2596                 RTE_ETHDEV_LOG(ERR, "Port %u: invalid vlan_id=%u (> 4095)\n",
2597                         port_id, vlan_id);
2598                 return -EINVAL;
2599         }
2600         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2601
2602         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2603         if (ret == 0) {
2604                 struct rte_vlan_filter_conf *vfc;
2605                 int vidx;
2606                 int vbit;
2607
2608                 vfc = &dev->data->vlan_filter_conf;
2609                 vidx = vlan_id / 64;
2610                 vbit = vlan_id % 64;
2611
2612                 if (on)
2613                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2614                 else
2615                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2616         }
2617
2618         return eth_err(port_id, ret);
2619 }
2620
2621 int
2622 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2623                                     int on)
2624 {
2625         struct rte_eth_dev *dev;
2626
2627         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2628         dev = &rte_eth_devices[port_id];
2629         if (rx_queue_id >= dev->data->nb_rx_queues) {
2630                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
2631                 return -EINVAL;
2632         }
2633
2634         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2635         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2636
2637         return 0;
2638 }
2639
2640 int
2641 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2642                                 enum rte_vlan_type vlan_type,
2643                                 uint16_t tpid)
2644 {
2645         struct rte_eth_dev *dev;
2646
2647         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2648         dev = &rte_eth_devices[port_id];
2649         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2650
2651         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2652                                                                tpid));
2653 }
2654
2655 int
2656 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2657 {
2658         struct rte_eth_dev *dev;
2659         int ret = 0;
2660         int mask = 0;
2661         int cur, org = 0;
2662         uint64_t orig_offloads;
2663
2664         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2665         dev = &rte_eth_devices[port_id];
2666
2667         /* save original values in case of failure */
2668         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2669
2670         /* check which options were changed by the application */
2671         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2672         org = !!(dev->data->dev_conf.rxmode.offloads &
2673                  DEV_RX_OFFLOAD_VLAN_STRIP);
2674         if (cur != org) {
2675                 if (cur)
2676                         dev->data->dev_conf.rxmode.offloads |=
2677                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2678                 else
2679                         dev->data->dev_conf.rxmode.offloads &=
2680                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2681                 mask |= ETH_VLAN_STRIP_MASK;
2682         }
2683
2684         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2685         org = !!(dev->data->dev_conf.rxmode.offloads &
2686                  DEV_RX_OFFLOAD_VLAN_FILTER);
2687         if (cur != org) {
2688                 if (cur)
2689                         dev->data->dev_conf.rxmode.offloads |=
2690                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2691                 else
2692                         dev->data->dev_conf.rxmode.offloads &=
2693                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2694                 mask |= ETH_VLAN_FILTER_MASK;
2695         }
2696
2697         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2698         org = !!(dev->data->dev_conf.rxmode.offloads &
2699                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2700         if (cur != org) {
2701                 if (cur)
2702                         dev->data->dev_conf.rxmode.offloads |=
2703                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2704                 else
2705                         dev->data->dev_conf.rxmode.offloads &=
2706                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2707                 mask |= ETH_VLAN_EXTEND_MASK;
2708         }
2709
2710         /* no change */
2711         if (mask == 0)
2712                 return ret;
2713
2714         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2715         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2716         if (ret) {
2717                 /* hit an error, restore the original values */
2718                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2719         }
2720
2721         return eth_err(port_id, ret);
2722 }
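
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): turn VLAN stripping on for a port while leaving
 * the other VLAN offload settings untouched.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_enable_vlan_strip(uint16_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                        mask | ETH_VLAN_STRIP_OFFLOAD);
}
#endif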
2723
2724 int
2725 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2726 {
2727         struct rte_eth_dev *dev;
2728         int ret = 0;
2729
2730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2731         dev = &rte_eth_devices[port_id];
2732
2733         if (dev->data->dev_conf.rxmode.offloads &
2734             DEV_RX_OFFLOAD_VLAN_STRIP)
2735                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2736
2737         if (dev->data->dev_conf.rxmode.offloads &
2738             DEV_RX_OFFLOAD_VLAN_FILTER)
2739                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2740
2741         if (dev->data->dev_conf.rxmode.offloads &
2742             DEV_RX_OFFLOAD_VLAN_EXTEND)
2743                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2744
2745         return ret;
2746 }
2747
2748 int
2749 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2750 {
2751         struct rte_eth_dev *dev;
2752
2753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2754         dev = &rte_eth_devices[port_id];
2755         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2756
2757         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2758 }
2759
2760 int
2761 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2762 {
2763         struct rte_eth_dev *dev;
2764
2765         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2766         dev = &rte_eth_devices[port_id];
2767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2768         memset(fc_conf, 0, sizeof(*fc_conf));
2769         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2770 }
2771
2772 int
2773 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2774 {
2775         struct rte_eth_dev *dev;
2776
2777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2778         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2779                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
2780                 return -EINVAL;
2781         }
2782
2783         dev = &rte_eth_devices[port_id];
2784         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2785         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2786 }
2787
2788 int
2789 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2790                                    struct rte_eth_pfc_conf *pfc_conf)
2791 {
2792         struct rte_eth_dev *dev;
2793
2794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2795         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2796                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
2797                 return -EINVAL;
2798         }
2799
2800         dev = &rte_eth_devices[port_id];
2801         /* High water, low water validation are device specific */
2802         if (*dev->dev_ops->priority_flow_ctrl_set)
2803                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2804                                         (dev, pfc_conf));
2805         return -ENOTSUP;
2806 }
2807
2808 static int
2809 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2810                         uint16_t reta_size)
2811 {
2812         uint16_t i, num;
2813
2814         if (!reta_conf)
2815                 return -EINVAL;
2816
2817         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2818         for (i = 0; i < num; i++) {
2819                 if (reta_conf[i].mask)
2820                         return 0;
2821         }
2822
2823         return -EINVAL;
2824 }
2825
2826 static int
2827 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2828                          uint16_t reta_size,
2829                          uint16_t max_rxq)
2830 {
2831         uint16_t i, idx, shift;
2832
2833         if (!reta_conf)
2834                 return -EINVAL;
2835
2836         if (max_rxq == 0) {
2837                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
2838                 return -EINVAL;
2839         }
2840
2841         for (i = 0; i < reta_size; i++) {
2842                 idx = i / RTE_RETA_GROUP_SIZE;
2843                 shift = i % RTE_RETA_GROUP_SIZE;
2844                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2845                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2846                         RTE_ETHDEV_LOG(ERR,
2847                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
2848                                 idx, shift,
2849                                 reta_conf[idx].reta[shift], max_rxq);
2850                         return -EINVAL;
2851                 }
2852         }
2853
2854         return 0;
2855 }
2856
2857 int
2858 rte_eth_dev_rss_reta_update(uint16_t port_id,
2859                             struct rte_eth_rss_reta_entry64 *reta_conf,
2860                             uint16_t reta_size)
2861 {
2862         struct rte_eth_dev *dev;
2863         int ret;
2864
2865         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2866         /* Check mask bits */
2867         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2868         if (ret < 0)
2869                 return ret;
2870
2871         dev = &rte_eth_devices[port_id];
2872
2873         /* Check entry value */
2874         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2875                                 dev->data->nb_rx_queues);
2876         if (ret < 0)
2877                 return ret;
2878
2879         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2880         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2881                                                              reta_size));
2882 }
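
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): program a round-robin redirection table spreading
 * traffic across nb_queues Rx queues. Each 64-entry group carries its
 * own mask; a set mask bit marks an entry to be updated.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_reta_round_robin(uint16_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64
                reta_conf[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
        struct rte_eth_dev_info dev_info;
        uint16_t i, idx, shift;

        rte_eth_dev_info_get(port_id, &dev_info);
        if (nb_queues == 0 || dev_info.reta_size == 0 ||
            dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= UINT64_C(1) << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                        dev_info.reta_size);
}
#endif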
2883
2884 int
2885 rte_eth_dev_rss_reta_query(uint16_t port_id,
2886                            struct rte_eth_rss_reta_entry64 *reta_conf,
2887                            uint16_t reta_size)
2888 {
2889         struct rte_eth_dev *dev;
2890         int ret;
2891
2892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2893
2894         /* Check mask bits */
2895         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2896         if (ret < 0)
2897                 return ret;
2898
2899         dev = &rte_eth_devices[port_id];
2900         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2901         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2902                                                             reta_size));
2903 }
2904
2905 int
2906 rte_eth_dev_rss_hash_update(uint16_t port_id,
2907                             struct rte_eth_rss_conf *rss_conf)
2908 {
2909         struct rte_eth_dev *dev;
2910         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2911
2912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2913         dev = &rte_eth_devices[port_id];
2914         rte_eth_dev_info_get(port_id, &dev_info);
2915         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2916             dev_info.flow_type_rss_offloads) {
2917                 RTE_ETHDEV_LOG(ERR,
2918                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid values: 0x%"PRIx64"\n",
2919                         port_id, rss_conf->rss_hf,
2920                         dev_info.flow_type_rss_offloads);
2921                 return -EINVAL;
2922         }
2923         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2924         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2925                                                                  rss_conf));
2926 }
2927
2928 int
2929 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2930                               struct rte_eth_rss_conf *rss_conf)
2931 {
2932         struct rte_eth_dev *dev;
2933
2934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2935         dev = &rte_eth_devices[port_id];
2936         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2937         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2938                                                                    rss_conf));
2939 }
2940
2941 int
2942 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2943                                 struct rte_eth_udp_tunnel *udp_tunnel)
2944 {
2945         struct rte_eth_dev *dev;
2946
2947         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2948         if (udp_tunnel == NULL) {
2949                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2950                 return -EINVAL;
2951         }
2952
2953         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2954                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2955                 return -EINVAL;
2956         }
2957
2958         dev = &rte_eth_devices[port_id];
2959         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2960         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2961                                                                 udp_tunnel));
2962 }
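
/*
 * Editor's illustrative sketch, not part of the library (hypothetical
 * guard as above): tell the port to recognize the IANA-assigned VXLAN
 * UDP port so the hardware can parse the encapsulated traffic.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_add_vxlan_port(uint16_t port_id)
{
        struct rte_eth_udp_tunnel tunnel = {
                .udp_port = 4789, /* IANA-assigned VXLAN port */
                .prot_type = RTE_TUNNEL_TYPE_VXLAN,
        };

        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif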
2963
2964 int
2965 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2966                                    struct rte_eth_udp_tunnel *udp_tunnel)
2967 {
2968         struct rte_eth_dev *dev;
2969
2970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2971         dev = &rte_eth_devices[port_id];
2972
2973         if (udp_tunnel == NULL) {
2974                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
2975                 return -EINVAL;
2976         }
2977
2978         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2979                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
2980                 return -EINVAL;
2981         }
2982
2983         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2984         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2985                                                                 udp_tunnel));
2986 }
2987
2988 int
2989 rte_eth_led_on(uint16_t port_id)
2990 {
2991         struct rte_eth_dev *dev;
2992
2993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2994         dev = &rte_eth_devices[port_id];
2995         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2996         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2997 }
2998
2999 int
3000 rte_eth_led_off(uint16_t port_id)
3001 {
3002         struct rte_eth_dev *dev;
3003
3004         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3005         dev = &rte_eth_devices[port_id];
3006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3007         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3008 }
3009
3010 /*
3011  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3012  * an empty spot.
3013  */
3014 static int
3015 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3016 {
3017         struct rte_eth_dev_info dev_info;
3018         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3019         unsigned i;
3020
3021         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3022         rte_eth_dev_info_get(port_id, &dev_info);
3023
3024         for (i = 0; i < dev_info.max_mac_addrs; i++)
3025                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
3026                         return i;
3027
3028         return -1;
3029 }
3030
3031 static const struct ether_addr null_mac_addr;
3032
3033 int
3034 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
3035                         uint32_t pool)
3036 {
3037         struct rte_eth_dev *dev;
3038         int index;
3039         uint64_t pool_mask;
3040         int ret;
3041
3042         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3043         dev = &rte_eth_devices[port_id];
3044         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3045
3046         if (is_zero_ether_addr(addr)) {
3047                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3048                         port_id);
3049                 return -EINVAL;
3050         }
3051         if (pool >= ETH_64_POOLS) {
3052                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3053                 return -EINVAL;
3054         }
3055
3056         index = get_mac_addr_index(port_id, addr);
3057         if (index < 0) {
3058                 index = get_mac_addr_index(port_id, &null_mac_addr);
3059                 if (index < 0) {
3060                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3061                                 port_id);
3062                         return -ENOSPC;
3063                 }
3064         } else {
3065                 pool_mask = dev->data->mac_pool_sel[index];
3066
3067                 /* If both the MAC address and the pool are already set, do nothing */
3068                 if (pool_mask & (1ULL << pool))
3069                         return 0;
3070         }
3071
3072         /* Update NIC */
3073         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3074
3075         if (ret == 0) {
3076                 /* Update address in NIC data structure */
3077                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3078
3079                 /* Update pool bitmap in NIC data structure */
3080                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3081         }
3082
3083         return eth_err(port_id, ret);
3084 }
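
/*
 * Usage sketch (illustrative): adding a secondary unicast address to
 * VMDq pool 0. The locally administered address below is an arbitrary
 * example value.
 *
 *	struct ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */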
3085
3086 int
3087 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3088 {
3089         struct rte_eth_dev *dev;
3090         int index;
3091
3092         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3093         dev = &rte_eth_devices[port_id];
3094         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3095
3096         index = get_mac_addr_index(port_id, addr);
3097         if (index == 0) {
3098                 RTE_ETHDEV_LOG(ERR,
3099                         "Port %u: Cannot remove default MAC address\n",
3100                         port_id);
3101                 return -EADDRINUSE;
3102         } else if (index < 0)
3103                 return 0;  /* Do nothing if address wasn't found */
3104
3105         /* Update NIC */
3106         (*dev->dev_ops->mac_addr_remove)(dev, index);
3107
3108         /* Update address in NIC data structure */
3109         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3110
3111         /* reset pool bitmap */
3112         dev->data->mac_pool_sel[index] = 0;
3113
3114         return 0;
3115 }
3116
3117 int
3118 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3119 {
3120         struct rte_eth_dev *dev;
3121         int ret;
3122
3123         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3124
3125         if (!is_valid_assigned_ether_addr(addr))
3126                 return -EINVAL;
3127
3128         dev = &rte_eth_devices[port_id];
3129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3130
3131         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3132         if (ret < 0)
3133                 return ret;
3134
3135         /* Update default address in NIC data structure */
3136         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3137
3138         return 0;
3139 }
3140
3142 /*
3143  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3144  * an empty spot.
3145  */
3146 static int
3147 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3148 {
3149         struct rte_eth_dev_info dev_info;
3150         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3151         unsigned i;
3152
3153         rte_eth_dev_info_get(port_id, &dev_info);
3154         if (!dev->data->hash_mac_addrs)
3155                 return -1;
3156
3157         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3158                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3159                         ETHER_ADDR_LEN) == 0)
3160                         return i;
3161
3162         return -1;
3163 }
3164
3165 int
3166 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3167                                 uint8_t on)
3168 {
3169         int index;
3170         int ret;
3171         struct rte_eth_dev *dev;
3172
3173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3174
3175         dev = &rte_eth_devices[port_id];
3176         if (is_zero_ether_addr(addr)) {
3177                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3178                         port_id);
3179                 return -EINVAL;
3180         }
3181
3182         index = get_hash_mac_addr_index(port_id, addr);
3183         /* Check if it's already there, and do nothing */
3184         if ((index >= 0) && on)
3185                 return 0;
3186
3187         if (index < 0) {
3188                 if (!on) {
3189                         RTE_ETHDEV_LOG(ERR,
3190                                 "Port %u: the MAC address was not set in UTA\n",
3191                                 port_id);
3192                         return -EINVAL;
3193                 }
3194
3195                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3196                 if (index < 0) {
3197                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3198                                 port_id);
3199                         return -ENOSPC;
3200                 }
3201         }
3202
3203         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3204         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3205         if (ret == 0) {
3206                 /* Update address in NIC data structure */
3207                 if (on)
3208                         ether_addr_copy(addr,
3209                                         &dev->data->hash_mac_addrs[index]);
3210                 else
3211                         ether_addr_copy(&null_mac_addr,
3212                                         &dev->data->hash_mac_addrs[index]);
3213         }
3214
3215         return eth_err(port_id, ret);
3216 }
3217
3218 int
3219 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3220 {
3221         struct rte_eth_dev *dev;
3222
3223         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3224
3225         dev = &rte_eth_devices[port_id];
3226
3227         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3228         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3229                                                                        on));
3230 }
3231
3232 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3233                                         uint16_t tx_rate)
3234 {
3235         struct rte_eth_dev *dev;
3236         struct rte_eth_dev_info dev_info;
3237         struct rte_eth_link link;
3238
3239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3240
3241         dev = &rte_eth_devices[port_id];
3242         rte_eth_dev_info_get(port_id, &dev_info);
3243         link = dev->data->dev_link;
3244
3245         if (queue_idx >= dev_info.max_tx_queues) {
3246                 RTE_ETHDEV_LOG(ERR,
3247                         "Set queue rate limit: port %u: invalid queue ID %u\n",
3248                         port_id, queue_idx);
3249                 return -EINVAL;
3250         }
3251
3252         if (tx_rate > link.link_speed) {
3253                 RTE_ETHDEV_LOG(ERR,
3254                         "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
3255                         tx_rate, link.link_speed);
3256                 return -EINVAL;
3257         }
3258
3259         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3260         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3261                                                         queue_idx, tx_rate));
3262 }
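
/*
 * Usage sketch (illustrative): cap Tx queue 0 of a port at 100 Mbps.
 * The rate is given in Mbps and, as checked above, must not exceed the
 * current link speed.
 *
 *	ret = rte_eth_set_queue_rate_limit(port_id, 0, 100);
 */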
3263
3264 int
3265 rte_eth_mirror_rule_set(uint16_t port_id,
3266                         struct rte_eth_mirror_conf *mirror_conf,
3267                         uint8_t rule_id, uint8_t on)
3268 {
3269         struct rte_eth_dev *dev;
3270
3271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3272         if (mirror_conf->rule_type == 0) {
3273                 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
3274                 return -EINVAL;
3275         }
3276
3277         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3278                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3279                         ETH_64_POOLS - 1);
3280                 return -EINVAL;
3281         }
3282
3283         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3284              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3285             (mirror_conf->pool_mask == 0)) {
3286                 RTE_ETHDEV_LOG(ERR,
3287                         "Invalid mirror pool, pool mask can not be 0\n");
3288                 return -EINVAL;
3289         }
3290
3291         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3292             mirror_conf->vlan.vlan_mask == 0) {
3293                 RTE_ETHDEV_LOG(ERR,
3294                         "Invalid vlan mask, vlan mask can not be 0\n");
3295                 return -EINVAL;
3296         }
3297
3298         dev = &rte_eth_devices[port_id];
3299         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3300
3301         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3302                                                 mirror_conf, rule_id, on));
3303 }
3304
3305 int
3306 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3307 {
3308         struct rte_eth_dev *dev;
3309
3310         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3311
3312         dev = &rte_eth_devices[port_id];
3313         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3314
3315         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3316                                                                    rule_id));
3317 }
3318
3319 RTE_INIT(eth_dev_init_cb_lists)
3320 {
3321         int i;
3322
3323         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3324                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3325 }
3326
3327 int
3328 rte_eth_dev_callback_register(uint16_t port_id,
3329                         enum rte_eth_event_type event,
3330                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3331 {
3332         struct rte_eth_dev *dev;
3333         struct rte_eth_dev_callback *user_cb;
3334         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3335         uint16_t last_port;
3336
3337         if (!cb_fn)
3338                 return -EINVAL;
3339
3340         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3341                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3342                 return -EINVAL;
3343         }
3344
3345         if (port_id == RTE_ETH_ALL) {
3346                 next_port = 0;
3347                 last_port = RTE_MAX_ETHPORTS - 1;
3348         } else {
3349                 next_port = last_port = port_id;
3350         }
3351
3352         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3353
3354         do {
3355                 dev = &rte_eth_devices[next_port];
3356
3357                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3358                         if (user_cb->cb_fn == cb_fn &&
3359                                 user_cb->cb_arg == cb_arg &&
3360                                 user_cb->event == event) {
3361                                 break;
3362                         }
3363                 }
3364
3365                 /* create a new callback. */
3366                 if (user_cb == NULL) {
3367                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3368                                 sizeof(struct rte_eth_dev_callback), 0);
3369                         if (user_cb != NULL) {
3370                                 user_cb->cb_fn = cb_fn;
3371                                 user_cb->cb_arg = cb_arg;
3372                                 user_cb->event = event;
3373                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3374                                                   user_cb, next);
3375                         } else {
3376                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3377                                 rte_eth_dev_callback_unregister(port_id, event,
3378                                                                 cb_fn, cb_arg);
3379                                 return -ENOMEM;
3380                         }
3381
3382                 }
3383         } while (++next_port <= last_port);
3384
3385         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3386         return 0;
3387 }
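
/*
 * Usage sketch (illustrative): registering a link-status-change handler
 * on all ports at once. The handler follows the rte_eth_dev_cb_fn
 * prototype; printing is just an example action.
 *
 *	static int
 *	lsc_handler(uint16_t port_id, enum rte_eth_event_type event,
 *		    void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, lsc_handler, NULL);
 */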
3388
3389 int
3390 rte_eth_dev_callback_unregister(uint16_t port_id,
3391                         enum rte_eth_event_type event,
3392                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3393 {
3394         int ret;
3395         struct rte_eth_dev *dev;
3396         struct rte_eth_dev_callback *cb, *next;
3397         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3398         uint16_t last_port;
3399
3400         if (!cb_fn)
3401                 return -EINVAL;
3402
3403         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3404                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3405                 return -EINVAL;
3406         }
3407
3408         if (port_id == RTE_ETH_ALL) {
3409                 next_port = 0;
3410                 last_port = RTE_MAX_ETHPORTS - 1;
3411         } else {
3412                 next_port = last_port = port_id;
3413         }
3414
3415         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3416
3417         do {
3418                 dev = &rte_eth_devices[next_port];
3419                 ret = 0;
3420                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3421                      cb = next) {
3422
3423                         next = TAILQ_NEXT(cb, next);
3424
3425                         if (cb->cb_fn != cb_fn || cb->event != event ||
3426                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3427                                 continue;
3428
3429                         /*
3430                          * if this callback is not executing right now,
3431                          * then remove it.
3432                          */
3433                         if (cb->active == 0) {
3434                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3435                                 rte_free(cb);
3436                         } else {
3437                                 ret = -EAGAIN;
3438                         }
3439                 }
3440         } while (++next_port <= last_port);
3441
3442         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3443         return ret;
3444 }
3445
3446 int
3447 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3448         enum rte_eth_event_type event, void *ret_param)
3449 {
3450         struct rte_eth_dev_callback *cb_lst;
3451         struct rte_eth_dev_callback dev_cb;
3452         int rc = 0;
3453
3454         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3455         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3456                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3457                         continue;
3458                 dev_cb = *cb_lst;
3459                 cb_lst->active = 1;
3460                 if (ret_param != NULL)
3461                         dev_cb.ret_param = ret_param;
3462
3463                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3464                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3465                                 dev_cb.cb_arg, dev_cb.ret_param);
3466                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3467                 cb_lst->active = 0;
3468         }
3469         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3470         return rc;
3471 }
3472
3473 void
3474 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3475 {
3476         if (dev == NULL)
3477                 return;
3478
3479         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3480
3481         dev->state = RTE_ETH_DEV_ATTACHED;
3482 }
3483
3484 int
3485 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3486 {
3487         uint32_t vec;
3488         struct rte_eth_dev *dev;
3489         struct rte_intr_handle *intr_handle;
3490         uint16_t qid;
3491         int rc;
3492
3493         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3494
3495         dev = &rte_eth_devices[port_id];
3496
3497         if (!dev->intr_handle) {
3498                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3499                 return -ENOTSUP;
3500         }
3501
3502         intr_handle = dev->intr_handle;
3503         if (!intr_handle->intr_vec) {
3504                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3505                 return -EPERM;
3506         }
3507
3508         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3509                 vec = intr_handle->intr_vec[qid];
3510                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3511                 if (rc && rc != -EEXIST) {
3512                         RTE_ETHDEV_LOG(ERR,
3513                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3514                                 port_id, qid, op, epfd, vec);
3515                 }
3516         }
3517
3518         return 0;
3519 }
3520
3521 const struct rte_memzone *
3522 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3523                          uint16_t queue_id, size_t size, unsigned align,
3524                          int socket_id)
3525 {
3526         char z_name[RTE_MEMZONE_NAMESIZE];
3527         const struct rte_memzone *mz;
3528
3529         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3530                  dev->device->driver->name, ring_name,
3531                  dev->data->port_id, queue_id);
3532
3533         mz = rte_memzone_lookup(z_name);
3534         if (mz)
3535                 return mz;
3536
3537         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3538                         RTE_MEMZONE_IOVA_CONTIG, align);
3539 }
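
/*
 * Usage sketch (illustrative): a PMD reserving descriptor-ring memory
 * during Rx queue setup. "ring_size", "MY_RING_ALIGN", "queue_idx" and
 * "socket_id" are hypothetical driver-side values.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *			ring_size, MY_RING_ALIGN, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */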
3540
3541 int __rte_experimental
3542 rte_eth_dev_create(struct rte_device *device, const char *name,
3543         size_t priv_data_size,
3544         ethdev_bus_specific_init ethdev_bus_specific_init,
3545         void *bus_init_params,
3546         ethdev_init_t ethdev_init, void *init_params)
3547 {
3548         struct rte_eth_dev *ethdev;
3549         int retval;
3550
3551         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3552
3553         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3554                 ethdev = rte_eth_dev_allocate(name);
3555                 if (!ethdev) {
3556                         retval = -ENODEV;
3557                         goto probe_failed;
3558                 }
3559
3560                 if (priv_data_size) {
3561                         ethdev->data->dev_private = rte_zmalloc_socket(
3562                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3563                                 device->numa_node);
3564
3565                         if (!ethdev->data->dev_private) {
3566                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3567                                 retval = -ENOMEM;
3568                                 goto probe_failed;
3569                         }
3570                 }
3571         } else {
3572                 ethdev = rte_eth_dev_attach_secondary(name);
3573                 if (!ethdev) {
3574                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3575                                 "ethdev doesn't exist");
3576                         retval = -ENODEV;
3577                         goto probe_failed;
3578                 }
3579         }
3580
3581         ethdev->device = device;
3582
3583         if (ethdev_bus_specific_init) {
3584                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3585                 if (retval) {
3586                         RTE_LOG(ERR, EAL,
3587                                 "ethdev bus specific initialisation failed");
3588                         goto probe_failed;
3589                 }
3590         }
3591
3592         retval = ethdev_init(ethdev, init_params);
3593         if (retval) {
3594                 RTE_LOG(ERR, EAL, "ethdev initialisation failed");
3595                 goto probe_failed;
3596         }
3597
3598         rte_eth_dev_probing_finish(ethdev);
3599
3600         return retval;
3601 probe_failed:
3602         /* free the port's private data if primary process */
3603         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3604                 rte_free(ethdev->data->dev_private);
3605
3606         rte_eth_dev_release_port(ethdev);
3607
3608         return retval;
3609 }
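
/*
 * Usage sketch (illustrative): a PCI PMD probe routine creating its
 * ethdev through this helper. "struct my_adapter" and "my_eth_init"
 * are hypothetical driver symbols; my_eth_init must match the
 * ethdev_init_t prototype.
 *
 *	static int
 *	my_eth_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		... set dev_ops, MAC address storage, etc. ...
 *		return 0;
 *	}
 *
 *	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *			sizeof(struct my_adapter), NULL, NULL,
 *			my_eth_init, NULL);
 */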
3610
3611 int __rte_experimental
3612 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3613         ethdev_uninit_t ethdev_uninit)
3614 {
3615         int ret;
3616
3617         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3618         if (!ethdev)
3619                 return -ENODEV;
3620
3621         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3622 
3623         /* ethdev_uninit is guaranteed non-NULL by the check just above */
3624         ret = ethdev_uninit(ethdev);
3625         if (ret)
3626                 return ret;
3627
3628         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3629                 rte_free(ethdev->data->dev_private);
3630
3631         ethdev->data->dev_private = NULL;
3632
3633         return rte_eth_dev_release_port(ethdev);
3634 }
3635
3636 int
3637 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3638                           int epfd, int op, void *data)
3639 {
3640         uint32_t vec;
3641         struct rte_eth_dev *dev;
3642         struct rte_intr_handle *intr_handle;
3643         int rc;
3644
3645         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3646
3647         dev = &rte_eth_devices[port_id];
3648         if (queue_id >= dev->data->nb_rx_queues) {
3649                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3650                 return -EINVAL;
3651         }
3652
3653         if (!dev->intr_handle) {
3654                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3655                 return -ENOTSUP;
3656         }
3657
3658         intr_handle = dev->intr_handle;
3659         if (!intr_handle->intr_vec) {
3660                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3661                 return -EPERM;
3662         }
3663
3664         vec = intr_handle->intr_vec[queue_id];
3665         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3666         if (rc && rc != -EEXIST) {
3667                 RTE_ETHDEV_LOG(ERR,
3668                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3669                         port_id, queue_id, op, epfd, vec);
3670                 return rc;
3671         }
3672
3673         return 0;
3674 }
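
/*
 * Usage sketch (illustrative): arming one Rx queue's interrupt on the
 * calling thread's epoll instance and sleeping until traffic arrives
 * (the -1 timeout means wait forever).
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */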
3675
3676 int
3677 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3678                            uint16_t queue_id)
3679 {
3680         struct rte_eth_dev *dev;
3681
3682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3683
3684         dev = &rte_eth_devices[port_id];
3685
3686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3687         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3688                                                                 queue_id));
3689 }
3690
3691 int
3692 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3693                             uint16_t queue_id)
3694 {
3695         struct rte_eth_dev *dev;
3696
3697         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3698
3699         dev = &rte_eth_devices[port_id];
3700
3701         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3702         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3703                                                                 queue_id));
3704 }
3705
3707 int
3708 rte_eth_dev_filter_supported(uint16_t port_id,
3709                              enum rte_filter_type filter_type)
3710 {
3711         struct rte_eth_dev *dev;
3712
3713         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3714
3715         dev = &rte_eth_devices[port_id];
3716         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3717         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3718                                 RTE_ETH_FILTER_NOP, NULL);
3719 }
3720
3721 int
3722 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3723                         enum rte_filter_op filter_op, void *arg)
3724 {
3725         struct rte_eth_dev *dev;
3726
3727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3728
3729         dev = &rte_eth_devices[port_id];
3730         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3731         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3732                                                              filter_op, arg));
3733 }
3734
3735 const struct rte_eth_rxtx_callback *
3736 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3737                 rte_rx_callback_fn fn, void *user_param)
3738 {
3739 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3740         rte_errno = ENOTSUP;
3741         return NULL;
3742 #endif
3743         /* check input parameters */
3744         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3745                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3746                 rte_errno = EINVAL;
3747                 return NULL;
3748         }
3749         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3750
3751         if (cb == NULL) {
3752                 rte_errno = ENOMEM;
3753                 return NULL;
3754         }
3755
3756         cb->fn.rx = fn;
3757         cb->param = user_param;
3758
3759         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3760         /* Add the callbacks in fifo order. */
3761         struct rte_eth_rxtx_callback *tail =
3762                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3763
3764         if (!tail) {
3765                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3766
3767         } else {
3768                 while (tail->next)
3769                         tail = tail->next;
3770                 tail->next = cb;
3771         }
3772         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3773
3774         return cb;
3775 }
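
/*
 * Usage sketch (illustrative): a post-Rx callback counting received
 * packets. The function follows the rte_rx_callback_fn prototype;
 * "rx_count" is a hypothetical application counter.
 *
 *	static uint16_t
 *	count_rx(uint16_t port_id, uint16_t queue_id,
 *		 struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *		 uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *rx_count = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*rx_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	cb = rte_eth_add_rx_callback(port_id, 0, count_rx, &rx_count);
 */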
3776
3777 const struct rte_eth_rxtx_callback *
3778 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3779                 rte_rx_callback_fn fn, void *user_param)
3780 {
3781 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3782         rte_errno = ENOTSUP;
3783         return NULL;
3784 #endif
3785         /* check input parameters */
3786         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3787                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3788                 rte_errno = EINVAL;
3789                 return NULL;
3790         }
3791
3792         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3793
3794         if (cb == NULL) {
3795                 rte_errno = ENOMEM;
3796                 return NULL;
3797         }
3798
3799         cb->fn.rx = fn;
3800         cb->param = user_param;
3801
3802         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3803         /* Add the callback at the first position */
3804         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3805         rte_smp_wmb();
3806         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3807         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3808
3809         return cb;
3810 }
3811
3812 const struct rte_eth_rxtx_callback *
3813 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3814                 rte_tx_callback_fn fn, void *user_param)
3815 {
3816 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3817         rte_errno = ENOTSUP;
3818         return NULL;
3819 #endif
3820         /* check input parameters */
3821         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3822                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3823                 rte_errno = EINVAL;
3824                 return NULL;
3825         }
3826
3827         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3828
3829         if (cb == NULL) {
3830                 rte_errno = ENOMEM;
3831                 return NULL;
3832         }
3833
3834         cb->fn.tx = fn;
3835         cb->param = user_param;
3836
3837         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3838         /* Add the callbacks in fifo order. */
3839         struct rte_eth_rxtx_callback *tail =
3840                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3841
3842         if (!tail) {
3843                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3844
3845         } else {
3846                 while (tail->next)
3847                         tail = tail->next;
3848                 tail->next = cb;
3849         }
3850         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3851
3852         return cb;
3853 }
3854
3855 int
3856 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3857                 const struct rte_eth_rxtx_callback *user_cb)
3858 {
3859 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3860         return -ENOTSUP;
3861 #endif
3862         /* Check input parameters. */
3863         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3864         if (user_cb == NULL ||
3865                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3866                 return -EINVAL;
3867
3868         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3869         struct rte_eth_rxtx_callback *cb;
3870         struct rte_eth_rxtx_callback **prev_cb;
3871         int ret = -EINVAL;
3872
3873         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3874         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3875         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3876                 cb = *prev_cb;
3877                 if (cb == user_cb) {
3878                         /* Remove the user cb from the callback list. */
3879                         *prev_cb = cb->next;
3880                         ret = 0;
3881                         break;
3882                 }
3883         }
3884         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3885
3886         return ret;
3887 }
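
/*
 * Note (illustrative): removal only unlinks the callback; its memory is
 * not freed here because a data-plane thread may still be executing it.
 * The application must wait for a quiescent period before freeing:
 *
 *	if (rte_eth_remove_rx_callback(port_id, queue_id, cb) == 0) {
 *		... wait until no lcore can still be inside cb ...
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 */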
3888
3889 int
3890 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3891                 const struct rte_eth_rxtx_callback *user_cb)
3892 {
3893 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3894         return -ENOTSUP;
3895 #endif
3896         /* Check input parameters. */
3897         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3898         if (user_cb == NULL ||
3899                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3900                 return -EINVAL;
3901
3902         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3903         int ret = -EINVAL;
3904         struct rte_eth_rxtx_callback *cb;
3905         struct rte_eth_rxtx_callback **prev_cb;
3906
3907         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3908         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3909         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3910                 cb = *prev_cb;
3911                 if (cb == user_cb) {
3912                         /* Remove the user cb from the callback list. */
3913                         *prev_cb = cb->next;
3914                         ret = 0;
3915                         break;
3916                 }
3917         }
3918         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3919
3920         return ret;
3921 }
3922
3923 int
3924 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3925         struct rte_eth_rxq_info *qinfo)
3926 {
3927         struct rte_eth_dev *dev;
3928
3929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3930
3931         if (qinfo == NULL)
3932                 return -EINVAL;
3933
3934         dev = &rte_eth_devices[port_id];
3935         if (queue_id >= dev->data->nb_rx_queues) {
3936                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3937                 return -EINVAL;
3938         }
3939
3940         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3941
3942         memset(qinfo, 0, sizeof(*qinfo));
3943         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3944         return 0;
3945 }
3946
3947 int
3948 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3949         struct rte_eth_txq_info *qinfo)
3950 {
3951         struct rte_eth_dev *dev;
3952         struct rte_eth_txconf *txconf = &qinfo->conf;
3953
3954         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3955
3956         if (qinfo == NULL)
3957                 return -EINVAL;
3958
3959         dev = &rte_eth_devices[port_id];
3960         if (queue_id >= dev->data->nb_tx_queues) {
3961                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
3962                 return -EINVAL;
3963         }
3964
3965         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3966
3967         memset(qinfo, 0, sizeof(*qinfo));
3968         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3969         /* convert offload to txq_flags to support legacy app */
3970         txconf = &qinfo->conf;
3971         rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
3971
3972         return 0;
3973 }
3974
3975 int
3976 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3977                              struct ether_addr *mc_addr_set,
3978                              uint32_t nb_mc_addr)
3979 {
3980         struct rte_eth_dev *dev;
3981
3982         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3983
3984         dev = &rte_eth_devices[port_id];
3985         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3986         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3987                                                 mc_addr_set, nb_mc_addr));
3988 }
3989
3990 int
3991 rte_eth_timesync_enable(uint16_t port_id)
3992 {
3993         struct rte_eth_dev *dev;
3994
3995         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3996         dev = &rte_eth_devices[port_id];
3997
3998         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3999         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4000 }
4001
4002 int
4003 rte_eth_timesync_disable(uint16_t port_id)
4004 {
4005         struct rte_eth_dev *dev;
4006
4007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4008         dev = &rte_eth_devices[port_id];
4009
4010         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4011         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4012 }
4013
4014 int
4015 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4016                                    uint32_t flags)
4017 {
4018         struct rte_eth_dev *dev;
4019
4020         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4021         dev = &rte_eth_devices[port_id];
4022
4023         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4024         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4025                                 (dev, timestamp, flags));
4026 }
4027
4028 int
4029 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4030                                    struct timespec *timestamp)
4031 {
4032         struct rte_eth_dev *dev;
4033
4034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4035         dev = &rte_eth_devices[port_id];
4036
4037         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4038         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4039                                 (dev, timestamp));
4040 }
4041
4042 int
4043 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4044 {
4045         struct rte_eth_dev *dev;
4046
4047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4048         dev = &rte_eth_devices[port_id];
4049
4050         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4051         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4052                                                                       delta));
4053 }
4054
4055 int
4056 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4057 {
4058         struct rte_eth_dev *dev;
4059
4060         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4061         dev = &rte_eth_devices[port_id];
4062
4063         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4064         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4065                                                                 timestamp));
4066 }
4067
4068 int
4069 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4070 {
4071         struct rte_eth_dev *dev;
4072
4073         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4074         dev = &rte_eth_devices[port_id];
4075
4076         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4077         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4078                                                                 timestamp));
4079 }
4080
4081 int
4082 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4083 {
4084         struct rte_eth_dev *dev;
4085
4086         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4087
4088         dev = &rte_eth_devices[port_id];
4089         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4090         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4091 }
4092
4093 int
4094 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4095 {
4096         struct rte_eth_dev *dev;
4097
4098         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4099
4100         dev = &rte_eth_devices[port_id];
4101         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4102         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4103 }
4104
4105 int
4106 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4107 {
4108         struct rte_eth_dev *dev;
4109
4110         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4111
4112         dev = &rte_eth_devices[port_id];
4113         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4114         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4115 }
4116
4117 int
4118 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4119 {
4120         struct rte_eth_dev *dev;
4121
4122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4123
4124         dev = &rte_eth_devices[port_id];
4125         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4126         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4127 }
4128
4129 int __rte_experimental
4130 rte_eth_dev_get_module_info(uint16_t port_id,
4131                             struct rte_eth_dev_module_info *modinfo)
4132 {
4133         struct rte_eth_dev *dev;
4134
4135         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4136
4137         dev = &rte_eth_devices[port_id];
4138         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4139         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4140 }
4141
4142 int __rte_experimental
4143 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4144                               struct rte_dev_eeprom_info *info)
4145 {
4146         struct rte_eth_dev *dev;
4147
4148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4149
4150         dev = &rte_eth_devices[port_id];
4151         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4152         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4153 }
4154
4155 int
4156 rte_eth_dev_get_dcb_info(uint16_t port_id,
4157                              struct rte_eth_dcb_info *dcb_info)
4158 {
4159         struct rte_eth_dev *dev;
4160
4161         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4162
4163         dev = &rte_eth_devices[port_id];
4164         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4165
4166         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4167         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4168 }
4169
4170 int
4171 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4172                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4173 {
4174         struct rte_eth_dev *dev;
4175
4176         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4177         if (l2_tunnel == NULL) {
4178                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4179                 return -EINVAL;
4180         }
4181
4182         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4183                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4184                 return -EINVAL;
4185         }
4186
4187         dev = &rte_eth_devices[port_id];
4188         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4189                                 -ENOTSUP);
4190         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4191                                                                 l2_tunnel));
4192 }
4193
4194 int
4195 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4196                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4197                                   uint32_t mask,
4198                                   uint8_t en)
4199 {
4200         struct rte_eth_dev *dev;
4201
4202         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4203
4204         if (l2_tunnel == NULL) {
4205                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4206                 return -EINVAL;
4207         }
4208
4209         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4210                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4211                 return -EINVAL;
4212         }
4213
4214         if (mask == 0) {
4215                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4216                 return -EINVAL;
4217         }
4218
4219         dev = &rte_eth_devices[port_id];
4220         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4221                                 -ENOTSUP);
4222         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4223                                                         l2_tunnel, mask, en));
4224 }
4225
4226 static void
4227 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4228                            const struct rte_eth_desc_lim *desc_lim)
4229 {
4230         if (desc_lim->nb_align != 0)
4231                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4232
4233         if (desc_lim->nb_max != 0)
4234                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4235
4236         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4237 }
4238
4239 int
4240 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4241                                  uint16_t *nb_rx_desc,
4242                                  uint16_t *nb_tx_desc)
4243 {
4244         struct rte_eth_dev *dev;
4245         struct rte_eth_dev_info dev_info;
4246
4247         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4248
4249         dev = &rte_eth_devices[port_id];
4250         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4251
4252         rte_eth_dev_info_get(port_id, &dev_info);
4253
4254         if (nb_rx_desc != NULL)
4255                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4256
4257         if (nb_tx_desc != NULL)
4258                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4259
4260         return 0;
4261 }
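
/*
 * Usage sketch (illustrative): clamping requested ring sizes to the
 * driver's limits before queue setup. "socket_id" and "mb_pool" are
 * hypothetical application objects.
 *
 *	uint16_t nb_rxd = 1024;
 *	uint16_t nb_txd = 1024;
 *
 *	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	if (ret == 0)
 *		ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *				socket_id, NULL, mb_pool);
 */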
4262
4263 int
4264 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4265 {
4266         struct rte_eth_dev *dev;
4267
4268         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4269
4270         if (pool == NULL)
4271                 return -EINVAL;
4272
4273         dev = &rte_eth_devices[port_id];
4274
4275         if (*dev->dev_ops->pool_ops_supported == NULL)
4276                 return 1; /* all pools are supported */
4277
4278         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4279 }
4280
4281 /**
4282  * A set of values to describe the possible states of a switch domain.
4283  */
4284 enum rte_eth_switch_domain_state {
4285         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4286         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4287 };
4288
4289 /**
4290  * Array of switch domains available for allocation. Array is sized to
4291  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4292  * ethdev ports in a single process.
4293  */
4294 struct rte_eth_dev_switch {
4295         enum rte_eth_switch_domain_state state;
4296 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4297
4298 int __rte_experimental
4299 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4300 {
4301         unsigned int i;
4302
4303         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4304
4305         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4306                 i < RTE_MAX_ETHPORTS; i++) {
4307                 if (rte_eth_switch_domains[i].state ==
4308                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4309                         rte_eth_switch_domains[i].state =
4310                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4311                         *domain_id = i;
4312                         return 0;
4313                 }
4314         }
4315
4316         return -ENOSPC;
4317 }
4318
4319 int __rte_experimental
4320 rte_eth_switch_domain_free(uint16_t domain_id)
4321 {
4322         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4323                 domain_id >= RTE_MAX_ETHPORTS)
4324                 return -EINVAL;
4325
4326         if (rte_eth_switch_domains[domain_id].state !=
4327                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4328                 return -EINVAL;
4329
4330         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4331
4332         return 0;
4333 }
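
/*
 * Usage sketch (illustrative): a driver allocating a switch domain for
 * a group of representor ports and releasing it on teardown.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	...
 *	rte_eth_switch_domain_free(domain_id);
 */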
4334
4335 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4336
4337 static int
4338 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4339 {
4340         int state;
4341         struct rte_kvargs_pair *pair;
4342         char *letter;
4343
4344         arglist->str = strdup(str_in);
4345         if (arglist->str == NULL)
4346                 return -ENOMEM;
4347
4348         letter = arglist->str;
4349         state = 0;
4350         arglist->count = 0;
4351         pair = &arglist->pairs[0];
4352         while (1) {
4353                 switch (state) {
4354                 case 0: /* Initial */
4355                         if (*letter == '=')
4356                                 return -EINVAL;
4357                         else if (*letter == '\0')
4358                                 return 0;
4359
4360                         state = 1;
4361                         pair->key = letter;
4362                         /* fall-thru */
4363
4364                 case 1: /* Parsing key */
4365                         if (*letter == '=') {
4366                                 *letter = '\0';
4367                                 pair->value = letter + 1;
4368                                 state = 2;
4369                         } else if (*letter == ',' || *letter == '\0')
4370                                 return -EINVAL;
4371                         break;
4372
4374                 case 2: /* Parsing value */
4375                         if (*letter == '[')
4376                                 state = 3;
4377                         else if (*letter == ',') {
4378                                 *letter = '\0';
4379                                 arglist->count++;
4380                                 pair = &arglist->pairs[arglist->count];
4381                                 state = 0;
4382                         } else if (*letter == '\0') {
4383                                 letter--;
4384                                 arglist->count++;
4385                                 pair = &arglist->pairs[arglist->count];
4386                                 state = 0;
4387                         }
4388                         break;
4389
4390                 case 3: /* Parsing list */
4391                         if (*letter == ']')
4392                                 state = 2;
4393                         else if (*letter == '\0')
4394                                 return -EINVAL;
4395                         break;
4396                 }
4397                 letter++;
4398         }
4399 }
4400
4401 static int
4402 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4403         void *data)
4404 {
4405         char *str_start;
4406         int state;
4407         int result;
4408
4409         if (*str != '[')
4410                 /* Single element, not a list */
4411                 return callback(str, data);
4412
4413         /* Sanity check, then strip the brackets */
4414         str_start = &str[strlen(str) - 1];
4415         if (*str_start != ']') {
4416                 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'", str);
4417                 return -EINVAL;
4418         }
4419         str++;
4420         *str_start = '\0';
4421
4422         /* Process list elements */
4423         state = 0;
4424         while (1) {
4425                 if (state == 0) {
4426                         if (*str == '\0')
4427                                 break;
4428                         if (*str != ',') {
4429                                 str_start = str;
4430                                 state = 1;
4431                         }
4432                 } else if (state == 1) {
4433                         if (*str == ',' || *str == '\0') {
4434                                 if (str > str_start) {
4435                                         /* Non-empty string fragment */
4436                                         *str = '\0';
4437                                         result = callback(str_start, data);
4438                                         if (result < 0)
4439                                                 return result;
4440                                 }
4441                                 state = 0;
4442                         }
4443                 }
4444                 str++;
4445         }
4446         return 0;
4447 }
4448
4449 static int
4450 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4451         const uint16_t max_list)
4452 {
4453         uint16_t lo, hi, val;
4454         int result;
4455
4456         result = sscanf(str, "%hu-%hu", &lo, &hi);
4457         if (result == 1) {
4458                 if (*len_list >= max_list)
4459                         return -ENOMEM;
4460                 list[(*len_list)++] = lo;
4461         } else if (result == 2) {
4462                 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4463                         return -EINVAL;
4464                 for (val = lo; val <= hi; val++) {
4465                         if (*len_list >= max_list)
4466                                 return -ENOMEM;
4467                         list[(*len_list)++] = val;
4468                 }
4469         } else
4470                 return -EINVAL;
4471         return 0;
4472 }
4473
4475 static int
4476 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4477 {
4478         struct rte_eth_devargs *eth_da = data;
4479
4480         return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4481                 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
4482 }
4483
4484 int __rte_experimental
4485 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4486 {
4487         struct rte_kvargs args;
4488         struct rte_kvargs_pair *pair;
4489         unsigned int i;
4490         int result = 0;
4491
4492         memset(eth_da, 0, sizeof(*eth_da));
4493
4494         result = rte_eth_devargs_tokenise(&args, dargs);
4495         if (result < 0)
4496                 goto parse_cleanup;
4497
4498         for (i = 0; i < args.count; i++) {
4499                 pair = &args.pairs[i];
4500                 if (strcmp("representor", pair->key) == 0) {
4501                         result = rte_eth_devargs_parse_list(pair->value,
4502                                 rte_eth_devargs_parse_representor_ports,
4503                                 eth_da);
4504                         if (result < 0)
4505                                 goto parse_cleanup;
4506                 }
4507         }
4508
4509 parse_cleanup:
4510         if (args.str)
4511                 free(args.str);
4512
4513         return result;
4514 }
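
/*
 * Usage sketch (illustrative): parsing a representor list. For the
 * string below, nb_representor_ports becomes 3 and representor_ports
 * holds { 0, 2, 3 }.
 *
 *	struct rte_eth_devargs eth_da;
 *
 *	ret = rte_eth_devargs_parse("representor=[0,2-3]", &eth_da);
 */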
4515
4516 RTE_INIT(ethdev_init_log);
4517 static void
4518 ethdev_init_log(void)
4519 {
4520         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
4521         if (rte_eth_dev_logtype >= 0)
4522                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
4523 }