ethdev: return named opaque type instead of void pointer
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37
38 #include "rte_ether.h"
39 #include "rte_ethdev.h"
40 #include "rte_ethdev_driver.h"
41 #include "ethdev_profile.h"
42
43 static int ethdev_logtype;
44
45 #define ethdev_log(level, fmt, ...) \
46         rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
47
48 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
49 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
50 static uint8_t eth_dev_last_created_port;
51
52 /* spinlock for eth device callbacks */
53 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
54
55 /* spinlock for add/remove rx callbacks */
56 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
57
58 /* spinlock for add/remove tx callbacks */
59 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
60
61 /* spinlock for shared data allocation */
62 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
63
64 /* store statistics names and their offsets in the stats structure */
65 struct rte_eth_xstats_name_off {
66         char name[RTE_ETH_XSTATS_NAME_SIZE];
67         unsigned offset;
68 };
69
70 /* Shared memory between primary and secondary processes. */
71 static struct {
72         uint64_t next_owner_id;
73         rte_spinlock_t ownership_lock;
74         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
75 } *rte_eth_dev_shared_data;
76
77 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
78         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
79         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
80         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
81         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
82         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
83         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
84         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
85         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
86                 rx_nombuf)},
87 };
88
89 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
90
91 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
92         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
93         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
94         {"errors", offsetof(struct rte_eth_stats, q_errors)},
95 };
96
97 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
98                 sizeof(rte_rxq_stats_strings[0]))
99
100 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
101         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
102         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
103 };
104 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
105                 sizeof(rte_txq_stats_strings[0]))
106
107 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
108         { DEV_RX_OFFLOAD_##_name, #_name }
109
110 static const struct {
111         uint64_t offload;
112         const char *name;
113 } rte_rx_offload_names[] = {
114         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
127         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
128         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
129         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
130 };
131
132 #undef RTE_RX_OFFLOAD_BIT2STR
133
134 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
135         { DEV_TX_OFFLOAD_##_name, #_name }
136
137 static const struct {
138         uint64_t offload;
139         const char *name;
140 } rte_tx_offload_names[] = {
141         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
142         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
143         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
147         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
149         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
150         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
151         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
155         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
156         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
157         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
158         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
159 };
160
161 #undef RTE_TX_OFFLOAD_BIT2STR
162
163 /**
164  * The user application callback description.
165  *
166  * It contains the callback address registered by the user application,
167  * the pointer to the callback parameters, and the event type.
168  */
169 struct rte_eth_dev_callback {
170         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
171         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
172         void *cb_arg;                           /**< Parameter for callback */
173         void *ret_param;                        /**< Return parameter */
174         enum rte_eth_event_type event;          /**< Interrupt event type */
175         uint32_t active;                        /**< Callback is executing */
176 };
177
178 enum {
179         STAT_QMAP_TX = 0,
180         STAT_QMAP_RX
181 };
182
183 uint16_t
184 rte_eth_find_next(uint16_t port_id)
185 {
186         while (port_id < RTE_MAX_ETHPORTS &&
187                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
188                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
189                 port_id++;
190
191         if (port_id >= RTE_MAX_ETHPORTS)
192                 return RTE_MAX_ETHPORTS;
193
194         return port_id;
195 }
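/*
 * Usage sketch (illustrative): iterate over all usable ports with
 * rte_eth_find_next(); the RTE_ETH_FOREACH_DEV() macro in rte_ethdev.h
 * follows the same pattern.
 *
 *	uint16_t pid;
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is usable\n", pid);
 */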
196
197 static void
198 rte_eth_dev_shared_data_prepare(void)
199 {
200         const unsigned flags = 0;
201         const struct rte_memzone *mz;
202
203         rte_spinlock_lock(&rte_eth_shared_data_lock);
204
205         if (rte_eth_dev_shared_data == NULL) {
206                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
207                         /* Allocate port data and ownership shared memory. */
208                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
209                                         sizeof(*rte_eth_dev_shared_data),
210                                         rte_socket_id(), flags);
211                 } else
212                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
213                 if (mz == NULL)
214                         rte_panic("Cannot allocate ethdev shared data\n");
215
216                 rte_eth_dev_shared_data = mz->addr;
217                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
218                         rte_eth_dev_shared_data->next_owner_id =
219                                         RTE_ETH_DEV_NO_OWNER + 1;
220                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
221                         memset(rte_eth_dev_shared_data->data, 0,
222                                sizeof(rte_eth_dev_shared_data->data));
223                 }
224         }
225
226         rte_spinlock_unlock(&rte_eth_shared_data_lock);
227 }
228
229 struct rte_eth_dev *
230 rte_eth_dev_allocated(const char *name)
231 {
232         unsigned i;
233
234         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
235                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
236                     strcmp(rte_eth_devices[i].data->name, name) == 0)
237                         return &rte_eth_devices[i];
238         }
239         return NULL;
240 }
241
242 static uint16_t
243 rte_eth_dev_find_free_port(void)
244 {
245         unsigned i;
246
247         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
248                 /* Using shared name field to find a free port. */
249                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
250                         RTE_ASSERT(rte_eth_devices[i].state ==
251                                    RTE_ETH_DEV_UNUSED);
252                         return i;
253                 }
254         }
255         return RTE_MAX_ETHPORTS;
256 }
257
258 static struct rte_eth_dev *
259 eth_dev_get(uint16_t port_id)
260 {
261         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
262
263         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
264         eth_dev->state = RTE_ETH_DEV_ATTACHED;
265
266         eth_dev_last_created_port = port_id;
267
268         return eth_dev;
269 }
270
271 struct rte_eth_dev *
272 rte_eth_dev_allocate(const char *name)
273 {
274         uint16_t port_id;
275         struct rte_eth_dev *eth_dev = NULL;
276
277         rte_eth_dev_shared_data_prepare();
278
279         /* Synchronize port creation between primary and secondary threads. */
280         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
281
282         port_id = rte_eth_dev_find_free_port();
283         if (port_id == RTE_MAX_ETHPORTS) {
284                 ethdev_log(ERR, "Reached maximum number of Ethernet ports");
285                 goto unlock;
286         }
287
288         if (rte_eth_dev_allocated(name) != NULL) {
289                 ethdev_log(ERR,
290                         "Ethernet device with name %s already allocated!",
291                         name);
292                 goto unlock;
293         }
294
295         eth_dev = eth_dev_get(port_id);
296         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
297         eth_dev->data->port_id = port_id;
298         eth_dev->data->mtu = ETHER_MTU;
299
300 unlock:
301         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
302
303         if (eth_dev != NULL)
304                 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
305
306         return eth_dev;
307 }
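/*
 * Usage sketch (illustrative, hypothetical driver names): a PMD probe
 * function typically allocates its port with rte_eth_dev_allocate() and
 * fills in the ops table and MAC address storage before the port is used.
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->dev_ops = &example_dev_ops;
 *	eth_dev->data->mac_addrs = rte_zmalloc("mac_addrs",
 *			ETHER_ADDR_LEN, 0);
 */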
308
309 /*
310  * Attach to a port already registered by the primary process, which
311  * ensures that the same device gets the same port id in both the
312  * primary and the secondary process.
313  */
314 struct rte_eth_dev *
315 rte_eth_dev_attach_secondary(const char *name)
316 {
317         uint16_t i;
318         struct rte_eth_dev *eth_dev = NULL;
319
320         rte_eth_dev_shared_data_prepare();
321
322         /* Synchronize port attachment to primary port creation and release. */
323         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
324
325         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
326                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
327                         break;
328         }
329         if (i == RTE_MAX_ETHPORTS) {
330                 RTE_PMD_DEBUG_TRACE(
331                         "device %s is not driven by the primary process\n",
332                         name);
333         } else {
334                 eth_dev = eth_dev_get(i);
335                 RTE_ASSERT(eth_dev->data->port_id == i);
336         }
337
338         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
339         return eth_dev;
340 }
341
342 int
343 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
344 {
345         if (eth_dev == NULL)
346                 return -EINVAL;
347
348         rte_eth_dev_shared_data_prepare();
349
350         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
351
352         eth_dev->state = RTE_ETH_DEV_UNUSED;
353
354         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
355
356         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
357
358         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
359
360         return 0;
361 }
362
363 int
364 rte_eth_dev_is_valid_port(uint16_t port_id)
365 {
366         if (port_id >= RTE_MAX_ETHPORTS ||
367             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
368                 return 0;
369         else
370                 return 1;
371 }
372
373 static int
374 rte_eth_is_valid_owner_id(uint64_t owner_id)
375 {
376         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
377             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
378                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
379                 return 0;
380         }
381         return 1;
382 }
383
384 uint64_t __rte_experimental
385 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
386 {
387         while (port_id < RTE_MAX_ETHPORTS &&
388                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
389                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
390                rte_eth_devices[port_id].data->owner.id != owner_id))
391                 port_id++;
392
393         if (port_id >= RTE_MAX_ETHPORTS)
394                 return RTE_MAX_ETHPORTS;
395
396         return port_id;
397 }
398
399 int __rte_experimental
400 rte_eth_dev_owner_new(uint64_t *owner_id)
401 {
402         rte_eth_dev_shared_data_prepare();
403
404         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
405
406         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
407
408         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
409         return 0;
410 }
411
412 static int
413 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
414                        const struct rte_eth_dev_owner *new_owner)
415 {
416         struct rte_eth_dev_owner *port_owner;
417         int sret;
418
419         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
420
421         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
422             !rte_eth_is_valid_owner_id(old_owner_id))
423                 return -EINVAL;
424
425         port_owner = &rte_eth_devices[port_id].data->owner;
426         if (port_owner->id != old_owner_id) {
427                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d, which is already owned"
428                                     " by %s_%016lX.\n", port_id,
429                                     port_owner->name, port_owner->id);
430                 return -EPERM;
431         }
432
433         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
434                         new_owner->name);
435         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
436                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
437                                     port_id);
438
439         port_owner->id = new_owner->id;
440
441         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
442                             new_owner->name, new_owner->id);
443
444         return 0;
445 }
446
447 int __rte_experimental
448 rte_eth_dev_owner_set(const uint16_t port_id,
449                       const struct rte_eth_dev_owner *owner)
450 {
451         int ret;
452
453         rte_eth_dev_shared_data_prepare();
454
455         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
456
457         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
458
459         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
460         return ret;
461 }
462
463 int __rte_experimental
464 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
465 {
466         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
467                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
468         int ret;
469
470         rte_eth_dev_shared_data_prepare();
471
472         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
473
474         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
475
476         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
477         return ret;
478 }
479
480 void __rte_experimental
481 rte_eth_dev_owner_delete(const uint64_t owner_id)
482 {
483         uint16_t port_id;
484
485         rte_eth_dev_shared_data_prepare();
486
487         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
488
489         if (rte_eth_is_valid_owner_id(owner_id)) {
490                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
491                         memset(&rte_eth_devices[port_id].data->owner, 0,
492                                sizeof(struct rte_eth_dev_owner));
493                 RTE_PMD_DEBUG_TRACE("All ports owned by the %016lX identifier"
494                                     " have been released.\n", owner_id);
495         }
496
497         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
498 }
499
500 int __rte_experimental
501 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
502 {
503         int ret = 0;
504
505         rte_eth_dev_shared_data_prepare();
506
507         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
508
509         if (!rte_eth_dev_is_valid_port(port_id)) {
510                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
511                 ret = -ENODEV;
512         } else {
513                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
514                            sizeof(*owner));
515         }
516
517         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
518         return ret;
519 }
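/*
 * Usage sketch (illustrative, hypothetical owner name): claim a port with
 * the experimental ownership API so that other DPDK entities will not
 * manage it concurrently.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... use the port exclusively ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */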
520
521 int
522 rte_eth_dev_socket_id(uint16_t port_id)
523 {
524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
525         return rte_eth_devices[port_id].data->numa_node;
526 }
527
528 void *
529 rte_eth_dev_get_sec_ctx(uint16_t port_id)
530 {
531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
532         return rte_eth_devices[port_id].security_ctx;
533 }
534
535 uint16_t
536 rte_eth_dev_count(void)
537 {
538         uint16_t p;
539         uint16_t count;
540
541         count = 0;
542
543         RTE_ETH_FOREACH_DEV(p)
544                 count++;
545
546         return count;
547 }
548
549 int
550 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
551 {
552         char *tmp;
553
554         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
555
556         if (name == NULL) {
557                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
558                 return -EINVAL;
559         }
560
561         /* Don't check 'rte_eth_devices[i].data' here,
562          * because it might be overwritten by a VDEV PMD. */
563         tmp = rte_eth_dev_shared_data->data[port_id].name;
564         strcpy(name, tmp);
565         return 0;
566 }
567
568 int
569 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
570 {
571         uint32_t pid;
572
573         if (name == NULL) {
574                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
575                 return -EINVAL;
576         }
577
578         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
579                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
580                     !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
581                         *port_id = pid;
582                         return 0;
583                 }
584         }
585
586         return -ENODEV;
587 }
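/*
 * Usage sketch (illustrative, hypothetical device name): translate a
 * device name into its port id.
 *
 *	uint16_t pid;
 *	if (rte_eth_dev_get_port_by_name("0000:01:00.0", &pid) == 0)
 *		printf("device is port %u\n", pid);
 */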
588
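/* Pass a PMD return code through unchanged, except report -EIO when the
 * failure is explained by the device having been physically removed.
 */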
589 static int
590 eth_err(uint16_t port_id, int ret)
591 {
592         if (ret == 0)
593                 return 0;
594         if (rte_eth_dev_is_removed(port_id))
595                 return -EIO;
596         return ret;
597 }
598
599 /* attach the new device, then store port_id of the device */
600 int
601 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
602 {
603         int ret = -1;
604         int current = rte_eth_dev_count();
605         char *name = NULL;
606         char *args = NULL;
607
608         if ((devargs == NULL) || (port_id == NULL)) {
609                 ret = -EINVAL;
610                 goto err;
611         }
612
613         /* parse devargs, then retrieve device name and args */
614         if (rte_eal_parse_devargs_str(devargs, &name, &args))
615                 goto err;
616
617         ret = rte_eal_dev_attach(name, args);
618         if (ret < 0)
619                 goto err;
620
621         /* no point looking at the port count if no port exists */
622         if (!rte_eth_dev_count()) {
623                 ethdev_log(ERR, "No port found for device (%s)", name);
624                 ret = -1;
625                 goto err;
626         }
627
628         /* if nothing happened, there is a bug here, since some driver told us
629          * it did attach a device, but did not create a port.
630          */
631         if (current == rte_eth_dev_count()) {
632                 ret = -1;
633                 goto err;
634         }
635
636         *port_id = eth_dev_last_created_port;
637         ret = 0;
638
639 err:
640         free(name);
641         free(args);
642         return ret;
643 }
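/*
 * Usage sketch (illustrative, hypothetical devargs string): hot-plug a
 * virtual device and retrieve the port id it created.
 *
 *	uint16_t pid;
 *	if (rte_eth_dev_attach("net_tap0,iface=dpdk-tap0", &pid) == 0)
 *		... configure and start port 'pid' ...
 */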
644
645 /* detach the device, then store the name of the device */
646 int
647 rte_eth_dev_detach(uint16_t port_id, char *name)
648 {
649         uint32_t dev_flags;
650         int ret = -1;
651
652         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
653
654         if (name == NULL) {
655                 ret = -EINVAL;
656                 goto err;
657         }
658
659         dev_flags = rte_eth_devices[port_id].data->dev_flags;
660         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
661                 ethdev_log(ERR,
662                         "Port %" PRIu16 " is bonded, cannot detach", port_id);
663                 ret = -ENOTSUP;
664                 goto err;
665         }
666
667         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
668                  "%s", rte_eth_devices[port_id].data->name);
669
670         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
671         if (ret < 0)
672                 goto err;
673
674         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
675         return 0;
676
677 err:
678         return ret;
679 }
680
681 static int
682 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
683 {
684         uint16_t old_nb_queues = dev->data->nb_rx_queues;
685         void **rxq;
686         unsigned i;
687
688         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
689                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
690                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
691                                 RTE_CACHE_LINE_SIZE);
692                 if (dev->data->rx_queues == NULL) {
693                         dev->data->nb_rx_queues = 0;
694                         return -(ENOMEM);
695                 }
696         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
697                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
698
699                 rxq = dev->data->rx_queues;
700
701                 for (i = nb_queues; i < old_nb_queues; i++)
702                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
703                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
704                                 RTE_CACHE_LINE_SIZE);
705                 if (rxq == NULL)
706                         return -(ENOMEM);
707                 if (nb_queues > old_nb_queues) {
708                         uint16_t new_qs = nb_queues - old_nb_queues;
709
710                         memset(rxq + old_nb_queues, 0,
711                                 sizeof(rxq[0]) * new_qs);
712                 }
713
714                 dev->data->rx_queues = rxq;
715
716         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
717                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
718
719                 rxq = dev->data->rx_queues;
720
721                 for (i = nb_queues; i < old_nb_queues; i++)
722                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
723
724                 rte_free(dev->data->rx_queues);
725                 dev->data->rx_queues = NULL;
726         }
727         dev->data->nb_rx_queues = nb_queues;
728         return 0;
729 }
730
731 int
732 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
733 {
734         struct rte_eth_dev *dev;
735
736         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
737
738         dev = &rte_eth_devices[port_id];
739         if (rx_queue_id >= dev->data->nb_rx_queues) {
740                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
741                 return -EINVAL;
742         }
743
744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
745
746         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
747                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
748                         " already started\n",
749                         rx_queue_id, port_id);
750                 return 0;
751         }
752
753         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
754                                                              rx_queue_id));
755
756 }
757
758 int
759 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
760 {
761         struct rte_eth_dev *dev;
762
763         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
764
765         dev = &rte_eth_devices[port_id];
766         if (rx_queue_id >= dev->data->nb_rx_queues) {
767                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
768                 return -EINVAL;
769         }
770
771         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
772
773         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
774                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
775                         " already stopped\n",
776                         rx_queue_id, port_id);
777                 return 0;
778         }
779
780         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
781
782 }
783
784 int
785 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
786 {
787         struct rte_eth_dev *dev;
788
789         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
790
791         dev = &rte_eth_devices[port_id];
792         if (tx_queue_id >= dev->data->nb_tx_queues) {
793                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
794                 return -EINVAL;
795         }
796
797         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
798
799         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
800                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
801                         " already started\n",
802                         tx_queue_id, port_id);
803                 return 0;
804         }
805
806         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
807                                                              tx_queue_id));
808
809 }
810
811 int
812 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
813 {
814         struct rte_eth_dev *dev;
815
816         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
817
818         dev = &rte_eth_devices[port_id];
819         if (tx_queue_id >= dev->data->nb_tx_queues) {
820                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
821                 return -EINVAL;
822         }
823
824         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
825
826         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
827                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
828                         " already stopped\n",
829                         tx_queue_id, port_id);
830                 return 0;
831         }
832
833         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
834
835 }
836
837 static int
838 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
839 {
840         uint16_t old_nb_queues = dev->data->nb_tx_queues;
841         void **txq;
842         unsigned i;
843
844         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
845                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
846                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
847                                                    RTE_CACHE_LINE_SIZE);
848                 if (dev->data->tx_queues == NULL) {
849                         dev->data->nb_tx_queues = 0;
850                         return -(ENOMEM);
851                 }
852         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
853                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
854
855                 txq = dev->data->tx_queues;
856
857                 for (i = nb_queues; i < old_nb_queues; i++)
858                         (*dev->dev_ops->tx_queue_release)(txq[i]);
859                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
860                                   RTE_CACHE_LINE_SIZE);
861                 if (txq == NULL)
862                         return -ENOMEM;
863                 if (nb_queues > old_nb_queues) {
864                         uint16_t new_qs = nb_queues - old_nb_queues;
865
866                         memset(txq + old_nb_queues, 0,
867                                sizeof(txq[0]) * new_qs);
868                 }
869
870                 dev->data->tx_queues = txq;
871
872         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
873                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
874
875                 txq = dev->data->tx_queues;
876
877                 for (i = nb_queues; i < old_nb_queues; i++)
878                         (*dev->dev_ops->tx_queue_release)(txq[i]);
879
880                 rte_free(dev->data->tx_queues);
881                 dev->data->tx_queues = NULL;
882         }
883         dev->data->nb_tx_queues = nb_queues;
884         return 0;
885 }
886
887 uint32_t
888 rte_eth_speed_bitflag(uint32_t speed, int duplex)
889 {
890         switch (speed) {
891         case ETH_SPEED_NUM_10M:
892                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
893         case ETH_SPEED_NUM_100M:
894                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
895         case ETH_SPEED_NUM_1G:
896                 return ETH_LINK_SPEED_1G;
897         case ETH_SPEED_NUM_2_5G:
898                 return ETH_LINK_SPEED_2_5G;
899         case ETH_SPEED_NUM_5G:
900                 return ETH_LINK_SPEED_5G;
901         case ETH_SPEED_NUM_10G:
902                 return ETH_LINK_SPEED_10G;
903         case ETH_SPEED_NUM_20G:
904                 return ETH_LINK_SPEED_20G;
905         case ETH_SPEED_NUM_25G:
906                 return ETH_LINK_SPEED_25G;
907         case ETH_SPEED_NUM_40G:
908                 return ETH_LINK_SPEED_40G;
909         case ETH_SPEED_NUM_50G:
910                 return ETH_LINK_SPEED_50G;
911         case ETH_SPEED_NUM_56G:
912                 return ETH_LINK_SPEED_56G;
913         case ETH_SPEED_NUM_100G:
914                 return ETH_LINK_SPEED_100G;
915         default:
916                 return 0;
917         }
918 }
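/*
 * Usage sketch (illustrative): build the link_speeds bitmap of
 * struct rte_eth_conf from a fixed numeric speed.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */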
919
920 /**
921  * Convert from the legacy rxmode bitfield API to the rxmode offloads API.
922  */
923 static void
924 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
925                                     uint64_t *rx_offloads)
926 {
927         uint64_t offloads = 0;
928
929         if (rxmode->header_split == 1)
930                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
931         if (rxmode->hw_ip_checksum == 1)
932                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
933         if (rxmode->hw_vlan_filter == 1)
934                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
935         if (rxmode->hw_vlan_strip == 1)
936                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
937         if (rxmode->hw_vlan_extend == 1)
938                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
939         if (rxmode->jumbo_frame == 1)
940                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
941         if (rxmode->hw_strip_crc == 1)
942                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
943         if (rxmode->enable_scatter == 1)
944                 offloads |= DEV_RX_OFFLOAD_SCATTER;
945         if (rxmode->enable_lro == 1)
946                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
947         if (rxmode->hw_timestamp == 1)
948                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
949         if (rxmode->security == 1)
950                 offloads |= DEV_RX_OFFLOAD_SECURITY;
951
952         *rx_offloads = offloads;
953 }
954
955 /**
956  * Convert from the rxmode offloads API to the legacy rxmode bitfield API.
957  */
958 static void
959 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
960                             struct rte_eth_rxmode *rxmode)
961 {
962
963         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
964                 rxmode->header_split = 1;
965         else
966                 rxmode->header_split = 0;
967         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
968                 rxmode->hw_ip_checksum = 1;
969         else
970                 rxmode->hw_ip_checksum = 0;
971         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
972                 rxmode->hw_vlan_filter = 1;
973         else
974                 rxmode->hw_vlan_filter = 0;
975         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
976                 rxmode->hw_vlan_strip = 1;
977         else
978                 rxmode->hw_vlan_strip = 0;
979         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
980                 rxmode->hw_vlan_extend = 1;
981         else
982                 rxmode->hw_vlan_extend = 0;
983         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
984                 rxmode->jumbo_frame = 1;
985         else
986                 rxmode->jumbo_frame = 0;
987         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
988                 rxmode->hw_strip_crc = 1;
989         else
990                 rxmode->hw_strip_crc = 0;
991         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
992                 rxmode->enable_scatter = 1;
993         else
994                 rxmode->enable_scatter = 0;
995         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
996                 rxmode->enable_lro = 1;
997         else
998                 rxmode->enable_lro = 0;
999         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
1000                 rxmode->hw_timestamp = 1;
1001         else
1002                 rxmode->hw_timestamp = 0;
1003         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
1004                 rxmode->security = 1;
1005         else
1006                 rxmode->security = 0;
1007 }
1008
1009 const char * __rte_experimental
1010 rte_eth_dev_rx_offload_name(uint64_t offload)
1011 {
1012         const char *name = "UNKNOWN";
1013         unsigned int i;
1014
1015         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1016                 if (offload == rte_rx_offload_names[i].offload) {
1017                         name = rte_rx_offload_names[i].name;
1018                         break;
1019                 }
1020         }
1021
1022         return name;
1023 }
1024
1025 const char * __rte_experimental
1026 rte_eth_dev_tx_offload_name(uint64_t offload)
1027 {
1028         const char *name = "UNKNOWN";
1029         unsigned int i;
1030
1031         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1032                 if (offload == rte_tx_offload_names[i].offload) {
1033                         name = rte_tx_offload_names[i].name;
1034                         break;
1035                 }
1036         }
1037
1038         return name;
1039 }
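/*
 * Usage sketch (illustrative): print the Rx offload capabilities reported
 * by a port using the name helpers above.
 *
 *	struct rte_eth_dev_info info;
 *	uint64_t bit;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	for (bit = 1; bit != 0; bit <<= 1)
 *		if (info.rx_offload_capa & bit)
 *			printf("RX offload: %s\n",
 *			       rte_eth_dev_rx_offload_name(bit));
 */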
1040
1041 int
1042 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1043                       const struct rte_eth_conf *dev_conf)
1044 {
1045         struct rte_eth_dev *dev;
1046         struct rte_eth_dev_info dev_info;
1047         struct rte_eth_conf local_conf = *dev_conf;
1048         int diag;
1049
1050         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1051
1052         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1053                 RTE_PMD_DEBUG_TRACE(
1054                         "Number of RX queues requested (%u) is greater than the max supported (%d)\n",
1055                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1056                 return -EINVAL;
1057         }
1058
1059         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1060                 RTE_PMD_DEBUG_TRACE(
1061                         "Number of TX queues requested (%u) is greater than the max supported (%d)\n",
1062                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1063                 return -EINVAL;
1064         }
1065
1066         dev = &rte_eth_devices[port_id];
1067
1068         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1069         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1070
1071         if (dev->data->dev_started) {
1072                 RTE_PMD_DEBUG_TRACE(
1073                     "port %d must be stopped to allow configuration\n", port_id);
1074                 return -EBUSY;
1075         }
1076
1077         /*
1078          * Convert between the two offloads APIs so that a PMD needs to
1079          * support only one of them.
1080          */
1081         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
1082                 rte_eth_convert_rx_offload_bitfield(
1083                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1084         } else {
1085                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
1086                                             &local_conf.rxmode);
1087         }
1088
1089         /* Copy the dev_conf parameter into the dev structure */
1090         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1091
1092         /*
1093          * Check that the numbers of RX and TX queues are not greater
1094          * than the maximum number of RX and TX queues supported by the
1095          * configured device.
1096          */
1097         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1098
1099         if (nb_rx_q == 0 && nb_tx_q == 0) {
1100                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d the numbers of RX and TX queues cannot both be 0\n", port_id);
1101                 return -EINVAL;
1102         }
1103
1104         if (nb_rx_q > dev_info.max_rx_queues) {
1105                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1106                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1107                 return -EINVAL;
1108         }
1109
1110         if (nb_tx_q > dev_info.max_tx_queues) {
1111                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1112                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1113                 return -EINVAL;
1114         }
1115
1116         /* Check that the device supports requested interrupts */
1117         if ((dev_conf->intr_conf.lsc == 1) &&
1118                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1119                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1120                                         dev->device->driver->name);
1121                         return -EINVAL;
1122         }
1123         if ((dev_conf->intr_conf.rmv == 1) &&
1124             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1125                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1126                                     dev->device->driver->name);
1127                 return -EINVAL;
1128         }
1129
1130         /*
1131          * If jumbo frames are enabled, check that the maximum RX packet
1132          * length is supported by the configured device.
1133          */
1134         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1135                 if (dev_conf->rxmode.max_rx_pkt_len >
1136                     dev_info.max_rx_pktlen) {
1137                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1138                                 " > max valid value %u\n",
1139                                 port_id,
1140                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1141                                 (unsigned)dev_info.max_rx_pktlen);
1142                         return -EINVAL;
1143                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1144                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1145                                 " < min valid value %u\n",
1146                                 port_id,
1147                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1148                                 (unsigned)ETHER_MIN_LEN);
1149                         return -EINVAL;
1150                 }
1151         } else {
1152                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1153                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1154                         /* Use default value */
1155                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1156                                                         ETHER_MAX_LEN;
1157         }
1158
1159         /*
1160          * Setup new number of RX/TX queues and reconfigure device.
1161          */
1162         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1163         if (diag != 0) {
1164                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1165                                 port_id, diag);
1166                 return diag;
1167         }
1168
1169         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1170         if (diag != 0) {
1171                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1172                                 port_id, diag);
1173                 rte_eth_dev_rx_queue_config(dev, 0);
1174                 return diag;
1175         }
1176
1177         diag = (*dev->dev_ops->dev_configure)(dev);
1178         if (diag != 0) {
1179                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1180                                 port_id, diag);
1181                 rte_eth_dev_rx_queue_config(dev, 0);
1182                 rte_eth_dev_tx_queue_config(dev, 0);
1183                 return eth_err(port_id, diag);
1184         }
1185
1186         /* Initialize Rx profiling if enabled at compilation time. */
1187         diag = __rte_eth_profile_rx_init(port_id, dev);
1188         if (diag != 0) {
1189                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1190                                 port_id, diag);
1191                 rte_eth_dev_rx_queue_config(dev, 0);
1192                 rte_eth_dev_tx_queue_config(dev, 0);
1193                 return eth_err(port_id, diag);
1194         }
1195
1196         return 0;
1197 }
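/*
 * Usage sketch (illustrative): a minimal configuration call using the
 * offloads API rather than the legacy rxmode bitfield.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.ignore_offload_bitfield = 1;
 *	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */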
1198
1199 void
1200 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1201 {
1202         if (dev->data->dev_started) {
1203                 RTE_PMD_DEBUG_TRACE(
1204                         "port %d must be stopped to allow reset\n",
1205                         dev->data->port_id);
1206                 return;
1207         }
1208
1209         rte_eth_dev_rx_queue_config(dev, 0);
1210         rte_eth_dev_tx_queue_config(dev, 0);
1211
1212         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1213 }
1214
1215 static void
1216 rte_eth_dev_config_restore(uint16_t port_id)
1217 {
1218         struct rte_eth_dev *dev;
1219         struct rte_eth_dev_info dev_info;
1220         struct ether_addr *addr;
1221         uint16_t i;
1222         uint32_t pool = 0;
1223         uint64_t pool_mask;
1224
1225         dev = &rte_eth_devices[port_id];
1226
1227         rte_eth_dev_info_get(port_id, &dev_info);
1228
1229         /* replay MAC address configuration including default MAC */
1230         addr = &dev->data->mac_addrs[0];
1231         if (*dev->dev_ops->mac_addr_set != NULL)
1232                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1233         else if (*dev->dev_ops->mac_addr_add != NULL)
1234                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1235
1236         if (*dev->dev_ops->mac_addr_add != NULL) {
1237                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1238                         addr = &dev->data->mac_addrs[i];
1239
1240                         /* skip zero address */
1241                         if (is_zero_ether_addr(addr))
1242                                 continue;
1243
1244                         pool = 0;
1245                         pool_mask = dev->data->mac_pool_sel[i];
1246
1247                         do {
1248                                 if (pool_mask & 1ULL)
1249                                         (*dev->dev_ops->mac_addr_add)(dev,
1250                                                 addr, i, pool);
1251                                 pool_mask >>= 1;
1252                                 pool++;
1253                         } while (pool_mask);
1254                 }
1255         }
1256
1257         /* replay promiscuous configuration */
1258         if (rte_eth_promiscuous_get(port_id) == 1)
1259                 rte_eth_promiscuous_enable(port_id);
1260         else if (rte_eth_promiscuous_get(port_id) == 0)
1261                 rte_eth_promiscuous_disable(port_id);
1262
1263         /* replay all multicast configuration */
1264         if (rte_eth_allmulticast_get(port_id) == 1)
1265                 rte_eth_allmulticast_enable(port_id);
1266         else if (rte_eth_allmulticast_get(port_id) == 0)
1267                 rte_eth_allmulticast_disable(port_id);
1268 }
1269
1270 int
1271 rte_eth_dev_start(uint16_t port_id)
1272 {
1273         struct rte_eth_dev *dev;
1274         int diag;
1275
1276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1277
1278         dev = &rte_eth_devices[port_id];
1279
1280         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1281
1282         if (dev->data->dev_started != 0) {
1283                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1284                         " already started\n",
1285                         port_id);
1286                 return 0;
1287         }
1288
1289         diag = (*dev->dev_ops->dev_start)(dev);
1290         if (diag == 0)
1291                 dev->data->dev_started = 1;
1292         else
1293                 return eth_err(port_id, diag);
1294
1295         rte_eth_dev_config_restore(port_id);
1296
1297         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1298                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1299                 (*dev->dev_ops->link_update)(dev, 0);
1300         }
1301         return 0;
1302 }
1303
1304 void
1305 rte_eth_dev_stop(uint16_t port_id)
1306 {
1307         struct rte_eth_dev *dev;
1308
1309         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1310         dev = &rte_eth_devices[port_id];
1311
1312         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1313
1314         if (dev->data->dev_started == 0) {
1315                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1316                         " already stopped\n",
1317                         port_id);
1318                 return;
1319         }
1320
1321         dev->data->dev_started = 0;
1322         (*dev->dev_ops->dev_stop)(dev);
1323 }
1324
1325 int
1326 rte_eth_dev_set_link_up(uint16_t port_id)
1327 {
1328         struct rte_eth_dev *dev;
1329
1330         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1331
1332         dev = &rte_eth_devices[port_id];
1333
1334         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1335         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1336 }
1337
1338 int
1339 rte_eth_dev_set_link_down(uint16_t port_id)
1340 {
1341         struct rte_eth_dev *dev;
1342
1343         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1344
1345         dev = &rte_eth_devices[port_id];
1346
1347         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1348         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1349 }
1350
1351 void
1352 rte_eth_dev_close(uint16_t port_id)
1353 {
1354         struct rte_eth_dev *dev;
1355
1356         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1357         dev = &rte_eth_devices[port_id];
1358
1359         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1360         dev->data->dev_started = 0;
1361         (*dev->dev_ops->dev_close)(dev);
1362
1363         dev->data->nb_rx_queues = 0;
1364         rte_free(dev->data->rx_queues);
1365         dev->data->rx_queues = NULL;
1366         dev->data->nb_tx_queues = 0;
1367         rte_free(dev->data->tx_queues);
1368         dev->data->tx_queues = NULL;
1369 }
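/*
 * Usage sketch (illustrative): orderly shutdown of a port.
 *
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_close(port_id);
 */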
1370
1371 int
1372 rte_eth_dev_reset(uint16_t port_id)
1373 {
1374         struct rte_eth_dev *dev;
1375         int ret;
1376
1377         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1378         dev = &rte_eth_devices[port_id];
1379
1380         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1381
1382         rte_eth_dev_stop(port_id);
1383         ret = dev->dev_ops->dev_reset(dev);
1384
1385         return eth_err(port_id, ret);
1386 }
1387
1388 int __rte_experimental
1389 rte_eth_dev_is_removed(uint16_t port_id)
1390 {
1391         struct rte_eth_dev *dev;
1392         int ret;
1393
1394         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1395
1396         dev = &rte_eth_devices[port_id];
1397
1398         if (dev->state == RTE_ETH_DEV_REMOVED)
1399                 return 1;
1400
1401         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1402
1403         ret = dev->dev_ops->is_removed(dev);
1404         if (ret != 0)
1405                 /* Device is physically removed. */
1406                 dev->state = RTE_ETH_DEV_REMOVED;
1407
1408         return ret;
1409 }
1410
1411 int
1412 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1413                        uint16_t nb_rx_desc, unsigned int socket_id,
1414                        const struct rte_eth_rxconf *rx_conf,
1415                        struct rte_mempool *mp)
1416 {
1417         int ret;
1418         uint32_t mbp_buf_size;
1419         struct rte_eth_dev *dev;
1420         struct rte_eth_dev_info dev_info;
1421         struct rte_eth_rxconf local_conf;
1422         void **rxq;
1423
1424         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1425
1426         dev = &rte_eth_devices[port_id];
1427         if (rx_queue_id >= dev->data->nb_rx_queues) {
1428                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1429                 return -EINVAL;
1430         }
1431
1432         if (dev->data->dev_started) {
1433                 RTE_PMD_DEBUG_TRACE(
1434                     "port %d must be stopped to allow configuration\n", port_id);
1435                 return -EBUSY;
1436         }
1437
1438         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1439         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1440
1441         /*
1442          * Check the size of the mbuf data buffer.
1443          * This value must be provided in the private data of the memory pool.
1444          * First check that the memory pool has valid private data.
1445          */
1446         rte_eth_dev_info_get(port_id, &dev_info);
1447         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1448                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1449                                 mp->name, (int) mp->private_data_size,
1450                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1451                 return -ENOSPC;
1452         }
1453         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1454
1455         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1456                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1457                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1458                                 "=%d)\n",
1459                                 mp->name,
1460                                 (int)mbp_buf_size,
1461                                 (int)(RTE_PKTMBUF_HEADROOM +
1462                                       dev_info.min_rx_bufsize),
1463                                 (int)RTE_PKTMBUF_HEADROOM,
1464                                 (int)dev_info.min_rx_bufsize);
1465                 return -EINVAL;
1466         }
1467
1468         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1469                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1470                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1471
1472                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1473                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1474                         nb_rx_desc,
1475                         dev_info.rx_desc_lim.nb_max,
1476                         dev_info.rx_desc_lim.nb_min,
1477                         dev_info.rx_desc_lim.nb_align);
1478                 return -EINVAL;
1479         }
1480
1481         rxq = dev->data->rx_queues;
1482         if (rxq[rx_queue_id]) {
1483                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1484                                         -ENOTSUP);
1485                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1486                 rxq[rx_queue_id] = NULL;
1487         }
1488
1489         if (rx_conf == NULL)
1490                 rx_conf = &dev_info.default_rxconf;
1491
1492         local_conf = *rx_conf;
1493         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1494                 /*
1495                  * Reflect port offloads to queue offloads so that the
1496                  * port-level offloads are not discarded.
1497                  */
1498                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1499                                                     &local_conf.offloads);
1500         }
1501
1502         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1503                                               socket_id, &local_conf, mp);
1504         if (!ret) {
1505                 if (!dev->data->min_rx_buf_size ||
1506                     dev->data->min_rx_buf_size > mbp_buf_size)
1507                         dev->data->min_rx_buf_size = mbp_buf_size;
1508         }
1509
1510         return eth_err(port_id, ret);
1511 }
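/*
 * Usage sketch (illustrative, hypothetical pool parameters): set up one
 * Rx queue backed by an mbuf pool, then start the port.
 *
 *	struct rte_mempool *mb_pool = rte_pktmbuf_pool_create("rx_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 *	if (mb_pool == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   NULL, mb_pool) < 0 ||
 *	    rte_eth_dev_start(port_id) < 0)
 *		rte_exit(EXIT_FAILURE, "port setup failed\n");
 */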
1512
1513 /**
1514  * Convert from the legacy txq_flags API to the Tx offloads API.
1515  */
1516 static void
1517 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1518 {
1519         uint64_t offloads = 0;
1520
1521         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1522                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1523         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1524                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1525         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1526                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1527         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1528                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1529         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1530                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1531         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1532             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1533                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1534
1535         *tx_offloads = offloads;
1536 }
1537
1538 /**
1539  * Convert from the per-queue Tx offloads API to the txq_flags API.
1540  */
1541 static void
1542 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1543 {
1544         uint32_t flags = 0;
1545
1546         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1547                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1548         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1549                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1550         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1551                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1552         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1553                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1554         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1555                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1556         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1557                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1558
1559         *txq_flags = flags;
1560 }
1561
1562 int
1563 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1564                        uint16_t nb_tx_desc, unsigned int socket_id,
1565                        const struct rte_eth_txconf *tx_conf)
1566 {
1567         struct rte_eth_dev *dev;
1568         struct rte_eth_dev_info dev_info;
1569         struct rte_eth_txconf local_conf;
1570         void **txq;
1571
1572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1573
1574         dev = &rte_eth_devices[port_id];
1575         if (tx_queue_id >= dev->data->nb_tx_queues) {
1576                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1577                 return -EINVAL;
1578         }
1579
1580         if (dev->data->dev_started) {
1581                 RTE_PMD_DEBUG_TRACE(
1582                     "port %d must be stopped to allow configuration\n", port_id);
1583                 return -EBUSY;
1584         }
1585
1586         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1587         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1588
1589         rte_eth_dev_info_get(port_id, &dev_info);
1590
1591         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1592             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1593             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1594                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1595                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1596                                 nb_tx_desc,
1597                                 dev_info.tx_desc_lim.nb_max,
1598                                 dev_info.tx_desc_lim.nb_min,
1599                                 dev_info.tx_desc_lim.nb_align);
1600                 return -EINVAL;
1601         }
1602
1603         txq = dev->data->tx_queues;
1604         if (txq[tx_queue_id]) {
1605                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1606                                         -ENOTSUP);
1607                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1608                 txq[tx_queue_id] = NULL;
1609         }
1610
1611         if (tx_conf == NULL)
1612                 tx_conf = &dev_info.default_txconf;
1613
1614         /*
1615          * Convert between the txq_flags and offloads APIs so that the
1616          * PMD needs to support only one of them.
1617          */
1618         local_conf = *tx_conf;
1619         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1620                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1621                                              &local_conf.txq_flags);
1622                 /* Keep the ignore flag. */
1623                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1624         } else {
1625                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1626                                           &local_conf.offloads);
1627         }
1628
1629         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1630                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1631 }
1632
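/*
 * Illustrative application-side sketch, not part of this file: set up one Tx
 * queue on an already configured, stopped port. Passing a NULL tx_conf makes
 * the API fall back to dev_info.default_txconf, and the txq_flags/offloads
 * conversion above is applied to whatever the PMD advertises. port_id is
 * assumed to be a valid, configured port.
 *
 *	int ret;
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	if (ret < 0)
 *		printf("tx queue setup failed: %d\n", ret);
 */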
1633 void
1634 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1635                 void *userdata __rte_unused)
1636 {
1637         unsigned i;
1638
1639         for (i = 0; i < unsent; i++)
1640                 rte_pktmbuf_free(pkts[i]);
1641 }
1642
1643 void
1644 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1645                 void *userdata)
1646 {
1647         uint64_t *count = userdata;
1648         unsigned i;
1649
1650         for (i = 0; i < unsent; i++)
1651                 rte_pktmbuf_free(pkts[i]);
1652
1653         *count += unsent;
1654 }
1655
1656 int
1657 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1658                 buffer_tx_error_fn cbfn, void *userdata)
1659 {
1660         buffer->error_callback = cbfn;
1661         buffer->error_userdata = userdata;
1662         return 0;
1663 }
1664
1665 int
1666 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1667 {
1668         int ret = 0;
1669
1670         if (buffer == NULL)
1671                 return -EINVAL;
1672
1673         buffer->size = size;
1674         if (buffer->error_callback == NULL) {
1675                 ret = rte_eth_tx_buffer_set_err_callback(
1676                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1677         }
1678
1679         return ret;
1680 }
1681
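/*
 * Illustrative application-side sketch, not part of this file: allocate and
 * initialise a Tx buffer for up to 32 packets and count unsent packets via
 * the count callback. The "dropped" counter and the buffer pointer are local
 * assumptions of the sketch.
 *
 *	static uint64_t dropped;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	if (buf != NULL && rte_eth_tx_buffer_init(buf, 32) == 0)
 *		rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 */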
1682 int
1683 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1684 {
1685         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1686         int ret;
1687
1688         /* Validate Input Data. Bail if not valid or not supported. */
1689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1690         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1691
1692         /* Call driver to free pending mbufs. */
1693         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1694                                                free_cnt);
1695         return eth_err(port_id, ret);
1696 }
1697
1698 void
1699 rte_eth_promiscuous_enable(uint16_t port_id)
1700 {
1701         struct rte_eth_dev *dev;
1702
1703         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1704         dev = &rte_eth_devices[port_id];
1705
1706         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1707         (*dev->dev_ops->promiscuous_enable)(dev);
1708         dev->data->promiscuous = 1;
1709 }
1710
1711 void
1712 rte_eth_promiscuous_disable(uint16_t port_id)
1713 {
1714         struct rte_eth_dev *dev;
1715
1716         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1717         dev = &rte_eth_devices[port_id];
1718
1719         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1720         dev->data->promiscuous = 0;
1721         (*dev->dev_ops->promiscuous_disable)(dev);
1722 }
1723
1724 int
1725 rte_eth_promiscuous_get(uint16_t port_id)
1726 {
1727         struct rte_eth_dev *dev;
1728
1729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1730
1731         dev = &rte_eth_devices[port_id];
1732         return dev->data->promiscuous;
1733 }
1734
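/*
 * Illustrative application-side sketch, not part of this file: switch a port
 * into promiscuous mode and read the flag back. The enable call returns
 * void, so the getter is the way to confirm the software state.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	if (rte_eth_promiscuous_get(port_id) == 1)
 *		printf("port %u is promiscuous\n", port_id);
 */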
1735 void
1736 rte_eth_allmulticast_enable(uint16_t port_id)
1737 {
1738         struct rte_eth_dev *dev;
1739
1740         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1741         dev = &rte_eth_devices[port_id];
1742
1743         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1744         (*dev->dev_ops->allmulticast_enable)(dev);
1745         dev->data->all_multicast = 1;
1746 }
1747
1748 void
1749 rte_eth_allmulticast_disable(uint16_t port_id)
1750 {
1751         struct rte_eth_dev *dev;
1752
1753         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1754         dev = &rte_eth_devices[port_id];
1755
1756         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1757         dev->data->all_multicast = 0;
1758         (*dev->dev_ops->allmulticast_disable)(dev);
1759 }
1760
1761 int
1762 rte_eth_allmulticast_get(uint16_t port_id)
1763 {
1764         struct rte_eth_dev *dev;
1765
1766         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1767
1768         dev = &rte_eth_devices[port_id];
1769         return dev->data->all_multicast;
1770 }
1771
1772 void
1773 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1774 {
1775         struct rte_eth_dev *dev;
1776
1777         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1778         dev = &rte_eth_devices[port_id];
1779
1780         if (dev->data->dev_conf.intr_conf.lsc)
1781                 rte_eth_linkstatus_get(dev, eth_link);
1782         else {
1783                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1784                 (*dev->dev_ops->link_update)(dev, 1);
1785                 *eth_link = dev->data->dev_link;
1786         }
1787 }
1788
1789 void
1790 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1791 {
1792         struct rte_eth_dev *dev;
1793
1794         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1795         dev = &rte_eth_devices[port_id];
1796
1797         if (dev->data->dev_conf.intr_conf.lsc)
1798                 rte_eth_linkstatus_get(dev, eth_link);
1799         else {
1800                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1801                 (*dev->dev_ops->link_update)(dev, 0);
1802                 *eth_link = dev->data->dev_link;
1803         }
1804 }
1805
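/*
 * Illustrative application-side sketch, not part of this file: poll the link
 * without waiting. With link-state interrupts (intr_conf.lsc) enabled the
 * cached status is returned; otherwise the PMD is queried directly.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	printf("port %u: %s, %u Mbps\n", port_id,
 *	       link.link_status ? "up" : "down", link.link_speed);
 */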
1806 int
1807 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1808 {
1809         struct rte_eth_dev *dev;
1810
1811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1812
1813         dev = &rte_eth_devices[port_id];
1814         memset(stats, 0, sizeof(*stats));
1815
1816         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1817         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1818         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1819 }
1820
1821 int
1822 rte_eth_stats_reset(uint16_t port_id)
1823 {
1824         struct rte_eth_dev *dev;
1825
1826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1827         dev = &rte_eth_devices[port_id];
1828
1829         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1830         (*dev->dev_ops->stats_reset)(dev);
1831         dev->data->rx_mbuf_alloc_failed = 0;
1832
1833         return 0;
1834 }
1835
1836 static inline int
1837 get_xstats_basic_count(struct rte_eth_dev *dev)
1838 {
1839         uint16_t nb_rxqs, nb_txqs;
1840         int count;
1841
1842         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1843         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1844
1845         count = RTE_NB_STATS;
1846         count += nb_rxqs * RTE_NB_RXQ_STATS;
1847         count += nb_txqs * RTE_NB_TXQ_STATS;
1848
1849         return count;
1850 }
1851
1852 static int
1853 get_xstats_count(uint16_t port_id)
1854 {
1855         struct rte_eth_dev *dev;
1856         int count;
1857
1858         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1859         dev = &rte_eth_devices[port_id];
1860         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1861                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1862                                 NULL, 0);
1863                 if (count < 0)
1864                         return eth_err(port_id, count);
1865         }
1866         if (dev->dev_ops->xstats_get_names != NULL) {
1867                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1868                 if (count < 0)
1869                         return eth_err(port_id, count);
1870         } else
1871                 count = 0;
1872
1873
1874         count += get_xstats_basic_count(dev);
1875
1876         return count;
1877 }
1878
1879 int
1880 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1881                 uint64_t *id)
1882 {
1883         int cnt_xstats, idx_xstat;
1884
1885         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1886
1887         if (!id) {
1888                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1889                 return -ENOMEM;
1890         }
1891
1892         if (!xstat_name) {
1893                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1894                 return -ENOMEM;
1895         }
1896
1897         /* Get count */
1898         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1899         if (cnt_xstats  < 0) {
1900                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1901                 return -ENODEV;
1902         }
1903
1904         /* Get id-name lookup table */
1905         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1906
1907         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1908                         port_id, xstats_names, cnt_xstats, NULL)) {
1909                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1910                 return -1;
1911         }
1912
1913         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1914                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1915                         *id = idx_xstat;
1916                         return 0;
1917                 }
1918         }
1919
1920         return -EINVAL;
1921 }
1922
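/*
 * Illustrative application-side sketch, not part of this file: resolve one
 * extended statistic by name, then fetch only its value by id.
 * "rx_good_packets" comes from the basic stats names above; a PMD-specific
 * name would work the same way.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */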
1923 /* retrieve basic stats names */
1924 static int
1925 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1926         struct rte_eth_xstat_name *xstats_names)
1927 {
1928         int cnt_used_entries = 0;
1929         uint32_t idx, id_queue;
1930         uint16_t num_q;
1931
1932         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1933                 snprintf(xstats_names[cnt_used_entries].name,
1934                         sizeof(xstats_names[0].name),
1935                         "%s", rte_stats_strings[idx].name);
1936                 cnt_used_entries++;
1937         }
1938         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1939         for (id_queue = 0; id_queue < num_q; id_queue++) {
1940                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1941                         snprintf(xstats_names[cnt_used_entries].name,
1942                                 sizeof(xstats_names[0].name),
1943                                 "rx_q%u%s",
1944                                 id_queue, rte_rxq_stats_strings[idx].name);
1945                         cnt_used_entries++;
1946                 }
1947
1948         }
1949         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1950         for (id_queue = 0; id_queue < num_q; id_queue++) {
1951                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1952                         snprintf(xstats_names[cnt_used_entries].name,
1953                                 sizeof(xstats_names[0].name),
1954                                 "tx_q%u%s",
1955                                 id_queue, rte_txq_stats_strings[idx].name);
1956                         cnt_used_entries++;
1957                 }
1958         }
1959         return cnt_used_entries;
1960 }
1961
1962 /* retrieve ethdev extended statistics names */
1963 int
1964 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1965         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1966         uint64_t *ids)
1967 {
1968         struct rte_eth_xstat_name *xstats_names_copy;
1969         unsigned int no_basic_stat_requested = 1;
1970         unsigned int no_ext_stat_requested = 1;
1971         unsigned int expected_entries;
1972         unsigned int basic_count;
1973         struct rte_eth_dev *dev;
1974         unsigned int i;
1975         int ret;
1976
1977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1978         dev = &rte_eth_devices[port_id];
1979
1980         basic_count = get_xstats_basic_count(dev);
1981         ret = get_xstats_count(port_id);
1982         if (ret < 0)
1983                 return ret;
1984         expected_entries = (unsigned int)ret;
1985
1986         /* Return max number of stats if no ids given */
1987         if (!ids) {
1988                 if (!xstats_names)
1989                         return expected_entries;
1990                 else if (xstats_names && size < expected_entries)
1991                         return expected_entries;
1992         }
1993
1994         if (ids && !xstats_names)
1995                 return -EINVAL;
1996
1997         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1998                 uint64_t ids_copy[size];
1999
2000                 for (i = 0; i < size; i++) {
2001                         if (ids[i] < basic_count) {
2002                                 no_basic_stat_requested = 0;
2003                                 break;
2004                         }
2005
2006                         /*
2007                          * Convert ids to xstats ids that PMD knows.
2008                          * ids known by user are basic + extended stats.
2009                          */
2010                         ids_copy[i] = ids[i] - basic_count;
2011                 }
2012
2013                 if (no_basic_stat_requested)
2014                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2015                                         xstats_names, ids_copy, size);
2016         }
2017
2018         /* Retrieve all stats */
2019         if (!ids) {
2020                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2021                                 expected_entries);
2022                 if (num_stats < 0 || num_stats > (int)expected_entries)
2023                         return num_stats;
2024                 else
2025                         return expected_entries;
2026         }
2027
2028         xstats_names_copy = calloc(expected_entries,
2029                 sizeof(struct rte_eth_xstat_name));
2030
2031         if (!xstats_names_copy) {
2032                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2033                 return -ENOMEM;
2034         }
2035
2036         if (ids) {
2037                 for (i = 0; i < size; i++) {
2038                         if (ids[i] >= basic_count) {
2039                                 no_ext_stat_requested = 0;
2040                                 break;
2041                         }
2042                 }
2043         }
2044
2045         /* Fill xstats_names_copy structure */
2046         if (ids && no_ext_stat_requested) {
2047                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2048         } else {
2049                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2050                         expected_entries);
2051                 if (ret < 0) {
2052                         free(xstats_names_copy);
2053                         return ret;
2054                 }
2055         }
2056
2057         /* Filter stats */
2058         for (i = 0; i < size; i++) {
2059                 if (ids[i] >= expected_entries) {
2060                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2061                         free(xstats_names_copy);
2062                         return -1;
2063                 }
2064                 xstats_names[i] = xstats_names_copy[ids[i]];
2065         }
2066
2067         free(xstats_names_copy);
2068         return size;
2069 }
2070
2071 int
2072 rte_eth_xstats_get_names(uint16_t port_id,
2073         struct rte_eth_xstat_name *xstats_names,
2074         unsigned int size)
2075 {
2076         struct rte_eth_dev *dev;
2077         int cnt_used_entries;
2078         int cnt_expected_entries;
2079         int cnt_driver_entries;
2080
2081         cnt_expected_entries = get_xstats_count(port_id);
2082         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2083                         (int)size < cnt_expected_entries)
2084                 return cnt_expected_entries;
2085
2086         /* port_id checked in get_xstats_count() */
2087         dev = &rte_eth_devices[port_id];
2088
2089         cnt_used_entries = rte_eth_basic_stats_get_names(
2090                 dev, xstats_names);
2091
2092         if (dev->dev_ops->xstats_get_names != NULL) {
2093                 /* If there are any driver-specific xstats, append them
2094                  * to end of list.
2095                  */
2096                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2097                         dev,
2098                         xstats_names + cnt_used_entries,
2099                         size - cnt_used_entries);
2100                 if (cnt_driver_entries < 0)
2101                         return eth_err(port_id, cnt_driver_entries);
2102                 cnt_used_entries += cnt_driver_entries;
2103         }
2104
2105         return cnt_used_entries;
2106 }
2107
2108
2109 static int
2110 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2111 {
2112         struct rte_eth_dev *dev;
2113         struct rte_eth_stats eth_stats;
2114         unsigned int count = 0, i, q;
2115         uint64_t val, *stats_ptr;
2116         uint16_t nb_rxqs, nb_txqs;
2117         int ret;
2118
2119         ret = rte_eth_stats_get(port_id, &eth_stats);
2120         if (ret < 0)
2121                 return ret;
2122
2123         dev = &rte_eth_devices[port_id];
2124
2125         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2126         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2127
2128         /* global stats */
2129         for (i = 0; i < RTE_NB_STATS; i++) {
2130                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2131                                         rte_stats_strings[i].offset);
2132                 val = *stats_ptr;
2133                 xstats[count++].value = val;
2134         }
2135
2136         /* per-rxq stats */
2137         for (q = 0; q < nb_rxqs; q++) {
2138                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2139                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2140                                         rte_rxq_stats_strings[i].offset +
2141                                         q * sizeof(uint64_t));
2142                         val = *stats_ptr;
2143                         xstats[count++].value = val;
2144                 }
2145         }
2146
2147         /* per-txq stats */
2148         for (q = 0; q < nb_txqs; q++) {
2149                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2150                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2151                                         rte_txq_stats_strings[i].offset +
2152                                         q * sizeof(uint64_t));
2153                         val = *stats_ptr;
2154                         xstats[count++].value = val;
2155                 }
2156         }
2157         return count;
2158 }
2159
2160 /* retrieve ethdev extended statistics */
2161 int
2162 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2163                          uint64_t *values, unsigned int size)
2164 {
2165         unsigned int no_basic_stat_requested = 1;
2166         unsigned int no_ext_stat_requested = 1;
2167         unsigned int num_xstats_filled;
2168         unsigned int basic_count;
2169         uint16_t expected_entries;
2170         struct rte_eth_dev *dev;
2171         unsigned int i;
2172         int ret;
2173
2174         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2175         ret = get_xstats_count(port_id);
2176         if (ret < 0)
2177                 return ret;
2178         expected_entries = (uint16_t)ret;
2179         struct rte_eth_xstat xstats[expected_entries];
2180         dev = &rte_eth_devices[port_id];
2181         basic_count = get_xstats_basic_count(dev);
2182
2183         /* Return max number of stats if no ids given */
2184         if (!ids) {
2185                 if (!values)
2186                         return expected_entries;
2187                 else if (values && size < expected_entries)
2188                         return expected_entries;
2189         }
2190
2191         if (ids && !values)
2192                 return -EINVAL;
2193
2194         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2195                 unsigned int basic_count = get_xstats_basic_count(dev);
2196                 uint64_t ids_copy[size];
2197
2198                 for (i = 0; i < size; i++) {
2199                         if (ids[i] < basic_count) {
2200                                 no_basic_stat_requested = 0;
2201                                 break;
2202                         }
2203
2204                         /*
2205                          * Convert ids to xstats ids that PMD knows.
2206                          * ids known by user are basic + extended stats.
2207                          */
2208                         ids_copy[i] = ids[i] - basic_count;
2209                 }
2210
2211                 if (no_basic_stat_requested)
2212                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2213                                         values, size);
2214         }
2215
2216         if (ids) {
2217                 for (i = 0; i < size; i++) {
2218                         if (ids[i] >= basic_count) {
2219                                 no_ext_stat_requested = 0;
2220                                 break;
2221                         }
2222                 }
2223         }
2224
2225         /* Fill the xstats structure */
2226         if (ids && no_ext_stat_requested)
2227                 ret = rte_eth_basic_stats_get(port_id, xstats);
2228         else
2229                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2230
2231         if (ret < 0)
2232                 return ret;
2233         num_xstats_filled = (unsigned int)ret;
2234
2235         /* Return all stats */
2236         if (!ids) {
2237                 for (i = 0; i < num_xstats_filled; i++)
2238                         values[i] = xstats[i].value;
2239                 return expected_entries;
2240         }
2241
2242         /* Filter stats */
2243         for (i = 0; i < size; i++) {
2244                 if (ids[i] >= expected_entries) {
2245                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2246                         return -1;
2247                 }
2248                 values[i] = xstats[ids[i]].value;
2249         }
2250         return size;
2251 }
2252
2253 int
2254 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2255         unsigned int n)
2256 {
2257         struct rte_eth_dev *dev;
2258         unsigned int count = 0, i;
2259         signed int xcount = 0;
2260         uint16_t nb_rxqs, nb_txqs;
2261         int ret;
2262
2263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2264
2265         dev = &rte_eth_devices[port_id];
2266
2267         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2268         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2269
2270         /* Return generic statistics */
2271         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2272                 (nb_txqs * RTE_NB_TXQ_STATS);
2273
2274         /* implemented by the driver */
2275         if (dev->dev_ops->xstats_get != NULL) {
2276                 /* Retrieve the xstats from the driver at the end of the
2277                  * xstats struct.
2278                  */
2279                 xcount = (*dev->dev_ops->xstats_get)(dev,
2280                                      xstats ? xstats + count : NULL,
2281                                      (n > count) ? n - count : 0);
2282
2283                 if (xcount < 0)
2284                         return eth_err(port_id, xcount);
2285         }
2286
2287         if (n < count + xcount || xstats == NULL)
2288                 return count + xcount;
2289
2290         /* now fill the xstats structure */
2291         ret = rte_eth_basic_stats_get(port_id, xstats);
2292         if (ret < 0)
2293                 return ret;
2294         count = ret;
2295
2296         for (i = 0; i < count; i++)
2297                 xstats[i].id = i;
2298         /* add an offset to driver-specific stats */
2299         for ( ; i < count + xcount; i++)
2300                 xstats[i].id += count;
2301
2302         return count + xcount;
2303 }
2304
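/*
 * Illustrative application-side sketch, not part of this file: the usual
 * two-pass pattern for dumping every xstat. A first call with NULL/0 returns
 * the required array size, a second call fills it; names and values are
 * matched through the id field.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	if (xs != NULL && names != NULL &&
 *	    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, xs, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[xs[i].id].name, xs[i].value);
 */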
2305 /* reset ethdev extended statistics */
2306 void
2307 rte_eth_xstats_reset(uint16_t port_id)
2308 {
2309         struct rte_eth_dev *dev;
2310
2311         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2312         dev = &rte_eth_devices[port_id];
2313
2314         /* implemented by the driver */
2315         if (dev->dev_ops->xstats_reset != NULL) {
2316                 (*dev->dev_ops->xstats_reset)(dev);
2317                 return;
2318         }
2319
2320         /* fallback to default */
2321         rte_eth_stats_reset(port_id);
2322 }
2323
2324 static int
2325 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2326                 uint8_t is_rx)
2327 {
2328         struct rte_eth_dev *dev;
2329
2330         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2331
2332         dev = &rte_eth_devices[port_id];
2333
2334         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2335         return (*dev->dev_ops->queue_stats_mapping_set)
2336                         (dev, queue_id, stat_idx, is_rx);
2337 }
2338
2339
2340 int
2341 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2342                 uint8_t stat_idx)
2343 {
2344         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2345                                                 stat_idx, STAT_QMAP_TX));
2346 }
2347
2348
2349 int
2350 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2351                 uint8_t stat_idx)
2352 {
2353         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2354                                                 stat_idx, STAT_QMAP_RX));
2355 }
2356
2357 int
2358 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363         dev = &rte_eth_devices[port_id];
2364
2365         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2366         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2367                                                         fw_version, fw_size));
2368 }
2369
2370 void
2371 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2372 {
2373         struct rte_eth_dev *dev;
2374         const struct rte_eth_desc_lim lim = {
2375                 .nb_max = UINT16_MAX,
2376                 .nb_min = 0,
2377                 .nb_align = 1,
2378         };
2379
2380         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2381         dev = &rte_eth_devices[port_id];
2382
2383         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2384         dev_info->rx_desc_lim = lim;
2385         dev_info->tx_desc_lim = lim;
2386
2387         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2388         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2389         dev_info->driver_name = dev->device->driver->name;
2390         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2391         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2392 }
2393
2394 int
2395 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2396                                  uint32_t *ptypes, int num)
2397 {
2398         int i, j;
2399         struct rte_eth_dev *dev;
2400         const uint32_t *all_ptypes;
2401
2402         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2403         dev = &rte_eth_devices[port_id];
2404         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2405         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2406
2407         if (!all_ptypes)
2408                 return 0;
2409
2410         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2411                 if (all_ptypes[i] & ptype_mask) {
2412                         if (j < num)
2413                                 ptypes[j] = all_ptypes[i];
2414                         j++;
2415                 }
2416
2417         return j;
2418 }
2419
2420 void
2421 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2422 {
2423         struct rte_eth_dev *dev;
2424
2425         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2426         dev = &rte_eth_devices[port_id];
2427         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2428 }
2429
2430
2431 int
2432 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2433 {
2434         struct rte_eth_dev *dev;
2435
2436         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2437
2438         dev = &rte_eth_devices[port_id];
2439         *mtu = dev->data->mtu;
2440         return 0;
2441 }
2442
2443 int
2444 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2445 {
2446         int ret;
2447         struct rte_eth_dev *dev;
2448
2449         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2450         dev = &rte_eth_devices[port_id];
2451         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2452
2453         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2454         if (!ret)
2455                 dev->data->mtu = mtu;
2456
2457         return eth_err(port_id, ret);
2458 }
2459
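/*
 * Illustrative application-side sketch, not part of this file: read the
 * current MTU and raise it to 9000 bytes. The cached dev->data->mtu is only
 * updated when the PMD accepts the new value.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_get_mtu(port_id, &mtu) == 0 &&
 *	    rte_eth_dev_set_mtu(port_id, 9000) == 0)
 *		printf("MTU changed from %u to 9000\n", mtu);
 */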
2460 int
2461 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2462 {
2463         struct rte_eth_dev *dev;
2464         int ret;
2465
2466         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2467         dev = &rte_eth_devices[port_id];
2468         if (!(dev->data->dev_conf.rxmode.offloads &
2469               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2470                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2471                 return -ENOSYS;
2472         }
2473
2474         if (vlan_id > 4095) {
2475                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2476                                 port_id, (unsigned) vlan_id);
2477                 return -EINVAL;
2478         }
2479         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2480
2481         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2482         if (ret == 0) {
2483                 struct rte_vlan_filter_conf *vfc;
2484                 int vidx;
2485                 int vbit;
2486
2487                 vfc = &dev->data->vlan_filter_conf;
2488                 vidx = vlan_id / 64;
2489                 vbit = vlan_id % 64;
2490
2491                 if (on)
2492                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2493                 else
2494                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2495         }
2496
2497         return eth_err(port_id, ret);
2498 }
2499
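/*
 * Illustrative application-side sketch, not part of this file: accept only
 * VLAN 100 on a port. The VLAN filter offload must already be enabled in
 * rxmode.offloads at configure time, otherwise this call returns -ENOSYS.
 *
 *	// at configuration time:
 *	// port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *		printf("could not add VLAN 100 to the filter table\n");
 */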
2500 int
2501 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2502                                     int on)
2503 {
2504         struct rte_eth_dev *dev;
2505
2506         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2507         dev = &rte_eth_devices[port_id];
2508         if (rx_queue_id >= dev->data->nb_rx_queues) {
2509                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2510                 return -EINVAL;
2511         }
2512
2513         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2514         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2515
2516         return 0;
2517 }
2518
2519 int
2520 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2521                                 enum rte_vlan_type vlan_type,
2522                                 uint16_t tpid)
2523 {
2524         struct rte_eth_dev *dev;
2525
2526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2527         dev = &rte_eth_devices[port_id];
2528         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2529
2530         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2531                                                                tpid));
2532 }
2533
2534 int
2535 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2536 {
2537         struct rte_eth_dev *dev;
2538         int ret = 0;
2539         int mask = 0;
2540         int cur, org = 0;
2541         uint64_t orig_offloads;
2542
2543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2544         dev = &rte_eth_devices[port_id];
2545
2546         /* save original values in case of failure */
2547         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2548
2549         /* check which options were changed by the application */
2550         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2551         org = !!(dev->data->dev_conf.rxmode.offloads &
2552                  DEV_RX_OFFLOAD_VLAN_STRIP);
2553         if (cur != org) {
2554                 if (cur)
2555                         dev->data->dev_conf.rxmode.offloads |=
2556                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2557                 else
2558                         dev->data->dev_conf.rxmode.offloads &=
2559                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2560                 mask |= ETH_VLAN_STRIP_MASK;
2561         }
2562
2563         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2564         org = !!(dev->data->dev_conf.rxmode.offloads &
2565                  DEV_RX_OFFLOAD_VLAN_FILTER);
2566         if (cur != org) {
2567                 if (cur)
2568                         dev->data->dev_conf.rxmode.offloads |=
2569                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2570                 else
2571                         dev->data->dev_conf.rxmode.offloads &=
2572                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2573                 mask |= ETH_VLAN_FILTER_MASK;
2574         }
2575
2576         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2577         org = !!(dev->data->dev_conf.rxmode.offloads &
2578                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2579         if (cur != org) {
2580                 if (cur)
2581                         dev->data->dev_conf.rxmode.offloads |=
2582                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2583                 else
2584                         dev->data->dev_conf.rxmode.offloads &=
2585                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2586                 mask |= ETH_VLAN_EXTEND_MASK;
2587         }
2588
2589         /*no change*/
2590         if (mask == 0)
2591                 return ret;
2592
2593         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2594
2595         /*
2596          * Convert to the offload bitfield API in case the underlying PMD
2597          * still relies on it.
2598          */
2599         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2600                                     &dev->data->dev_conf.rxmode);
2601         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2602         if (ret) {
2603                 /* hit an error, restore the original values */
2604                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2605                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2606                                             &dev->data->dev_conf.rxmode);
2607         }
2608
2609         return eth_err(port_id, ret);
2610 }
2611
2612 int
2613 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2614 {
2615         struct rte_eth_dev *dev;
2616         int ret = 0;
2617
2618         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2619         dev = &rte_eth_devices[port_id];
2620
2621         if (dev->data->dev_conf.rxmode.offloads &
2622             DEV_RX_OFFLOAD_VLAN_STRIP)
2623                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2624
2625         if (dev->data->dev_conf.rxmode.offloads &
2626             DEV_RX_OFFLOAD_VLAN_FILTER)
2627                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2628
2629         if (dev->data->dev_conf.rxmode.offloads &
2630             DEV_RX_OFFLOAD_VLAN_EXTEND)
2631                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2632
2633         return ret;
2634 }
2635
2636 int
2637 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2638 {
2639         struct rte_eth_dev *dev;
2640
2641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2642         dev = &rte_eth_devices[port_id];
2643         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2644
2645         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2646 }
2647
2648 int
2649 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2650 {
2651         struct rte_eth_dev *dev;
2652
2653         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2654         dev = &rte_eth_devices[port_id];
2655         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2656         memset(fc_conf, 0, sizeof(*fc_conf));
2657         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2658 }
2659
2660 int
2661 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2662 {
2663         struct rte_eth_dev *dev;
2664
2665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2666         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2667                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2668                 return -EINVAL;
2669         }
2670
2671         dev = &rte_eth_devices[port_id];
2672         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2673         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2674 }
2675
2676 int
2677 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2678                                    struct rte_eth_pfc_conf *pfc_conf)
2679 {
2680         struct rte_eth_dev *dev;
2681
2682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2683         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2684                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2685                 return -EINVAL;
2686         }
2687
2688         dev = &rte_eth_devices[port_id];
2689         /* High water and low water validation is device-specific */
2690         if  (*dev->dev_ops->priority_flow_ctrl_set)
2691                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2692                                         (dev, pfc_conf));
2693         return -ENOTSUP;
2694 }
2695
2696 static int
2697 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2698                         uint16_t reta_size)
2699 {
2700         uint16_t i, num;
2701
2702         if (!reta_conf)
2703                 return -EINVAL;
2704
2705         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2706         for (i = 0; i < num; i++) {
2707                 if (reta_conf[i].mask)
2708                         return 0;
2709         }
2710
2711         return -EINVAL;
2712 }
2713
2714 static int
2715 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2716                          uint16_t reta_size,
2717                          uint16_t max_rxq)
2718 {
2719         uint16_t i, idx, shift;
2720
2721         if (!reta_conf)
2722                 return -EINVAL;
2723
2724         if (max_rxq == 0) {
2725                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2726                 return -EINVAL;
2727         }
2728
2729         for (i = 0; i < reta_size; i++) {
2730                 idx = i / RTE_RETA_GROUP_SIZE;
2731                 shift = i % RTE_RETA_GROUP_SIZE;
2732                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2733                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2734                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2735                                 "the maximum rxq index: %u\n", idx, shift,
2736                                 reta_conf[idx].reta[shift], max_rxq);
2737                         return -EINVAL;
2738                 }
2739         }
2740
2741         return 0;
2742 }
2743
2744 int
2745 rte_eth_dev_rss_reta_update(uint16_t port_id,
2746                             struct rte_eth_rss_reta_entry64 *reta_conf,
2747                             uint16_t reta_size)
2748 {
2749         struct rte_eth_dev *dev;
2750         int ret;
2751
2752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2753         /* Check mask bits */
2754         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2755         if (ret < 0)
2756                 return ret;
2757
2758         dev = &rte_eth_devices[port_id];
2759
2760         /* Check entry value */
2761         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2762                                 dev->data->nb_rx_queues);
2763         if (ret < 0)
2764                 return ret;
2765
2766         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2767         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2768                                                              reta_size));
2769 }
2770
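/*
 * Illustrative application-side sketch, not part of this file: spread a
 * 128-entry redirection table evenly across the configured Rx queues.
 * In real code reta_size and nb_rxq should come from dev_info and the
 * port configuration; here they are assumptions of the sketch.
 *
 *	struct rte_eth_rss_reta_entry64 reta[128 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i, nb_rxq = 4;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < 128; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */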
2771 int
2772 rte_eth_dev_rss_reta_query(uint16_t port_id,
2773                            struct rte_eth_rss_reta_entry64 *reta_conf,
2774                            uint16_t reta_size)
2775 {
2776         struct rte_eth_dev *dev;
2777         int ret;
2778
2779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2780
2781         /* Check mask bits */
2782         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2783         if (ret < 0)
2784                 return ret;
2785
2786         dev = &rte_eth_devices[port_id];
2787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2788         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2789                                                             reta_size));
2790 }
2791
2792 int
2793 rte_eth_dev_rss_hash_update(uint16_t port_id,
2794                             struct rte_eth_rss_conf *rss_conf)
2795 {
2796         struct rte_eth_dev *dev;
2797
2798         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2799         dev = &rte_eth_devices[port_id];
2800         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2801         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2802                                                                  rss_conf));
2803 }
2804
2805 int
2806 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2807                               struct rte_eth_rss_conf *rss_conf)
2808 {
2809         struct rte_eth_dev *dev;
2810
2811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2812         dev = &rte_eth_devices[port_id];
2813         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2814         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2815                                                                    rss_conf));
2816 }
2817
2818 int
2819 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2820                                 struct rte_eth_udp_tunnel *udp_tunnel)
2821 {
2822         struct rte_eth_dev *dev;
2823
2824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825         if (udp_tunnel == NULL) {
2826                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2827                 return -EINVAL;
2828         }
2829
2830         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2831                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2832                 return -EINVAL;
2833         }
2834
2835         dev = &rte_eth_devices[port_id];
2836         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2837         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2838                                                                 udp_tunnel));
2839 }
2840
2841 int
2842 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2843                                    struct rte_eth_udp_tunnel *udp_tunnel)
2844 {
2845         struct rte_eth_dev *dev;
2846
2847         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2848         dev = &rte_eth_devices[port_id];
2849
2850         if (udp_tunnel == NULL) {
2851                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2852                 return -EINVAL;
2853         }
2854
2855         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2856                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2857                 return -EINVAL;
2858         }
2859
2860         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2861         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2862                                                                 udp_tunnel));
2863 }
2864
2865 int
2866 rte_eth_led_on(uint16_t port_id)
2867 {
2868         struct rte_eth_dev *dev;
2869
2870         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2871         dev = &rte_eth_devices[port_id];
2872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2873         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2874 }
2875
2876 int
2877 rte_eth_led_off(uint16_t port_id)
2878 {
2879         struct rte_eth_dev *dev;
2880
2881         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2882         dev = &rte_eth_devices[port_id];
2883         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2884         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2885 }
2886
2887 /*
2888  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2889  * an empty spot.
2890  */
2891 static int
2892 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2893 {
2894         struct rte_eth_dev_info dev_info;
2895         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2896         unsigned i;
2897
2898         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2899         rte_eth_dev_info_get(port_id, &dev_info);
2900
2901         for (i = 0; i < dev_info.max_mac_addrs; i++)
2902                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2903                         return i;
2904
2905         return -1;
2906 }
2907
2908 static const struct ether_addr null_mac_addr;
2909
2910 int
2911 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2912                         uint32_t pool)
2913 {
2914         struct rte_eth_dev *dev;
2915         int index;
2916         uint64_t pool_mask;
2917         int ret;
2918
2919         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2920         dev = &rte_eth_devices[port_id];
2921         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2922
2923         if (is_zero_ether_addr(addr)) {
2924                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2925                         port_id);
2926                 return -EINVAL;
2927         }
2928         if (pool >= ETH_64_POOLS) {
2929                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2930                 return -EINVAL;
2931         }
2932
2933         index = get_mac_addr_index(port_id, addr);
2934         if (index < 0) {
2935                 index = get_mac_addr_index(port_id, &null_mac_addr);
2936                 if (index < 0) {
2937                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2938                                 port_id);
2939                         return -ENOSPC;
2940                 }
2941         } else {
2942                 pool_mask = dev->data->mac_pool_sel[index];
2943
2944                 /* If both the MAC address and the pool are already there, do nothing */
2945                 if (pool_mask & (1ULL << pool))
2946                         return 0;
2947         }
2948
2949         /* Update NIC */
2950         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2951
2952         if (ret == 0) {
2953                 /* Update address in NIC data structure */
2954                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2955
2956                 /* Update pool bitmap in NIC data structure */
2957                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2958         }
2959
2960         return eth_err(port_id, ret);
2961 }
2962
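/*
 * Illustrative application-side sketch, not part of this file: add a second
 * unicast address to pool 0. The address literal is an assumption; a real
 * application would take it from its own configuration.
 *
 *	struct ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) != 0)
 *		printf("could not add the extra MAC address\n");
 */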
2963 int
2964 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2965 {
2966         struct rte_eth_dev *dev;
2967         int index;
2968
2969         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2970         dev = &rte_eth_devices[port_id];
2971         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2972
2973         index = get_mac_addr_index(port_id, addr);
2974         if (index == 0) {
2975                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2976                 return -EADDRINUSE;
2977         } else if (index < 0)
2978                 return 0;  /* Do nothing if address wasn't found */
2979
2980         /* Update NIC */
2981         (*dev->dev_ops->mac_addr_remove)(dev, index);
2982
2983         /* Update address in NIC data structure */
2984         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2985
2986         /* reset pool bitmap */
2987         dev->data->mac_pool_sel[index] = 0;
2988
2989         return 0;
2990 }
2991
2992 int
2993 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2994 {
2995         struct rte_eth_dev *dev;
2996
2997         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2998
2999         if (!is_valid_assigned_ether_addr(addr))
3000                 return -EINVAL;
3001
3002         dev = &rte_eth_devices[port_id];
3003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3004
3005         /* Update default address in NIC data structure */
3006         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3007
3008         (*dev->dev_ops->mac_addr_set)(dev, addr);
3009
3010         return 0;
3011 }
3012
3013
3014 /*
3015  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3016  * an empty spot.
3017  */
3018 static int
3019 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3020 {
3021         struct rte_eth_dev_info dev_info;
3022         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3023         unsigned i;
3024
3025         rte_eth_dev_info_get(port_id, &dev_info);
3026         if (!dev->data->hash_mac_addrs)
3027                 return -1;
3028
3029         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3030                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3031                         ETHER_ADDR_LEN) == 0)
3032                         return i;
3033
3034         return -1;
3035 }
3036
3037 int
3038 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3039                                 uint8_t on)
3040 {
3041         int index;
3042         int ret;
3043         struct rte_eth_dev *dev;
3044
3045         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3046
3047         dev = &rte_eth_devices[port_id];
3048         if (is_zero_ether_addr(addr)) {
3049                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3050                         port_id);
3051                 return -EINVAL;
3052         }
3053
3054         index = get_hash_mac_addr_index(port_id, addr);
3055         /* Check if it's already there, and do nothing */
3056         if ((index >= 0) && on)
3057                 return 0;
3058
3059         if (index < 0) {
3060                 if (!on) {
3061                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3062                                 "set in UTA\n", port_id);
3063                         return -EINVAL;
3064                 }
3065
3066                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3067                 if (index < 0) {
3068                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3069                                         port_id);
3070                         return -ENOSPC;
3071                 }
3072         }
3073
3074         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3075         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3076         if (ret == 0) {
3077                 /* Update address in NIC data structure */
3078                 if (on)
3079                         ether_addr_copy(addr,
3080                                         &dev->data->hash_mac_addrs[index]);
3081                 else
3082                         ether_addr_copy(&null_mac_addr,
3083                                         &dev->data->hash_mac_addrs[index]);
3084         }
3085
3086         return eth_err(port_id, ret);
3087 }
3088
3089 int
3090 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3091 {
3092         struct rte_eth_dev *dev;
3093
3094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3095
3096         dev = &rte_eth_devices[port_id];
3097
3098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3099         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3100                                                                        on));
3101 }
3102
3103 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3104                                         uint16_t tx_rate)
3105 {
3106         struct rte_eth_dev *dev;
3107         struct rte_eth_dev_info dev_info;
3108         struct rte_eth_link link;
3109
3110         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3111
3112         dev = &rte_eth_devices[port_id];
3113         rte_eth_dev_info_get(port_id, &dev_info);
3114         link = dev->data->dev_link;
3115
3116         if (queue_idx >= dev_info.max_tx_queues) {
3117                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3118                                 "invalid queue id=%d\n", port_id, queue_idx);
3119                 return -EINVAL;
3120         }
3121
3122         if (tx_rate > link.link_speed) {
3123                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
3124                                 "bigger than link speed=%d\n",
3125                         tx_rate, link.link_speed);
3126                 return -EINVAL;
3127         }
3128
3129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3130         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3131                                                         queue_idx, tx_rate));
3132 }
3133
3134 int
3135 rte_eth_mirror_rule_set(uint16_t port_id,
3136                         struct rte_eth_mirror_conf *mirror_conf,
3137                         uint8_t rule_id, uint8_t on)
3138 {
3139         struct rte_eth_dev *dev;
3140
3141         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3142         if (mirror_conf->rule_type == 0) {
3143                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
3144                 return -EINVAL;
3145         }
3146
3147         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3148                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3149                                 ETH_64_POOLS - 1);
3150                 return -EINVAL;
3151         }
3152
3153         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3154              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3155             (mirror_conf->pool_mask == 0)) {
3156                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
3157                 return -EINVAL;
3158         }
3159
3160         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3161             mirror_conf->vlan.vlan_mask == 0) {
3162                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
3163                 return -EINVAL;
3164         }
3165
3166         dev = &rte_eth_devices[port_id];
3167         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3168
3169         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3170                                                 mirror_conf, rule_id, on));
3171 }
3172
3173 int
3174 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3175 {
3176         struct rte_eth_dev *dev;
3177
3178         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179
3180         dev = &rte_eth_devices[port_id];
3181         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3182
3183         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3184                                                                    rule_id));
3185 }
3186
3187 RTE_INIT(eth_dev_init_cb_lists)
3188 {
3189         int i;
3190
3191         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3192                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3193 }
3194
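/*
 * Register an event callback on one port, or on every port when
 * port_id == RTE_ETH_ALL.  Registering the same (fn, arg, event) tuple
 * twice on a port is a no-op; on allocation failure the registration is
 * rolled back on the ports already processed.
 */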
3195 int
3196 rte_eth_dev_callback_register(uint16_t port_id,
3197                         enum rte_eth_event_type event,
3198                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3199 {
3200         struct rte_eth_dev *dev;
3201         struct rte_eth_dev_callback *user_cb;
3202         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3203         uint16_t last_port;
3204
3205         if (!cb_fn)
3206                 return -EINVAL;
3207
3208         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3209                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3210                 return -EINVAL;
3211         }
3212
3213         if (port_id == RTE_ETH_ALL) {
3214                 next_port = 0;
3215                 last_port = RTE_MAX_ETHPORTS - 1;
3216         } else {
3217                 next_port = last_port = port_id;
3218         }
3219
3220         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3221
3222         do {
3223                 dev = &rte_eth_devices[next_port];
3224
3225                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3226                         if (user_cb->cb_fn == cb_fn &&
3227                                 user_cb->cb_arg == cb_arg &&
3228                                 user_cb->event == event) {
3229                                 break;
3230                         }
3231                 }
3232
3233                 /* create a new callback. */
3234                 if (user_cb == NULL) {
3235                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3236                                 sizeof(struct rte_eth_dev_callback), 0);
3237                         if (user_cb != NULL) {
3238                                 user_cb->cb_fn = cb_fn;
3239                                 user_cb->cb_arg = cb_arg;
3240                                 user_cb->event = event;
3241                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3242                                                   user_cb, next);
3243                         } else {
3244                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3245                                 rte_eth_dev_callback_unregister(port_id, event,
3246                                                                 cb_fn, cb_arg);
3247                                 return -ENOMEM;
3248                         }
3249
3250                 }
3251         } while (++next_port <= last_port);
3252
3253         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3254         return 0;
3255 }
3256
3257 int
3258 rte_eth_dev_callback_unregister(uint16_t port_id,
3259                         enum rte_eth_event_type event,
3260                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3261 {
3262         int ret;
3263         struct rte_eth_dev *dev;
3264         struct rte_eth_dev_callback *cb, *next;
3265         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3266         uint16_t last_port;
3267
3268         if (!cb_fn)
3269                 return -EINVAL;
3270
3271         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3272                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3273                 return -EINVAL;
3274         }
3275
3276         if (port_id == RTE_ETH_ALL) {
3277                 next_port = 0;
3278                 last_port = RTE_MAX_ETHPORTS - 1;
3279         } else {
3280                 next_port = last_port = port_id;
3281         }
3282
3283         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3284
3285         do {
3286                 dev = &rte_eth_devices[next_port];
3287                 ret = 0;
3288                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3289                      cb = next) {
3290
3291                         next = TAILQ_NEXT(cb, next);
3292
3293                         if (cb->cb_fn != cb_fn || cb->event != event ||
3294                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3295                                 continue;
3296
3297                         /*
3298                          * if this callback is not executing right now,
3299                          * then remove it.
3300                          */
3301                         if (cb->active == 0) {
3302                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3303                                 rte_free(cb);
3304                         } else {
3305                                 ret = -EAGAIN;
3306                         }
3307                 }
3308         } while (++next_port <= last_port);
3309
3310         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3311         return ret;
3312 }
3313
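/*
 * Invoke the callbacks registered for an event.  Each matching entry is
 * copied and flagged active, and the list lock is released while the
 * user callback runs, so callbacks may themselves register or
 * unregister; rte_eth_dev_callback_unregister() returns -EAGAIN for
 * entries that are executing at that moment.
 */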
3314 int
3315 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3316         enum rte_eth_event_type event, void *ret_param)
3317 {
3318         struct rte_eth_dev_callback *cb_lst;
3319         struct rte_eth_dev_callback dev_cb;
3320         int rc = 0;
3321
3322         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3323         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3324                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3325                         continue;
3326                 dev_cb = *cb_lst;
3327                 cb_lst->active = 1;
3328                 if (ret_param != NULL)
3329                         dev_cb.ret_param = ret_param;
3330
3331                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3332                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3333                                 dev_cb.cb_arg, dev_cb.ret_param);
3334                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3335                 cb_lst->active = 0;
3336         }
3337         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3338         return rc;
3339 }
3340
3341 int
3342 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3343 {
3344         uint32_t vec;
3345         struct rte_eth_dev *dev;
3346         struct rte_intr_handle *intr_handle;
3347         uint16_t qid;
3348         int rc;
3349
3350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3351
3352         dev = &rte_eth_devices[port_id];
3353
3354         if (!dev->intr_handle) {
3355                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3356                 return -ENOTSUP;
3357         }
3358
3359         intr_handle = dev->intr_handle;
3360         if (!intr_handle->intr_vec) {
3361                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3362                 return -EPERM;
3363         }
3364
3365         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3366                 vec = intr_handle->intr_vec[qid];
3367                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3368                 if (rc && rc != -EEXIST) {
3369                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3370                                         " op %d epfd %d vec %u\n",
3371                                         port_id, qid, op, epfd, vec);
3372                 }
3373         }
3374
3375         return 0;
3376 }
3377
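/*
 * Reserve (or look up) the DMA memzone used for a queue's descriptor
 * ring.  The zone name is derived from the driver name, ring name, port
 * and queue id, so a zone left over from a previous run of the same
 * device and queue is reused rather than reserved twice.
 */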
3378 const struct rte_memzone *
3379 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3380                          uint16_t queue_id, size_t size, unsigned align,
3381                          int socket_id)
3382 {
3383         char z_name[RTE_MEMZONE_NAMESIZE];
3384         const struct rte_memzone *mz;
3385
3386         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3387                  dev->device->driver->name, ring_name,
3388                  dev->data->port_id, queue_id);
3389
3390         mz = rte_memzone_lookup(z_name);
3391         if (mz)
3392                 return mz;
3393
3394         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3395 }
3396
3397 int
3398 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3399                           int epfd, int op, void *data)
3400 {
3401         uint32_t vec;
3402         struct rte_eth_dev *dev;
3403         struct rte_intr_handle *intr_handle;
3404         int rc;
3405
3406         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3407
3408         dev = &rte_eth_devices[port_id];
3409         if (queue_id >= dev->data->nb_rx_queues) {
3410                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3411                 return -EINVAL;
3412         }
3413
3414         if (!dev->intr_handle) {
3415                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3416                 return -ENOTSUP;
3417         }
3418
3419         intr_handle = dev->intr_handle;
3420         if (!intr_handle->intr_vec) {
3421                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3422                 return -EPERM;
3423         }
3424
3425         vec = intr_handle->intr_vec[queue_id];
3426         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3427         if (rc && rc != -EEXIST) {
3428                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3429                                 " op %d epfd %d vec %u\n",
3430                                 port_id, queue_id, op, epfd, vec);
3431                 return rc;
3432         }
3433
3434         return 0;
3435 }
3436
3437 int
3438 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3439                            uint16_t queue_id)
3440 {
3441         struct rte_eth_dev *dev;
3442
3443         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3444
3445         dev = &rte_eth_devices[port_id];
3446
3447         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3448         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3449                                                                 queue_id));
3450 }
3451
3452 int
3453 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3454                             uint16_t queue_id)
3455 {
3456         struct rte_eth_dev *dev;
3457
3458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3459
3460         dev = &rte_eth_devices[port_id];
3461
3462         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3463         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3464                                                                 queue_id));
3465 }
3466
3467
3468 int
3469 rte_eth_dev_filter_supported(uint16_t port_id,
3470                              enum rte_filter_type filter_type)
3471 {
3472         struct rte_eth_dev *dev;
3473
3474         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3475
3476         dev = &rte_eth_devices[port_id];
3477         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3478         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3479                                 RTE_ETH_FILTER_NOP, NULL);
3480 }
3481
3482 int
3483 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3484                         enum rte_filter_op filter_op, void *arg)
3485 {
3486         struct rte_eth_dev *dev;
3487
3488         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3489
3490         dev = &rte_eth_devices[port_id];
3491         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3492         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3493                                                              filter_op, arg));
3494 }
3495
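/*
 * Append a post-RX callback at the tail of the per-queue callback chain
 * (callbacks run in the order they were added); see
 * rte_eth_add_first_rx_callback() below for inserting at the head.
 */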
3496 const struct rte_eth_rxtx_callback *
3497 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3498                 rte_rx_callback_fn fn, void *user_param)
3499 {
3500 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3501         rte_errno = ENOTSUP;
3502         return NULL;
3503 #endif
3504         /* check input parameters */
3505         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3506                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3507                 rte_errno = EINVAL;
3508                 return NULL;
3509         }
3510         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3511
3512         if (cb == NULL) {
3513                 rte_errno = ENOMEM;
3514                 return NULL;
3515         }
3516
3517         cb->fn.rx = fn;
3518         cb->param = user_param;
3519
3520         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3521         /* Add the callbacks in fifo order. */
3522         struct rte_eth_rxtx_callback *tail =
3523                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3524
3525         if (!tail) {
3526                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3527
3528         } else {
3529                 while (tail->next)
3530                         tail = tail->next;
3531                 tail->next = cb;
3532         }
3533         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3534
3535         return cb;
3536 }
3537
3538 const struct rte_eth_rxtx_callback *
3539 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3540                 rte_rx_callback_fn fn, void *user_param)
3541 {
3542 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3543         rte_errno = ENOTSUP;
3544         return NULL;
3545 #endif
3546         /* check input parameters */
3547         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3548                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3549                 rte_errno = EINVAL;
3550                 return NULL;
3551         }
3552
3553         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3554
3555         if (cb == NULL) {
3556                 rte_errno = ENOMEM;
3557                 return NULL;
3558         }
3559
3560         cb->fn.rx = fn;
3561         cb->param = user_param;
3562
3563         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3564         /* Add the callbacks at first position */
3565         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3566         rte_smp_wmb();
3567         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3568         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3569
3570         return cb;
3571 }
3572
3573 const struct rte_eth_rxtx_callback *
3574 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3575                 rte_tx_callback_fn fn, void *user_param)
3576 {
3577 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3578         rte_errno = ENOTSUP;
3579         return NULL;
3580 #endif
3581         /* check input parameters */
3582         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3583                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3584                 rte_errno = EINVAL;
3585                 return NULL;
3586         }
3587
3588         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3589
3590         if (cb == NULL) {
3591                 rte_errno = ENOMEM;
3592                 return NULL;
3593         }
3594
3595         cb->fn.tx = fn;
3596         cb->param = user_param;
3597
3598         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3599         /* Add the callbacks in fifo order. */
3600         struct rte_eth_rxtx_callback *tail =
3601                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3602
3603         if (!tail) {
3604                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3605
3606         } else {
3607                 while (tail->next)
3608                         tail = tail->next;
3609                 tail->next = cb;
3610         }
3611         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3612
3613         return cb;
3614 }
3615
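/*
 * Unlink a previously added RX callback from the per-queue chain.  The
 * callback structure itself is not freed here (nor in the TX variant
 * below), since a burst running on another lcore may still be walking
 * the list; releasing it is left to the caller.
 */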
3616 int
3617 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3618                 const struct rte_eth_rxtx_callback *user_cb)
3619 {
3620 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3621         return -ENOTSUP;
3622 #endif
3623         /* Check input parameters. */
3624         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3625         if (user_cb == NULL ||
3626                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3627                 return -EINVAL;
3628
3629         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3630         struct rte_eth_rxtx_callback *cb;
3631         struct rte_eth_rxtx_callback **prev_cb;
3632         int ret = -EINVAL;
3633
3634         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3635         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3636         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3637                 cb = *prev_cb;
3638                 if (cb == user_cb) {
3639                         /* Remove the user cb from the callback list. */
3640                         *prev_cb = cb->next;
3641                         ret = 0;
3642                         break;
3643                 }
3644         }
3645         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3646
3647         return ret;
3648 }
3649
3650 int
3651 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3652                 const struct rte_eth_rxtx_callback *user_cb)
3653 {
3654 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3655         return -ENOTSUP;
3656 #endif
3657         /* Check input parameters. */
3658         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3659         if (user_cb == NULL ||
3660                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3661                 return -EINVAL;
3662
3663         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3664         int ret = -EINVAL;
3665         struct rte_eth_rxtx_callback *cb;
3666         struct rte_eth_rxtx_callback **prev_cb;
3667
3668         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3669         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3670         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3671                 cb = *prev_cb;
3672                 if (cb == user_cb) {
3673                         /* Remove the user cb from the callback list. */
3674                         *prev_cb = cb->next;
3675                         ret = 0;
3676                         break;
3677                 }
3678         }
3679         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3680
3681         return ret;
3682 }
3683
3684 int
3685 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3686         struct rte_eth_rxq_info *qinfo)
3687 {
3688         struct rte_eth_dev *dev;
3689
3690         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3691
3692         if (qinfo == NULL)
3693                 return -EINVAL;
3694
3695         dev = &rte_eth_devices[port_id];
3696         if (queue_id >= dev->data->nb_rx_queues) {
3697                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3698                 return -EINVAL;
3699         }
3700
3701         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3702
3703         memset(qinfo, 0, sizeof(*qinfo));
3704         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3705         return 0;
3706 }
3707
3708 int
3709 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3710         struct rte_eth_txq_info *qinfo)
3711 {
3712         struct rte_eth_dev *dev;
3713
3714         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3715
3716         if (qinfo == NULL)
3717                 return -EINVAL;
3718
3719         dev = &rte_eth_devices[port_id];
3720         if (queue_id >= dev->data->nb_tx_queues) {
3721                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3722                 return -EINVAL;
3723         }
3724
3725         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3726
3727         memset(qinfo, 0, sizeof(*qinfo));
3728         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3729         return 0;
3730 }
3731
3732 int
3733 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3734                              struct ether_addr *mc_addr_set,
3735                              uint32_t nb_mc_addr)
3736 {
3737         struct rte_eth_dev *dev;
3738
3739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3740
3741         dev = &rte_eth_devices[port_id];
3742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3743         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3744                                                 mc_addr_set, nb_mc_addr));
3745 }
3746
3747 int
3748 rte_eth_timesync_enable(uint16_t port_id)
3749 {
3750         struct rte_eth_dev *dev;
3751
3752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3753         dev = &rte_eth_devices[port_id];
3754
3755         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3756         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3757 }
3758
3759 int
3760 rte_eth_timesync_disable(uint16_t port_id)
3761 {
3762         struct rte_eth_dev *dev;
3763
3764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3765         dev = &rte_eth_devices[port_id];
3766
3767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3768         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3769 }
3770
3771 int
3772 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3773                                    uint32_t flags)
3774 {
3775         struct rte_eth_dev *dev;
3776
3777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3778         dev = &rte_eth_devices[port_id];
3779
3780         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3781         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3782                                 (dev, timestamp, flags));
3783 }
3784
3785 int
3786 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3787                                    struct timespec *timestamp)
3788 {
3789         struct rte_eth_dev *dev;
3790
3791         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3792         dev = &rte_eth_devices[port_id];
3793
3794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3795         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3796                                 (dev, timestamp));
3797 }
3798
3799 int
3800 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3801 {
3802         struct rte_eth_dev *dev;
3803
3804         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3805         dev = &rte_eth_devices[port_id];
3806
3807         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3808         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3809                                                                       delta));
3810 }
3811
3812 int
3813 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3814 {
3815         struct rte_eth_dev *dev;
3816
3817         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3818         dev = &rte_eth_devices[port_id];
3819
3820         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3821         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3822                                                                 timestamp));
3823 }
3824
3825 int
3826 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3827 {
3828         struct rte_eth_dev *dev;
3829
3830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3831         dev = &rte_eth_devices[port_id];
3832
3833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3834         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3835                                                                 timestamp));
3836 }
3837
3838 int
3839 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3840 {
3841         struct rte_eth_dev *dev;
3842
3843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3844
3845         dev = &rte_eth_devices[port_id];
3846         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3847         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3848 }
3849
3850 int
3851 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3852 {
3853         struct rte_eth_dev *dev;
3854
3855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3856
3857         dev = &rte_eth_devices[port_id];
3858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3859         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3860 }
3861
3862 int
3863 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3864 {
3865         struct rte_eth_dev *dev;
3866
3867         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3868
3869         dev = &rte_eth_devices[port_id];
3870         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3871         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3872 }
3873
3874 int
3875 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3876 {
3877         struct rte_eth_dev *dev;
3878
3879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3880
3881         dev = &rte_eth_devices[port_id];
3882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3883         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3884 }
3885
3886 int
3887 rte_eth_dev_get_dcb_info(uint16_t port_id,
3888                              struct rte_eth_dcb_info *dcb_info)
3889 {
3890         struct rte_eth_dev *dev;
3891
3892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3893
3894         dev = &rte_eth_devices[port_id];
3895         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3896
3897         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3898         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
3899 }
3900
3901 int
3902 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3903                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3904 {
3905         struct rte_eth_dev *dev;
3906
3907         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3908         if (l2_tunnel == NULL) {
3909                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3910                 return -EINVAL;
3911         }
3912
3913         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3914                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3915                 return -EINVAL;
3916         }
3917
3918         dev = &rte_eth_devices[port_id];
3919         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3920                                 -ENOTSUP);
3921         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
3922                                                                 l2_tunnel));
3923 }
3924
3925 int
3926 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3927                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3928                                   uint32_t mask,
3929                                   uint8_t en)
3930 {
3931         struct rte_eth_dev *dev;
3932
3933         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3934
3935         if (l2_tunnel == NULL) {
3936                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3937                 return -EINVAL;
3938         }
3939
3940         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3941                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3942                 return -EINVAL;
3943         }
3944
3945         if (mask == 0) {
3946                 RTE_PMD_DEBUG_TRACE("Mask must be non-zero.\n");
3947                 return -EINVAL;
3948         }
3949
3950         dev = &rte_eth_devices[port_id];
3951         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3952                                 -ENOTSUP);
3953         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
3954                                                         l2_tunnel, mask, en));
3955 }
3956
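/*
 * Clamp a requested descriptor count to the limits advertised by the
 * driver: round up to the required alignment, cap at nb_max, then raise
 * to at least nb_min.
 */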
3957 static void
3958 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3959                            const struct rte_eth_desc_lim *desc_lim)
3960 {
3961         if (desc_lim->nb_align != 0)
3962                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3963
3964         if (desc_lim->nb_max != 0)
3965                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3966
3967         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3968 }
3969
3970 int
3971 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3972                                  uint16_t *nb_rx_desc,
3973                                  uint16_t *nb_tx_desc)
3974 {
3975         struct rte_eth_dev *dev;
3976         struct rte_eth_dev_info dev_info;
3977
3978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3979
3980         dev = &rte_eth_devices[port_id];
3981         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3982
3983         rte_eth_dev_info_get(port_id, &dev_info);
3984
3985         if (nb_rx_desc != NULL)
3986                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3987
3988         if (nb_tx_desc != NULL)
3989                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3990
3991         return 0;
3992 }
3993
3994 int
3995 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3996 {
3997         struct rte_eth_dev *dev;
3998
3999         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4000
4001         if (pool == NULL)
4002                 return -EINVAL;
4003
4004         dev = &rte_eth_devices[port_id];
4005
4006         if (*dev->dev_ops->pool_ops_supported == NULL)
4007                 return 1; /* all pools are supported */
4008
4009         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4010 }
4011
4012 RTE_INIT(ethdev_init_log);
4013 static void
4014 ethdev_init_log(void)
4015 {
4016         ethdev_logtype = rte_log_register("lib.ethdev");
4017         if (ethdev_logtype >= 0)
4018                 rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
4019 }