ethdev: fix string length in name comparison
[dpdk.git] lib/librte_ether/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_compat.h>
38
39 #include "rte_ether.h"
40 #include "rte_ethdev.h"
41 #include "rte_ethdev_driver.h"
42 #include "ethdev_profile.h"
43
44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
46 static uint8_t eth_dev_last_created_port;
47
48 /* spinlock for eth device callbacks */
49 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
50
51 /* spinlock for add/remove rx callbacks */
52 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
53
54 /* spinlock for add/remove tx callbacks */
55 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
56
57 /* spinlock for shared data allocation */
58 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
59
60 /* store statistics names and their offsets in the stats structure */
61 struct rte_eth_xstats_name_off {
62         char name[RTE_ETH_XSTATS_NAME_SIZE];
63         unsigned offset;
64 };
65
66 /* Shared memory between primary and secondary processes. */
67 static struct {
68         uint64_t next_owner_id;
69         rte_spinlock_t ownership_lock;
70         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
71 } *rte_eth_dev_shared_data;
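/*
 * Note: rte_eth_dev_shared_data lives in the "rte_eth_dev_data" memzone.
 * The primary process reserves it in rte_eth_dev_shared_data_prepare()
 * below and secondary processes look it up by name, so port data and
 * ownership information stay consistent across processes.
 */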
72
73 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
74         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
75         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
76         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
77         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
78         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
79         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
80         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
81         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
82                 rx_nombuf)},
83 };
84
85 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
86
87 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
88         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
89         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
90         {"errors", offsetof(struct rte_eth_stats, q_errors)},
91 };
92
93 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
94                 sizeof(rte_rxq_stats_strings[0]))
95
96 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
97         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
98         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
99 };
100 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
101                 sizeof(rte_txq_stats_strings[0]))
102
103 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
104         { DEV_RX_OFFLOAD_##_name, #_name }
105
106 static const struct {
107         uint64_t offload;
108         const char *name;
109 } rte_rx_offload_names[] = {
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
111         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
112         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
113         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
114         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
115         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
116         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
118         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
119         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
120         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
121         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
122         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
123         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
124         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
125         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
126 };
127
128 #undef RTE_RX_OFFLOAD_BIT2STR
129
130 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
131         { DEV_TX_OFFLOAD_##_name, #_name }
132
133 static const struct {
134         uint64_t offload;
135         const char *name;
136 } rte_tx_offload_names[] = {
137         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
138         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
139         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
140         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
141         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
142         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
143         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
144         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
146         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
147         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
150         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
151         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
152         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
153         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
154         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
155 };
156
157 #undef RTE_TX_OFFLOAD_BIT2STR
158
159 /**
160  * The user application callback description.
161  *
162  * It contains the callback address to be registered by the user
163  * application, the pointer to the callback parameters, and the event type.
164  */
165 struct rte_eth_dev_callback {
166         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
167         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
168         void *cb_arg;                           /**< Parameter for callback */
169         void *ret_param;                        /**< Return parameter */
170         enum rte_eth_event_type event;          /**< Interrupt event type */
171         uint32_t active;                        /**< Callback is executing */
172 };
173
174 enum {
175         STAT_QMAP_TX = 0,
176         STAT_QMAP_RX
177 };
178
179 uint16_t
180 rte_eth_find_next(uint16_t port_id)
181 {
182         while (port_id < RTE_MAX_ETHPORTS &&
183                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
184                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
185                 port_id++;
186
187         if (port_id >= RTE_MAX_ETHPORTS)
188                 return RTE_MAX_ETHPORTS;
189
190         return port_id;
191 }
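/*
 * Usage sketch for rte_eth_find_next() (illustrative, not part of the
 * library): walk every usable port. Applications normally use the
 * RTE_ETH_FOREACH_DEV() macro from rte_ethdev.h instead.
 *
 *      uint16_t p;
 *
 *      for (p = rte_eth_find_next(0); p < RTE_MAX_ETHPORTS;
 *           p = rte_eth_find_next(p + 1))
 *              printf("port %u is attached\n", p);
 */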
192
193 static void
194 rte_eth_dev_shared_data_prepare(void)
195 {
196         const unsigned flags = 0;
197         const struct rte_memzone *mz;
198
199         rte_spinlock_lock(&rte_eth_shared_data_lock);
200
201         if (rte_eth_dev_shared_data == NULL) {
202                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
203                         /* Allocate port data and ownership shared memory. */
204                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
205                                         sizeof(*rte_eth_dev_shared_data),
206                                         rte_socket_id(), flags);
207                 } else
208                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
209                 if (mz == NULL)
210                         rte_panic("Cannot allocate ethdev shared data\n");
211
212                 rte_eth_dev_shared_data = mz->addr;
213                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
214                         rte_eth_dev_shared_data->next_owner_id =
215                                         RTE_ETH_DEV_NO_OWNER + 1;
216                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
217                         memset(rte_eth_dev_shared_data->data, 0,
218                                sizeof(rte_eth_dev_shared_data->data));
219                 }
220         }
221
222         rte_spinlock_unlock(&rte_eth_shared_data_lock);
223 }
224
225 struct rte_eth_dev *
226 rte_eth_dev_allocated(const char *name)
227 {
228         unsigned i;
229
230         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
231                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
232                     strcmp(rte_eth_devices[i].data->name, name) == 0)
233                         return &rte_eth_devices[i];
234         }
235         return NULL;
236 }
237
238 static uint16_t
239 rte_eth_dev_find_free_port(void)
240 {
241         unsigned i;
242
243         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
244                 /* Using shared name field to find a free port. */
245                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
246                         RTE_ASSERT(rte_eth_devices[i].state ==
247                                    RTE_ETH_DEV_UNUSED);
248                         return i;
249                 }
250         }
251         return RTE_MAX_ETHPORTS;
252 }
253
254 static struct rte_eth_dev *
255 eth_dev_get(uint16_t port_id)
256 {
257         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
258
259         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
260         eth_dev->state = RTE_ETH_DEV_ATTACHED;
261
262         eth_dev_last_created_port = port_id;
263
264         return eth_dev;
265 }
266
267 struct rte_eth_dev *
268 rte_eth_dev_allocate(const char *name)
269 {
270         uint16_t port_id;
271         struct rte_eth_dev *eth_dev = NULL;
272
273         rte_eth_dev_shared_data_prepare();
274
275         /* Synchronize port creation between primary and secondary threads. */
276         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
277
278         port_id = rte_eth_dev_find_free_port();
279         if (port_id == RTE_MAX_ETHPORTS) {
280                 RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n");
281                 goto unlock;
282         }
283
284         if (rte_eth_dev_allocated(name) != NULL) {
285                 RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n",
286                                 name);
287                 goto unlock;
288         }
289
290         eth_dev = eth_dev_get(port_id);
291         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
292         eth_dev->data->port_id = port_id;
293         eth_dev->data->mtu = ETHER_MTU;
294
295 unlock:
296         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
297
298         if (eth_dev != NULL)
299                 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
300
301         return eth_dev;
302 }
303
304 /*
305  * Attach to a port already registered by the primary process, so that
306  * the same device gets the same port id in both the primary and the
307  * secondary process.
308  */
309 struct rte_eth_dev *
310 rte_eth_dev_attach_secondary(const char *name)
311 {
312         uint16_t i;
313         struct rte_eth_dev *eth_dev = NULL;
314
315         rte_eth_dev_shared_data_prepare();
316
317         /* Synchronize port attachment to primary port creation and release. */
318         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
319
320         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
321                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
322                         break;
323         }
324         if (i == RTE_MAX_ETHPORTS) {
325                 RTE_PMD_DEBUG_TRACE(
326                         "device %s is not driven by the primary process\n",
327                         name);
328         } else {
329                 eth_dev = eth_dev_get(i);
330                 RTE_ASSERT(eth_dev->data->port_id == i);
331         }
332
333         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
334         return eth_dev;
335 }
336
337 int
338 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
339 {
340         if (eth_dev == NULL)
341                 return -EINVAL;
342
343         rte_eth_dev_shared_data_prepare();
344
345         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
346
347         eth_dev->state = RTE_ETH_DEV_UNUSED;
348
349         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
350
351         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
352
353         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
354
355         return 0;
356 }
357
358 int
359 rte_eth_dev_is_valid_port(uint16_t port_id)
360 {
361         if (port_id >= RTE_MAX_ETHPORTS ||
362             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
363                 return 0;
364         else
365                 return 1;
366 }
367
368 static int
369 rte_eth_is_valid_owner_id(uint64_t owner_id)
370 {
371         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
372             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
373                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
374                 return 0;
375         }
376         return 1;
377 }
378
379 uint64_t __rte_experimental
380 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
381 {
382         while (port_id < RTE_MAX_ETHPORTS &&
383                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
384                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
385                rte_eth_devices[port_id].data->owner.id != owner_id))
386                 port_id++;
387
388         if (port_id >= RTE_MAX_ETHPORTS)
389                 return RTE_MAX_ETHPORTS;
390
391         return port_id;
392 }
393
394 int __rte_experimental
395 rte_eth_dev_owner_new(uint64_t *owner_id)
396 {
397         rte_eth_dev_shared_data_prepare();
398
399         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
400
401         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
402
403         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
404         return 0;
405 }
406
407 static int
408 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
409                        const struct rte_eth_dev_owner *new_owner)
410 {
411         struct rte_eth_dev_owner *port_owner;
412         int sret;
413
414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
415
416         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
417             !rte_eth_is_valid_owner_id(old_owner_id))
418                 return -EINVAL;
419
420         port_owner = &rte_eth_devices[port_id].data->owner;
421         if (port_owner->id != old_owner_id) {
422                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
423                                     " by %s_%016lX.\n", port_id,
424                                     port_owner->name, port_owner->id);
425                 return -EPERM;
426         }
427
428         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
429                         new_owner->name);
430         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
431                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
432                                     port_id);
433
434         port_owner->id = new_owner->id;
435
436         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
437                             new_owner->name, new_owner->id);
438
439         return 0;
440 }
441
442 int __rte_experimental
443 rte_eth_dev_owner_set(const uint16_t port_id,
444                       const struct rte_eth_dev_owner *owner)
445 {
446         int ret;
447
448         rte_eth_dev_shared_data_prepare();
449
450         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
451
452         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
453
454         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
455         return ret;
456 }
457
458 int __rte_experimental
459 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
460 {
461         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
462                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
463         int ret;
464
465         rte_eth_dev_shared_data_prepare();
466
467         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
468
469         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
470
471         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
472         return ret;
473 }
474
475 void __rte_experimental
476 rte_eth_dev_owner_delete(const uint64_t owner_id)
477 {
478         uint16_t port_id;
479
480         rte_eth_dev_shared_data_prepare();
481
482         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
483
484         if (rte_eth_is_valid_owner_id(owner_id)) {
485                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
486                         memset(&rte_eth_devices[port_id].data->owner, 0,
487                                sizeof(struct rte_eth_dev_owner));
488                 RTE_PMD_DEBUG_TRACE("All ports owned by identifier %016lX"
489                                     " have been released.\n", owner_id);
490         }
491
492         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
493 }
494
495 int __rte_experimental
496 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
497 {
498         int ret = 0;
499
500         rte_eth_dev_shared_data_prepare();
501
502         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
503
504         if (!rte_eth_dev_is_valid_port(port_id)) {
505                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
506                 ret = -ENODEV;
507         } else {
508                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
509                            sizeof(*owner));
510         }
511
512         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
513         return ret;
514 }
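/*
 * Ownership API usage sketch (illustrative only; "my_app" and my_id are
 * example names): a component claims a port so that other components leave
 * it alone, then releases it when done.
 *
 *      uint64_t my_id;
 *      struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *      if (rte_eth_dev_owner_new(&my_id) == 0) {
 *              owner.id = my_id;
 *              rte_eth_dev_owner_set(port_id, &owner);
 *              ... use the port exclusively ...
 *              rte_eth_dev_owner_unset(port_id, my_id);
 *      }
 */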
515
516 int
517 rte_eth_dev_socket_id(uint16_t port_id)
518 {
519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
520         return rte_eth_devices[port_id].data->numa_node;
521 }
522
523 void *
524 rte_eth_dev_get_sec_ctx(uint8_t port_id)
525 {
526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
527         return rte_eth_devices[port_id].security_ctx;
528 }
529
530 uint16_t
531 rte_eth_dev_count(void)
532 {
533         uint16_t p;
534         uint16_t count;
535
536         count = 0;
537
538         RTE_ETH_FOREACH_DEV(p)
539                 count++;
540
541         return count;
542 }
543
544 int
545 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
546 {
547         char *tmp;
548
549         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
550
551         if (name == NULL) {
552                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
553                 return -EINVAL;
554         }
555
556         /* Shouldn't check 'rte_eth_devices[i].data' here,
557          * because it might be overwritten by a VDEV PMD. */
558         tmp = rte_eth_dev_shared_data->data[port_id].name;
559         strcpy(name, tmp);
560         return 0;
561 }
562
563 int
564 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
565 {
566         uint32_t pid;
567
568         if (name == NULL) {
569                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
570                 return -EINVAL;
571         }
572
573         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
574                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
575                     !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
576                         *port_id = pid;
577                         return 0;
578                 }
579         }
580
581         return -ENODEV;
582 }
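/*
 * Lookup sketch (illustrative; the device name is only an example):
 * resolve a device name to its port id.
 *
 *      uint16_t pid;
 *
 *      if (rte_eth_dev_get_port_by_name("0000:03:00.0", &pid) == 0)
 *              printf("device is port %u\n", pid);
 */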
583
584 static int
585 eth_err(uint16_t port_id, int ret)
586 {
587         if (ret == 0)
588                 return 0;
589         if (rte_eth_dev_is_removed(port_id))
590                 return -EIO;
591         return ret;
592 }
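/*
 * Note: eth_err() post-processes driver return codes; any non-zero value is
 * turned into -EIO once rte_eth_dev_is_removed() reports that the device has
 * been physically removed, so callers see one consistent error for
 * hot-unplugged ports.
 */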
593
594 /* attach the new device, then store the port_id of the device */
595 int
596 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
597 {
598         int ret = -1;
599         int current = rte_eth_dev_count();
600         char *name = NULL;
601         char *args = NULL;
602
603         if ((devargs == NULL) || (port_id == NULL)) {
604                 ret = -EINVAL;
605                 goto err;
606         }
607
608         /* parse devargs, then retrieve device name and args */
609         if (rte_eal_parse_devargs_str(devargs, &name, &args))
610                 goto err;
611
612         ret = rte_eal_dev_attach(name, args);
613         if (ret < 0)
614                 goto err;
615
616         /* no point looking at the port count if no port exists */
617         if (!rte_eth_dev_count()) {
618                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
619                 ret = -1;
620                 goto err;
621         }
622
623         /* If the port count did not change, there is a bug: a driver
624          * reported that it attached a device but did not create a port.
625          */
626         if (current == rte_eth_dev_count()) {
627                 ret = -1;
628                 goto err;
629         }
630
631         *port_id = eth_dev_last_created_port;
632         ret = 0;
633
634 err:
635         free(name);
636         free(args);
637         return ret;
638 }
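/*
 * Hot-plug sketch (illustrative; "net_null0" is just an example devargs
 * string for a virtual device):
 *
 *      uint16_t pid;
 *
 *      if (rte_eth_dev_attach("net_null0", &pid) == 0)
 *              printf("attached as port %u\n", pid);
 */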
639
640 /* detach the device, then store the name of the device */
641 int
642 rte_eth_dev_detach(uint16_t port_id, char *name)
643 {
644         uint32_t dev_flags;
645         int ret = -1;
646
647         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
648
649         if (name == NULL) {
650                 ret = -EINVAL;
651                 goto err;
652         }
653
654         dev_flags = rte_eth_devices[port_id].data->dev_flags;
655         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
656                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
657                         port_id);
658                 ret = -ENOTSUP;
659                 goto err;
660         }
661
662         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
663                  "%s", rte_eth_devices[port_id].data->name);
664
665         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
666         if (ret < 0)
667                 goto err;
668
669         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
670         return 0;
671
672 err:
673         return ret;
674 }
675
676 static int
677 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
678 {
679         uint16_t old_nb_queues = dev->data->nb_rx_queues;
680         void **rxq;
681         unsigned i;
682
683         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
684                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
685                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
686                                 RTE_CACHE_LINE_SIZE);
687                 if (dev->data->rx_queues == NULL) {
688                         dev->data->nb_rx_queues = 0;
689                         return -(ENOMEM);
690                 }
691         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
692                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
693
694                 rxq = dev->data->rx_queues;
695
696                 for (i = nb_queues; i < old_nb_queues; i++)
697                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
698                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
699                                 RTE_CACHE_LINE_SIZE);
700                 if (rxq == NULL)
701                         return -(ENOMEM);
702                 if (nb_queues > old_nb_queues) {
703                         uint16_t new_qs = nb_queues - old_nb_queues;
704
705                         memset(rxq + old_nb_queues, 0,
706                                 sizeof(rxq[0]) * new_qs);
707                 }
708
709                 dev->data->rx_queues = rxq;
710
711         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
712                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
713
714                 rxq = dev->data->rx_queues;
715
716                 for (i = nb_queues; i < old_nb_queues; i++)
717                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
718
719                 rte_free(dev->data->rx_queues);
720                 dev->data->rx_queues = NULL;
721         }
722         dev->data->nb_rx_queues = nb_queues;
723         return 0;
724 }
725
726 int
727 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
728 {
729         struct rte_eth_dev *dev;
730
731         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
732
733         dev = &rte_eth_devices[port_id];
734         if (rx_queue_id >= dev->data->nb_rx_queues) {
735                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
736                 return -EINVAL;
737         }
738
739         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
740
741         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
742                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
743                         " already started\n",
744                         rx_queue_id, port_id);
745                 return 0;
746         }
747
748         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
749                                                              rx_queue_id));
750
751 }
752
753 int
754 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
755 {
756         struct rte_eth_dev *dev;
757
758         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
759
760         dev = &rte_eth_devices[port_id];
761         if (rx_queue_id >= dev->data->nb_rx_queues) {
762                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
763                 return -EINVAL;
764         }
765
766         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
767
768         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
769                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
770                         " already stopped\n",
771                         rx_queue_id, port_id);
772                 return 0;
773         }
774
775         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
776
777 }
778
779 int
780 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
781 {
782         struct rte_eth_dev *dev;
783
784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
785
786         dev = &rte_eth_devices[port_id];
787         if (tx_queue_id >= dev->data->nb_tx_queues) {
788                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
789                 return -EINVAL;
790         }
791
792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
793
794         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
795                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
796                         " already started\n",
797                         tx_queue_id, port_id);
798                 return 0;
799         }
800
801         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
802                                                              tx_queue_id));
803
804 }
805
806 int
807 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
808 {
809         struct rte_eth_dev *dev;
810
811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
812
813         dev = &rte_eth_devices[port_id];
814         if (tx_queue_id >= dev->data->nb_tx_queues) {
815                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
816                 return -EINVAL;
817         }
818
819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
820
821         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
822                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
823                         " already stopped\n",
824                         tx_queue_id, port_id);
825                 return 0;
826         }
827
828         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
829
830 }
831
832 static int
833 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
834 {
835         uint16_t old_nb_queues = dev->data->nb_tx_queues;
836         void **txq;
837         unsigned i;
838
839         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
840                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
841                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
842                                                    RTE_CACHE_LINE_SIZE);
843                 if (dev->data->tx_queues == NULL) {
844                         dev->data->nb_tx_queues = 0;
845                         return -(ENOMEM);
846                 }
847         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
848                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
849
850                 txq = dev->data->tx_queues;
851
852                 for (i = nb_queues; i < old_nb_queues; i++)
853                         (*dev->dev_ops->tx_queue_release)(txq[i]);
854                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
855                                   RTE_CACHE_LINE_SIZE);
856                 if (txq == NULL)
857                         return -ENOMEM;
858                 if (nb_queues > old_nb_queues) {
859                         uint16_t new_qs = nb_queues - old_nb_queues;
860
861                         memset(txq + old_nb_queues, 0,
862                                sizeof(txq[0]) * new_qs);
863                 }
864
865                 dev->data->tx_queues = txq;
866
867         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
868                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
869
870                 txq = dev->data->tx_queues;
871
872                 for (i = nb_queues; i < old_nb_queues; i++)
873                         (*dev->dev_ops->tx_queue_release)(txq[i]);
874
875                 rte_free(dev->data->tx_queues);
876                 dev->data->tx_queues = NULL;
877         }
878         dev->data->nb_tx_queues = nb_queues;
879         return 0;
880 }
881
882 uint32_t
883 rte_eth_speed_bitflag(uint32_t speed, int duplex)
884 {
885         switch (speed) {
886         case ETH_SPEED_NUM_10M:
887                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
888         case ETH_SPEED_NUM_100M:
889                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
890         case ETH_SPEED_NUM_1G:
891                 return ETH_LINK_SPEED_1G;
892         case ETH_SPEED_NUM_2_5G:
893                 return ETH_LINK_SPEED_2_5G;
894         case ETH_SPEED_NUM_5G:
895                 return ETH_LINK_SPEED_5G;
896         case ETH_SPEED_NUM_10G:
897                 return ETH_LINK_SPEED_10G;
898         case ETH_SPEED_NUM_20G:
899                 return ETH_LINK_SPEED_20G;
900         case ETH_SPEED_NUM_25G:
901                 return ETH_LINK_SPEED_25G;
902         case ETH_SPEED_NUM_40G:
903                 return ETH_LINK_SPEED_40G;
904         case ETH_SPEED_NUM_50G:
905                 return ETH_LINK_SPEED_50G;
906         case ETH_SPEED_NUM_56G:
907                 return ETH_LINK_SPEED_56G;
908         case ETH_SPEED_NUM_100G:
909                 return ETH_LINK_SPEED_100G;
910         default:
911                 return 0;
912         }
913 }
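/*
 * Example (illustrative): build an advertised-speeds mask (10G and 1G,
 * full duplex) as an application would put in rte_eth_conf.link_speeds.
 *
 *      uint32_t speeds =
 *              rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX) |
 *              rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, ETH_LINK_FULL_DUPLEX);
 */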
914
915 /**
916  * Convert the legacy rxmode bit-field configuration into the Rx offloads API.
917  */
918 static void
919 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
920                                     uint64_t *rx_offloads)
921 {
922         uint64_t offloads = 0;
923
924         if (rxmode->header_split == 1)
925                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
926         if (rxmode->hw_ip_checksum == 1)
927                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
928         if (rxmode->hw_vlan_filter == 1)
929                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
930         if (rxmode->hw_vlan_strip == 1)
931                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
932         if (rxmode->hw_vlan_extend == 1)
933                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
934         if (rxmode->jumbo_frame == 1)
935                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
936         if (rxmode->hw_strip_crc == 1)
937                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
938         if (rxmode->enable_scatter == 1)
939                 offloads |= DEV_RX_OFFLOAD_SCATTER;
940         if (rxmode->enable_lro == 1)
941                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
942         if (rxmode->hw_timestamp == 1)
943                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
944         if (rxmode->security == 1)
945                 offloads |= DEV_RX_OFFLOAD_SECURITY;
946
947         *rx_offloads = offloads;
948 }
949
950 /**
951  * Convert Rx offload flags back into the legacy rxmode bit-field configuration.
952  */
953 static void
954 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
955                             struct rte_eth_rxmode *rxmode)
956 {
957
958         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
959                 rxmode->header_split = 1;
960         else
961                 rxmode->header_split = 0;
962         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
963                 rxmode->hw_ip_checksum = 1;
964         else
965                 rxmode->hw_ip_checksum = 0;
966         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
967                 rxmode->hw_vlan_filter = 1;
968         else
969                 rxmode->hw_vlan_filter = 0;
970         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
971                 rxmode->hw_vlan_strip = 1;
972         else
973                 rxmode->hw_vlan_strip = 0;
974         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
975                 rxmode->hw_vlan_extend = 1;
976         else
977                 rxmode->hw_vlan_extend = 0;
978         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
979                 rxmode->jumbo_frame = 1;
980         else
981                 rxmode->jumbo_frame = 0;
982         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
983                 rxmode->hw_strip_crc = 1;
984         else
985                 rxmode->hw_strip_crc = 0;
986         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
987                 rxmode->enable_scatter = 1;
988         else
989                 rxmode->enable_scatter = 0;
990         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
991                 rxmode->enable_lro = 1;
992         else
993                 rxmode->enable_lro = 0;
994         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
995                 rxmode->hw_timestamp = 1;
996         else
997                 rxmode->hw_timestamp = 0;
998         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
999                 rxmode->security = 1;
1000         else
1001                 rxmode->security = 0;
1002 }
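/*
 * Note: the two converters above keep the legacy rxmode bit-field and the
 * offloads bit-mask describing the same Rx configuration, so applications
 * and PMDs can migrate to the offloads API independently; see how
 * rte_eth_dev_configure() below picks the conversion direction based on
 * rxmode.ignore_offload_bitfield.
 */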
1003
1004 const char * __rte_experimental
1005 rte_eth_dev_rx_offload_name(uint64_t offload)
1006 {
1007         const char *name = "UNKNOWN";
1008         unsigned int i;
1009
1010         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1011                 if (offload == rte_rx_offload_names[i].offload) {
1012                         name = rte_rx_offload_names[i].name;
1013                         break;
1014                 }
1015         }
1016
1017         return name;
1018 }
1019
1020 const char * __rte_experimental
1021 rte_eth_dev_tx_offload_name(uint64_t offload)
1022 {
1023         const char *name = "UNKNOWN";
1024         unsigned int i;
1025
1026         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1027                 if (offload == rte_tx_offload_names[i].offload) {
1028                         name = rte_tx_offload_names[i].name;
1029                         break;
1030                 }
1031         }
1032
1033         return name;
1034 }
1035
1036 int
1037 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1038                       const struct rte_eth_conf *dev_conf)
1039 {
1040         struct rte_eth_dev *dev;
1041         struct rte_eth_dev_info dev_info;
1042         struct rte_eth_conf local_conf = *dev_conf;
1043         int diag;
1044
1045         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1046
1047         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1048                 RTE_PMD_DEBUG_TRACE(
1049                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1050                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1051                 return -EINVAL;
1052         }
1053
1054         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1055                 RTE_PMD_DEBUG_TRACE(
1056                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1057                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1058                 return -EINVAL;
1059         }
1060
1061         dev = &rte_eth_devices[port_id];
1062
1063         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1064         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1065
1066         if (dev->data->dev_started) {
1067                 RTE_PMD_DEBUG_TRACE(
1068                     "port %d must be stopped to allow configuration\n", port_id);
1069                 return -EBUSY;
1070         }
1071
1072         /*
1073          * Convert between the offloads API to enable PMDs to support
1074          * only one of them.
1075          */
1076         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
1077                 rte_eth_convert_rx_offload_bitfield(
1078                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1079         } else {
1080                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
1081                                             &local_conf.rxmode);
1082         }
1083
1084         /* Copy the dev_conf parameter into the dev structure */
1085         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1086
1087         /*
1088          * Check that the numbers of RX and TX queues are not greater
1089          * than the maximum number of RX and TX queues supported by the
1090          * configured device.
1091          */
1092         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1093
1094         if (nb_rx_q == 0 && nb_tx_q == 0) {
1095                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue counts cannot be 0\n", port_id);
1096                 return -EINVAL;
1097         }
1098
1099         if (nb_rx_q > dev_info.max_rx_queues) {
1100                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1101                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1102                 return -EINVAL;
1103         }
1104
1105         if (nb_tx_q > dev_info.max_tx_queues) {
1106                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1107                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1108                 return -EINVAL;
1109         }
1110
1111         /* Check that the device supports requested interrupts */
1112         if ((dev_conf->intr_conf.lsc == 1) &&
1113                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1114                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1115                                         dev->device->driver->name);
1116                         return -EINVAL;
1117         }
1118         if ((dev_conf->intr_conf.rmv == 1) &&
1119             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1120                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1121                                     dev->device->driver->name);
1122                 return -EINVAL;
1123         }
1124
1125         /*
1126          * If jumbo frames are enabled, check that the maximum RX packet
1127          * length is supported by the configured device.
1128          */
1129         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1130                 if (dev_conf->rxmode.max_rx_pkt_len >
1131                     dev_info.max_rx_pktlen) {
1132                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1133                                 " > max valid value %u\n",
1134                                 port_id,
1135                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1136                                 (unsigned)dev_info.max_rx_pktlen);
1137                         return -EINVAL;
1138                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1139                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1140                                 " < min valid value %u\n",
1141                                 port_id,
1142                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1143                                 (unsigned)ETHER_MIN_LEN);
1144                         return -EINVAL;
1145                 }
1146         } else {
1147                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1148                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1149                         /* Use default value */
1150                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1151                                                         ETHER_MAX_LEN;
1152         }
1153
1154         /*
1155          * Setup new number of RX/TX queues and reconfigure device.
1156          */
1157         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1158         if (diag != 0) {
1159                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1160                                 port_id, diag);
1161                 return diag;
1162         }
1163
1164         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1165         if (diag != 0) {
1166                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1167                                 port_id, diag);
1168                 rte_eth_dev_rx_queue_config(dev, 0);
1169                 return diag;
1170         }
1171
1172         diag = (*dev->dev_ops->dev_configure)(dev);
1173         if (diag != 0) {
1174                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1175                                 port_id, diag);
1176                 rte_eth_dev_rx_queue_config(dev, 0);
1177                 rte_eth_dev_tx_queue_config(dev, 0);
1178                 return eth_err(port_id, diag);
1179         }
1180
1181         /* Initialize Rx profiling if enabled at compilation time. */
1182         diag = __rte_eth_profile_rx_init(port_id, dev);
1183         if (diag != 0) {
1184                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1185                                 port_id, diag);
1186                 rte_eth_dev_rx_queue_config(dev, 0);
1187                 rte_eth_dev_tx_queue_config(dev, 0);
1188                 return eth_err(port_id, diag);
1189         }
1190
1191         return 0;
1192 }
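/*
 * Configuration sketch (illustrative; the values are examples only):
 * one Rx and one Tx queue using the offloads API.
 *
 *      struct rte_eth_conf conf = {
 *              .rxmode = {
 *                      .max_rx_pkt_len = ETHER_MAX_LEN,
 *                      .ignore_offload_bitfield = 1,
 *                      .offloads = DEV_RX_OFFLOAD_CHECKSUM,
 *              },
 *      };
 *
 *      if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *              rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */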
1193
1194 void
1195 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1196 {
1197         if (dev->data->dev_started) {
1198                 RTE_PMD_DEBUG_TRACE(
1199                         "port %d must be stopped to allow reset\n",
1200                         dev->data->port_id);
1201                 return;
1202         }
1203
1204         rte_eth_dev_rx_queue_config(dev, 0);
1205         rte_eth_dev_tx_queue_config(dev, 0);
1206
1207         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1208 }
1209
1210 static void
1211 rte_eth_dev_config_restore(uint16_t port_id)
1212 {
1213         struct rte_eth_dev *dev;
1214         struct rte_eth_dev_info dev_info;
1215         struct ether_addr *addr;
1216         uint16_t i;
1217         uint32_t pool = 0;
1218         uint64_t pool_mask;
1219
1220         dev = &rte_eth_devices[port_id];
1221
1222         rte_eth_dev_info_get(port_id, &dev_info);
1223
1224         /* replay MAC address configuration including default MAC */
1225         addr = &dev->data->mac_addrs[0];
1226         if (*dev->dev_ops->mac_addr_set != NULL)
1227                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1228         else if (*dev->dev_ops->mac_addr_add != NULL)
1229                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1230
1231         if (*dev->dev_ops->mac_addr_add != NULL) {
1232                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1233                         addr = &dev->data->mac_addrs[i];
1234
1235                         /* skip zero address */
1236                         if (is_zero_ether_addr(addr))
1237                                 continue;
1238
1239                         pool = 0;
1240                         pool_mask = dev->data->mac_pool_sel[i];
1241
1242                         do {
1243                                 if (pool_mask & 1ULL)
1244                                         (*dev->dev_ops->mac_addr_add)(dev,
1245                                                 addr, i, pool);
1246                                 pool_mask >>= 1;
1247                                 pool++;
1248                         } while (pool_mask);
1249                 }
1250         }
1251
1252         /* replay promiscuous configuration */
1253         if (rte_eth_promiscuous_get(port_id) == 1)
1254                 rte_eth_promiscuous_enable(port_id);
1255         else if (rte_eth_promiscuous_get(port_id) == 0)
1256                 rte_eth_promiscuous_disable(port_id);
1257
1258         /* replay all multicast configuration */
1259         if (rte_eth_allmulticast_get(port_id) == 1)
1260                 rte_eth_allmulticast_enable(port_id);
1261         else if (rte_eth_allmulticast_get(port_id) == 0)
1262                 rte_eth_allmulticast_disable(port_id);
1263 }
1264
1265 int
1266 rte_eth_dev_start(uint16_t port_id)
1267 {
1268         struct rte_eth_dev *dev;
1269         int diag;
1270
1271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1272
1273         dev = &rte_eth_devices[port_id];
1274
1275         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1276
1277         if (dev->data->dev_started != 0) {
1278                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1279                         " already started\n",
1280                         port_id);
1281                 return 0;
1282         }
1283
1284         diag = (*dev->dev_ops->dev_start)(dev);
1285         if (diag == 0)
1286                 dev->data->dev_started = 1;
1287         else
1288                 return eth_err(port_id, diag);
1289
1290         rte_eth_dev_config_restore(port_id);
1291
1292         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1293                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1294                 (*dev->dev_ops->link_update)(dev, 0);
1295         }
1296         return 0;
1297 }
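/*
 * Typical bring-up order (illustrative; conf and mbuf_pool are assumed to
 * exist): configure the port, set up the queues, then start it.
 *
 *      rte_eth_dev_configure(port_id, 1, 1, &conf);
 *      rte_eth_rx_queue_setup(port_id, 0, 512,
 *                      rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *      rte_eth_tx_queue_setup(port_id, 0, 512,
 *                      rte_eth_dev_socket_id(port_id), NULL);
 *      rte_eth_dev_start(port_id);
 */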
1298
1299 void
1300 rte_eth_dev_stop(uint16_t port_id)
1301 {
1302         struct rte_eth_dev *dev;
1303
1304         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1305         dev = &rte_eth_devices[port_id];
1306
1307         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1308
1309         if (dev->data->dev_started == 0) {
1310                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1311                         " already stopped\n",
1312                         port_id);
1313                 return;
1314         }
1315
1316         dev->data->dev_started = 0;
1317         (*dev->dev_ops->dev_stop)(dev);
1318 }
1319
1320 int
1321 rte_eth_dev_set_link_up(uint16_t port_id)
1322 {
1323         struct rte_eth_dev *dev;
1324
1325         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1326
1327         dev = &rte_eth_devices[port_id];
1328
1329         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1330         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1331 }
1332
1333 int
1334 rte_eth_dev_set_link_down(uint16_t port_id)
1335 {
1336         struct rte_eth_dev *dev;
1337
1338         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1339
1340         dev = &rte_eth_devices[port_id];
1341
1342         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1343         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1344 }
1345
1346 void
1347 rte_eth_dev_close(uint16_t port_id)
1348 {
1349         struct rte_eth_dev *dev;
1350
1351         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1352         dev = &rte_eth_devices[port_id];
1353
1354         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1355         dev->data->dev_started = 0;
1356         (*dev->dev_ops->dev_close)(dev);
1357
1358         dev->data->nb_rx_queues = 0;
1359         rte_free(dev->data->rx_queues);
1360         dev->data->rx_queues = NULL;
1361         dev->data->nb_tx_queues = 0;
1362         rte_free(dev->data->tx_queues);
1363         dev->data->tx_queues = NULL;
1364 }
1365
1366 int
1367 rte_eth_dev_reset(uint16_t port_id)
1368 {
1369         struct rte_eth_dev *dev;
1370         int ret;
1371
1372         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1373         dev = &rte_eth_devices[port_id];
1374
1375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1376
1377         rte_eth_dev_stop(port_id);
1378         ret = dev->dev_ops->dev_reset(dev);
1379
1380         return eth_err(port_id, ret);
1381 }
1382
1383 int __rte_experimental
1384 rte_eth_dev_is_removed(uint16_t port_id)
1385 {
1386         struct rte_eth_dev *dev;
1387         int ret;
1388
1389         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1390
1391         dev = &rte_eth_devices[port_id];
1392
1393         if (dev->state == RTE_ETH_DEV_REMOVED)
1394                 return 1;
1395
1396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1397
1398         ret = dev->dev_ops->is_removed(dev);
1399         if (ret != 0)
1400                 /* Device is physically removed. */
1401                 dev->state = RTE_ETH_DEV_REMOVED;
1402
1403         return ret;
1404 }
1405
1406 int
1407 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1408                        uint16_t nb_rx_desc, unsigned int socket_id,
1409                        const struct rte_eth_rxconf *rx_conf,
1410                        struct rte_mempool *mp)
1411 {
1412         int ret;
1413         uint32_t mbp_buf_size;
1414         struct rte_eth_dev *dev;
1415         struct rte_eth_dev_info dev_info;
1416         struct rte_eth_rxconf local_conf;
1417         void **rxq;
1418
1419         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1420
1421         dev = &rte_eth_devices[port_id];
1422         if (rx_queue_id >= dev->data->nb_rx_queues) {
1423                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1424                 return -EINVAL;
1425         }
1426
1427         if (dev->data->dev_started) {
1428                 RTE_PMD_DEBUG_TRACE(
1429                     "port %d must be stopped to allow configuration\n", port_id);
1430                 return -EBUSY;
1431         }
1432
1433         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1434         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1435
1436         /*
1437          * Check the size of the mbuf data buffer.
1438          * This value must be provided in the private data of the memory pool.
1439          * First check that the memory pool has a valid private data.
1440          */
1441         rte_eth_dev_info_get(port_id, &dev_info);
1442         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1443                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1444                                 mp->name, (int) mp->private_data_size,
1445                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1446                 return -ENOSPC;
1447         }
1448         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1449
1450         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1451                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1452                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1453                                 "=%d)\n",
1454                                 mp->name,
1455                                 (int)mbp_buf_size,
1456                                 (int)(RTE_PKTMBUF_HEADROOM +
1457                                       dev_info.min_rx_bufsize),
1458                                 (int)RTE_PKTMBUF_HEADROOM,
1459                                 (int)dev_info.min_rx_bufsize);
1460                 return -EINVAL;
1461         }
1462
1463         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1464                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1465                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1466
1467                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1468                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1469                         nb_rx_desc,
1470                         dev_info.rx_desc_lim.nb_max,
1471                         dev_info.rx_desc_lim.nb_min,
1472                         dev_info.rx_desc_lim.nb_align);
1473                 return -EINVAL;
1474         }
1475
1476         rxq = dev->data->rx_queues;
1477         if (rxq[rx_queue_id]) {
1478                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1479                                         -ENOTSUP);
1480                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1481                 rxq[rx_queue_id] = NULL;
1482         }
1483
1484         if (rx_conf == NULL)
1485                 rx_conf = &dev_info.default_rxconf;
1486
1487         local_conf = *rx_conf;
1488         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1489                 /**
1490                  * Reflect port offloads onto queue offloads so that
1491                  * no offloads are discarded.
1492                  */
1493                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1494                                                     &local_conf.offloads);
1495         }
1496
1497         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1498                                               socket_id, &local_conf, mp);
1499         if (!ret) {
1500                 if (!dev->data->min_rx_buf_size ||
1501                     dev->data->min_rx_buf_size > mbp_buf_size)
1502                         dev->data->min_rx_buf_size = mbp_buf_size;
1503         }
1504
1505         return eth_err(port_id, ret);
1506 }
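/*
 * RX queue setup sketch (illustrative; pool name and sizes are examples):
 * the mempool must be a pktmbuf pool so the private-data and buffer-size
 * checks above pass.
 *
 *      struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *                      0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *      if (mp == NULL ||
 *          rte_eth_rx_queue_setup(port_id, 0, 512,
 *                      rte_eth_dev_socket_id(port_id), NULL, mp) < 0)
 *              rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */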
1507
1508 /**
1509  * Convert legacy txq_flags into the Tx offloads API.
1510  */
1511 static void
1512 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1513 {
1514         uint64_t offloads = 0;
1515
1516         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1517                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1518         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1519                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1520         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1521                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1522         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1523                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1524         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1525                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1526         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1527             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1528                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1529
1530         *tx_offloads = offloads;
1531 }
1532
1533 /**
1534  * Convert per-queue DEV_TX_OFFLOAD_* flags back into the txq_flags bit-field.
1535  */
1536 static void
1537 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1538 {
1539         uint32_t flags = 0;
1540
1541         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1542                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1543         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1544                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1545         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1546                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1547         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1548                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1549         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1550                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1551         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1552                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1553
1554         *txq_flags = flags;
1555 }
1556
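/*
 * Set up a Tx queue on a stopped port: validate the queue id and the
 * descriptor count against the limits reported by the PMD, release any
 * queue previously configured at the same index, fall back to the PMD's
 * default Tx configuration when the caller passes NULL, convert between
 * the txq_flags and offloads representations, and finally hand the
 * request to the PMD's tx_queue_setup callback.
 *
 * Minimal usage sketch (illustrative values only):
 *
 *     struct rte_eth_dev_info info;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                &info.default_txconf) != 0)
 *         ; /* handle error */
 */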
1557 int
1558 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1559                        uint16_t nb_tx_desc, unsigned int socket_id,
1560                        const struct rte_eth_txconf *tx_conf)
1561 {
1562         struct rte_eth_dev *dev;
1563         struct rte_eth_dev_info dev_info;
1564         struct rte_eth_txconf local_conf;
1565         void **txq;
1566
1567         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1568
1569         dev = &rte_eth_devices[port_id];
1570         if (tx_queue_id >= dev->data->nb_tx_queues) {
1571                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1572                 return -EINVAL;
1573         }
1574
1575         if (dev->data->dev_started) {
1576                 RTE_PMD_DEBUG_TRACE(
1577                     "port %d must be stopped to allow configuration\n", port_id);
1578                 return -EBUSY;
1579         }
1580
1581         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1582         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1583
1584         rte_eth_dev_info_get(port_id, &dev_info);
1585
1586         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1587             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1588             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1589                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1590                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1591                                 nb_tx_desc,
1592                                 dev_info.tx_desc_lim.nb_max,
1593                                 dev_info.tx_desc_lim.nb_min,
1594                                 dev_info.tx_desc_lim.nb_align);
1595                 return -EINVAL;
1596         }
1597
1598         txq = dev->data->tx_queues;
1599         if (txq[tx_queue_id]) {
1600                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1601                                         -ENOTSUP);
1602                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1603                 txq[tx_queue_id] = NULL;
1604         }
1605
1606         if (tx_conf == NULL)
1607                 tx_conf = &dev_info.default_txconf;
1608
1609         /*
1610          * Convert between the txq_flags and offloads APIs so that
1611          * a PMD only needs to support one of them.
1612          */
1613         local_conf = *tx_conf;
1614         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1615                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1616                                              &local_conf.txq_flags);
1617                 /* Keep the ignore flag. */
1618                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1619         } else {
1620                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1621                                           &local_conf.offloads);
1622         }
1623
1624         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1625                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1626 }
1627
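/*
 * Default error callbacks for rte_eth_dev_tx_buffer: when buffered packets
 * cannot be transmitted, the "drop" variant simply frees them, while the
 * "count" variant additionally accumulates the number of dropped packets
 * in the uint64_t counter supplied as userdata.
 */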
1628 void
1629 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1630                 void *userdata __rte_unused)
1631 {
1632         unsigned i;
1633
1634         for (i = 0; i < unsent; i++)
1635                 rte_pktmbuf_free(pkts[i]);
1636 }
1637
1638 void
1639 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1640                 void *userdata)
1641 {
1642         uint64_t *count = userdata;
1643         unsigned i;
1644
1645         for (i = 0; i < unsent; i++)
1646                 rte_pktmbuf_free(pkts[i]);
1647
1648         *count += unsent;
1649 }
1650
1651 int
1652 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1653                 buffer_tx_error_fn cbfn, void *userdata)
1654 {
1655         buffer->error_callback = cbfn;
1656         buffer->error_userdata = userdata;
1657         return 0;
1658 }
1659
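/*
 * Initialize a Tx buffer: record its capacity and, if no error callback
 * has been set yet, install rte_eth_tx_buffer_drop_callback as the default.
 *
 * Sketch of typical use (allocation size macro and names follow the usual
 * ethdev idiom and are shown only as an example):
 *
 *     struct rte_eth_dev_tx_buffer *buf;
 *
 *     buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32),
 *                              0, rte_socket_id());
 *     if (buf != NULL)
 *         rte_eth_tx_buffer_init(buf, 32);
 */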
1660 int
1661 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1662 {
1663         int ret = 0;
1664
1665         if (buffer == NULL)
1666                 return -EINVAL;
1667
1668         buffer->size = size;
1669         if (buffer->error_callback == NULL) {
1670                 ret = rte_eth_tx_buffer_set_err_callback(
1671                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1672         }
1673
1674         return ret;
1675 }
1676
1677 int
1678 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1679 {
1680         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1681         int ret;
1682
1683         /* Validate Input Data. Bail if not valid or not supported. */
1684         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1685         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1686
1687         /* Call driver to free pending mbufs. */
1688         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1689                                                free_cnt);
1690         return eth_err(port_id, ret);
1691 }
1692
1693 void
1694 rte_eth_promiscuous_enable(uint16_t port_id)
1695 {
1696         struct rte_eth_dev *dev;
1697
1698         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1699         dev = &rte_eth_devices[port_id];
1700
1701         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1702         (*dev->dev_ops->promiscuous_enable)(dev);
1703         dev->data->promiscuous = 1;
1704 }
1705
1706 void
1707 rte_eth_promiscuous_disable(uint16_t port_id)
1708 {
1709         struct rte_eth_dev *dev;
1710
1711         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1712         dev = &rte_eth_devices[port_id];
1713
1714         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1715         dev->data->promiscuous = 0;
1716         (*dev->dev_ops->promiscuous_disable)(dev);
1717 }
1718
1719 int
1720 rte_eth_promiscuous_get(uint16_t port_id)
1721 {
1722         struct rte_eth_dev *dev;
1723
1724         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1725
1726         dev = &rte_eth_devices[port_id];
1727         return dev->data->promiscuous;
1728 }
1729
1730 void
1731 rte_eth_allmulticast_enable(uint16_t port_id)
1732 {
1733         struct rte_eth_dev *dev;
1734
1735         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1736         dev = &rte_eth_devices[port_id];
1737
1738         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1739         (*dev->dev_ops->allmulticast_enable)(dev);
1740         dev->data->all_multicast = 1;
1741 }
1742
1743 void
1744 rte_eth_allmulticast_disable(uint16_t port_id)
1745 {
1746         struct rte_eth_dev *dev;
1747
1748         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1749         dev = &rte_eth_devices[port_id];
1750
1751         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1752         dev->data->all_multicast = 0;
1753         (*dev->dev_ops->allmulticast_disable)(dev);
1754 }
1755
1756 int
1757 rte_eth_allmulticast_get(uint16_t port_id)
1758 {
1759         struct rte_eth_dev *dev;
1760
1761         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1762
1763         dev = &rte_eth_devices[port_id];
1764         return dev->data->all_multicast;
1765 }
1766
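/*
 * Read the cached link status atomically: the whole struct rte_eth_link is
 * assumed to fit in 64 bits, so a single rte_atomic64_cmpset() copies it
 * from dev->data into the caller's buffer without tearing. This is used by
 * the rte_eth_link_get*() functions when link-state interrupts are enabled
 * and the PMD updates dev_link asynchronously.
 */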
1767 static inline int
1768 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1769                                 struct rte_eth_link *link)
1770 {
1771         struct rte_eth_link *dst = link;
1772         struct rte_eth_link *src = &(dev->data->dev_link);
1773
1774         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1775                                         *(uint64_t *)src) == 0)
1776                 return -1;
1777
1778         return 0;
1779 }
1780
1781 void
1782 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1783 {
1784         struct rte_eth_dev *dev;
1785
1786         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1787         dev = &rte_eth_devices[port_id];
1788
1789         if (dev->data->dev_conf.intr_conf.lsc != 0)
1790                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1791         else {
1792                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1793                 (*dev->dev_ops->link_update)(dev, 1);
1794                 *eth_link = dev->data->dev_link;
1795         }
1796 }
1797
1798 void
1799 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1800 {
1801         struct rte_eth_dev *dev;
1802
1803         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1804         dev = &rte_eth_devices[port_id];
1805
1806         if (dev->data->dev_conf.intr_conf.lsc != 0)
1807                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1808         else {
1809                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1810                 (*dev->dev_ops->link_update)(dev, 0);
1811                 *eth_link = dev->data->dev_link;
1812         }
1813 }
1814
1815 int
1816 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1817 {
1818         struct rte_eth_dev *dev;
1819
1820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1821
1822         dev = &rte_eth_devices[port_id];
1823         memset(stats, 0, sizeof(*stats));
1824
1825         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1826         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1827         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1828 }
1829
1830 int
1831 rte_eth_stats_reset(uint16_t port_id)
1832 {
1833         struct rte_eth_dev *dev;
1834
1835         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1836         dev = &rte_eth_devices[port_id];
1837
1838         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1839         (*dev->dev_ops->stats_reset)(dev);
1840         dev->data->rx_mbuf_alloc_failed = 0;
1841
1842         return 0;
1843 }
1844
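/*
 * Number of "basic" xstats exposed for a port: the generic port counters
 * plus one set of per-queue counters for each Rx and Tx queue, with the
 * queue count clamped to RTE_ETHDEV_QUEUE_STAT_CNTRS. Driver-specific
 * xstats, if any, come on top of this (see get_xstats_count()).
 */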
1845 static inline int
1846 get_xstats_basic_count(struct rte_eth_dev *dev)
1847 {
1848         uint16_t nb_rxqs, nb_txqs;
1849         int count;
1850
1851         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1852         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1853
1854         count = RTE_NB_STATS;
1855         count += nb_rxqs * RTE_NB_RXQ_STATS;
1856         count += nb_txqs * RTE_NB_TXQ_STATS;
1857
1858         return count;
1859 }
1860
1861 static int
1862 get_xstats_count(uint16_t port_id)
1863 {
1864         struct rte_eth_dev *dev;
1865         int count;
1866
1867         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1868         dev = &rte_eth_devices[port_id];
1869         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1870                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1871                                 NULL, 0);
1872                 if (count < 0)
1873                         return eth_err(port_id, count);
1874         }
1875         if (dev->dev_ops->xstats_get_names != NULL) {
1876                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1877                 if (count < 0)
1878                         return eth_err(port_id, count);
1879         } else
1880                 count = 0;
1881
1882
1883         count += get_xstats_basic_count(dev);
1884
1885         return count;
1886 }
1887
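/*
 * Translate an xstat name into the numeric id used by the *_by_id API:
 * fetch the full name table with rte_eth_xstats_get_names_by_id() and
 * return the index of the matching entry.
 *
 * Example (sketch only; the name placeholder must be filled in by the
 * application):
 *
 *     uint64_t id, value;
 *     const char *name = ...;   // any xstat name reported for this port
 *
 *     if (rte_eth_xstats_get_id_by_name(port_id, name, &id) == 0)
 *         rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 */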
1888 int
1889 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1890                 uint64_t *id)
1891 {
1892         int cnt_xstats, idx_xstat;
1893
1894         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1895
1896         if (!id) {
1897                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1898                 return -ENOMEM;
1899         }
1900
1901         if (!xstat_name) {
1902                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1903                 return -ENOMEM;
1904         }
1905
1906         /* Get count */
1907         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1908         if (cnt_xstats < 0) {
1909                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1910                 return -ENODEV;
1911         }
1912
1913         /* Get id-name lookup table */
1914         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1915
1916         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1917                         port_id, xstats_names, cnt_xstats, NULL)) {
1918                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1919                 return -1;
1920         }
1921
1922         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1923                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1924                         *id = idx_xstat;
1925                         return 0;
1926                 }
1927         }
1928
1929         return -EINVAL;
1930 }
1931
1932 /* retrieve basic stats names */
1933 static int
1934 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1935         struct rte_eth_xstat_name *xstats_names)
1936 {
1937         int cnt_used_entries = 0;
1938         uint32_t idx, id_queue;
1939         uint16_t num_q;
1940
1941         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1942                 snprintf(xstats_names[cnt_used_entries].name,
1943                         sizeof(xstats_names[0].name),
1944                         "%s", rte_stats_strings[idx].name);
1945                 cnt_used_entries++;
1946         }
1947         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1948         for (id_queue = 0; id_queue < num_q; id_queue++) {
1949                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1950                         snprintf(xstats_names[cnt_used_entries].name,
1951                                 sizeof(xstats_names[0].name),
1952                                 "rx_q%u%s",
1953                                 id_queue, rte_rxq_stats_strings[idx].name);
1954                         cnt_used_entries++;
1955                 }
1956
1957         }
1958         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1959         for (id_queue = 0; id_queue < num_q; id_queue++) {
1960                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1961                         snprintf(xstats_names[cnt_used_entries].name,
1962                                 sizeof(xstats_names[0].name),
1963                                 "tx_q%u%s",
1964                                 id_queue, rte_txq_stats_strings[idx].name);
1965                         cnt_used_entries++;
1966                 }
1967         }
1968         return cnt_used_entries;
1969 }
1970
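/*
 * In the *_by_id API the id space seen by the application is the basic
 * stats (generic plus per-queue counters) followed by the driver-specific
 * xstats; ids are therefore rebased by the basic count before being passed
 * to a PMD's xstats_get_names_by_id / xstats_get_by_id callbacks.
 */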
1971 /* retrieve ethdev extended statistics names */
1972 int
1973 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1974         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1975         uint64_t *ids)
1976 {
1977         struct rte_eth_xstat_name *xstats_names_copy;
1978         unsigned int no_basic_stat_requested = 1;
1979         unsigned int no_ext_stat_requested = 1;
1980         unsigned int expected_entries;
1981         unsigned int basic_count;
1982         struct rte_eth_dev *dev;
1983         unsigned int i;
1984         int ret;
1985
1986         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1987         dev = &rte_eth_devices[port_id];
1988
1989         basic_count = get_xstats_basic_count(dev);
1990         ret = get_xstats_count(port_id);
1991         if (ret < 0)
1992                 return ret;
1993         expected_entries = (unsigned int)ret;
1994
1995         /* Return max number of stats if no ids given */
1996         if (!ids) {
1997                 if (!xstats_names)
1998                         return expected_entries;
1999                 else if (size < expected_entries)
2000                         return expected_entries;
2001         }
2002
2003         if (ids && !xstats_names)
2004                 return -EINVAL;
2005
2006         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2007                 uint64_t ids_copy[size];
2008
2009                 for (i = 0; i < size; i++) {
2010                         if (ids[i] < basic_count) {
2011                                 no_basic_stat_requested = 0;
2012                                 break;
2013                         }
2014
2015                         /*
2016                          * Convert ids to xstats ids that PMD knows.
2017                          * ids known by user are basic + extended stats.
2018                          */
2019                         ids_copy[i] = ids[i] - basic_count;
2020                 }
2021
2022                 if (no_basic_stat_requested)
2023                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2024                                         xstats_names, ids_copy, size);
2025         }
2026
2027         /* Retrieve all stats */
2028         if (!ids) {
2029                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2030                                 expected_entries);
2031                 if (num_stats < 0 || num_stats > (int)expected_entries)
2032                         return num_stats;
2033                 else
2034                         return expected_entries;
2035         }
2036
2037         xstats_names_copy = calloc(expected_entries,
2038                 sizeof(struct rte_eth_xstat_name));
2039
2040         if (!xstats_names_copy) {
2041                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2042                 return -ENOMEM;
2043         }
2044
2045         if (ids) {
2046                 for (i = 0; i < size; i++) {
2047                         if (ids[i] >= basic_count) {
2048                                 no_ext_stat_requested = 0;
2049                                 break;
2050                         }
2051                 }
2052         }
2053
2054         /* Fill xstats_names_copy structure */
2055         if (ids && no_ext_stat_requested) {
2056                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2057         } else {
2058                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2059                         expected_entries);
2060                 if (ret < 0) {
2061                         free(xstats_names_copy);
2062                         return ret;
2063                 }
2064         }
2065
2066         /* Filter stats */
2067         for (i = 0; i < size; i++) {
2068                 if (ids[i] >= expected_entries) {
2069                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2070                         free(xstats_names_copy);
2071                         return -1;
2072                 }
2073                 xstats_names[i] = xstats_names_copy[ids[i]];
2074         }
2075
2076         free(xstats_names_copy);
2077         return size;
2078 }
2079
2080 int
2081 rte_eth_xstats_get_names(uint16_t port_id,
2082         struct rte_eth_xstat_name *xstats_names,
2083         unsigned int size)
2084 {
2085         struct rte_eth_dev *dev;
2086         int cnt_used_entries;
2087         int cnt_expected_entries;
2088         int cnt_driver_entries;
2089
2090         cnt_expected_entries = get_xstats_count(port_id);
2091         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2092                         (int)size < cnt_expected_entries)
2093                 return cnt_expected_entries;
2094
2095         /* port_id checked in get_xstats_count() */
2096         dev = &rte_eth_devices[port_id];
2097
2098         cnt_used_entries = rte_eth_basic_stats_get_names(
2099                 dev, xstats_names);
2100
2101         if (dev->dev_ops->xstats_get_names != NULL) {
2102                 /* If there are any driver-specific xstats, append them
2103                  * to the end of the list.
2104                  */
2105                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2106                         dev,
2107                         xstats_names + cnt_used_entries,
2108                         size - cnt_used_entries);
2109                 if (cnt_driver_entries < 0)
2110                         return eth_err(port_id, cnt_driver_entries);
2111                 cnt_used_entries += cnt_driver_entries;
2112         }
2113
2114         return cnt_used_entries;
2115 }
2116
2117
2118 static int
2119 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2120 {
2121         struct rte_eth_dev *dev;
2122         struct rte_eth_stats eth_stats;
2123         unsigned int count = 0, i, q;
2124         uint64_t val, *stats_ptr;
2125         uint16_t nb_rxqs, nb_txqs;
2126         int ret;
2127
2128         ret = rte_eth_stats_get(port_id, &eth_stats);
2129         if (ret < 0)
2130                 return ret;
2131
2132         dev = &rte_eth_devices[port_id];
2133
2134         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2135         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2136
2137         /* global stats */
2138         for (i = 0; i < RTE_NB_STATS; i++) {
2139                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2140                                         rte_stats_strings[i].offset);
2141                 val = *stats_ptr;
2142                 xstats[count++].value = val;
2143         }
2144
2145         /* per-rxq stats */
2146         for (q = 0; q < nb_rxqs; q++) {
2147                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2148                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2149                                         rte_rxq_stats_strings[i].offset +
2150                                         q * sizeof(uint64_t));
2151                         val = *stats_ptr;
2152                         xstats[count++].value = val;
2153                 }
2154         }
2155
2156         /* per-txq stats */
2157         for (q = 0; q < nb_txqs; q++) {
2158                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2159                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2160                                         rte_txq_stats_strings[i].offset +
2161                                         q * sizeof(uint64_t));
2162                         val = *stats_ptr;
2163                         xstats[count++].value = val;
2164                 }
2165         }
2166         return count;
2167 }
2168
2169 /* retrieve ethdev extended statistics */
2170 int
2171 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2172                          uint64_t *values, unsigned int size)
2173 {
2174         unsigned int no_basic_stat_requested = 1;
2175         unsigned int no_ext_stat_requested = 1;
2176         unsigned int num_xstats_filled;
2177         unsigned int basic_count;
2178         uint16_t expected_entries;
2179         struct rte_eth_dev *dev;
2180         unsigned int i;
2181         int ret;
2182
2183         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2184         ret = get_xstats_count(port_id);
2185         if (ret < 0)
2186                 return ret;
2187         expected_entries = (uint16_t)ret;
2188         struct rte_eth_xstat xstats[expected_entries];
2189         dev = &rte_eth_devices[port_id];
2190         basic_count = get_xstats_basic_count(dev);
2191
2192         /* Return max number of stats if no ids given */
2193         if (!ids) {
2194                 if (!values)
2195                         return expected_entries;
2196                 else if (size < expected_entries)
2197                         return expected_entries;
2198         }
2199
2200         if (ids && !values)
2201                 return -EINVAL;
2202
2203         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2205                 uint64_t ids_copy[size];
2206
2207                 for (i = 0; i < size; i++) {
2208                         if (ids[i] < basic_count) {
2209                                 no_basic_stat_requested = 0;
2210                                 break;
2211                         }
2212
2213                         /*
2214                          * Convert ids to xstats ids that PMD knows.
2215                          * ids known by user are basic + extended stats.
2216                          */
2217                         ids_copy[i] = ids[i] - basic_count;
2218                 }
2219
2220                 if (no_basic_stat_requested)
2221                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2222                                         values, size);
2223         }
2224
2225         if (ids) {
2226                 for (i = 0; i < size; i++) {
2227                         if (ids[i] >= basic_count) {
2228                                 no_ext_stat_requested = 0;
2229                                 break;
2230                         }
2231                 }
2232         }
2233
2234         /* Fill the xstats structure */
2235         if (ids && no_ext_stat_requested)
2236                 ret = rte_eth_basic_stats_get(port_id, xstats);
2237         else
2238                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2239
2240         if (ret < 0)
2241                 return ret;
2242         num_xstats_filled = (unsigned int)ret;
2243
2244         /* Return all stats */
2245         if (!ids) {
2246                 for (i = 0; i < num_xstats_filled; i++)
2247                         values[i] = xstats[i].value;
2248                 return expected_entries;
2249         }
2250
2251         /* Filter stats */
2252         for (i = 0; i < size; i++) {
2253                 if (ids[i] >= expected_entries) {
2254                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2255                         return -1;
2256                 }
2257                 values[i] = xstats[ids[i]].value;
2258         }
2259         return size;
2260 }
2261
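/*
 * Retrieve all xstats for a port: the generic and per-queue counters are
 * written first, then any driver-specific xstats are appended by the PMD.
 * When the supplied array is too small (or NULL) the required number of
 * entries is returned instead, so callers typically probe with n = 0 and
 * then allocate accordingly.
 */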
2262 int
2263 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2264         unsigned int n)
2265 {
2266         struct rte_eth_dev *dev;
2267         unsigned int count = 0, i;
2268         signed int xcount = 0;
2269         uint16_t nb_rxqs, nb_txqs;
2270         int ret;
2271
2272         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2273
2274         dev = &rte_eth_devices[port_id];
2275
2276         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2277         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2278
2279         /* Return generic statistics */
2280         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2281                 (nb_txqs * RTE_NB_TXQ_STATS);
2282
2283         /* implemented by the driver */
2284         if (dev->dev_ops->xstats_get != NULL) {
2285                 /* Retrieve the driver-specific xstats and place them at
2286                  * the end of the xstats array.
2287                  */
2288                 xcount = (*dev->dev_ops->xstats_get)(dev,
2289                                      xstats ? xstats + count : NULL,
2290                                      (n > count) ? n - count : 0);
2291
2292                 if (xcount < 0)
2293                         return eth_err(port_id, xcount);
2294         }
2295
2296         if (n < count + xcount || xstats == NULL)
2297                 return count + xcount;
2298
2299         /* now fill the xstats structure */
2300         ret = rte_eth_basic_stats_get(port_id, xstats);
2301         if (ret < 0)
2302                 return ret;
2303         count = ret;
2304
2305         for (i = 0; i < count; i++)
2306                 xstats[i].id = i;
2307         /* add an offset to driver-specific stats */
2308         for ( ; i < count + xcount; i++)
2309                 xstats[i].id += count;
2310
2311         return count + xcount;
2312 }
2313
2314 /* reset ethdev extended statistics */
2315 void
2316 rte_eth_xstats_reset(uint16_t port_id)
2317 {
2318         struct rte_eth_dev *dev;
2319
2320         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2321         dev = &rte_eth_devices[port_id];
2322
2323         /* implemented by the driver */
2324         if (dev->dev_ops->xstats_reset != NULL) {
2325                 (*dev->dev_ops->xstats_reset)(dev);
2326                 return;
2327         }
2328
2329         /* fallback to default */
2330         rte_eth_stats_reset(port_id);
2331 }
2332
2333 static int
2334 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2335                 uint8_t is_rx)
2336 {
2337         struct rte_eth_dev *dev;
2338
2339         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2340
2341         dev = &rte_eth_devices[port_id];
2342
2343         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2344         return (*dev->dev_ops->queue_stats_mapping_set)
2345                         (dev, queue_id, stat_idx, is_rx);
2346 }
2347
2348
2349 int
2350 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2351                 uint8_t stat_idx)
2352 {
2353         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2354                                                 stat_idx, STAT_QMAP_TX));
2355 }
2356
2357
2358 int
2359 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2360                 uint8_t stat_idx)
2361 {
2362         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2363                                                 stat_idx, STAT_QMAP_RX));
2364 }
2365
2366 int
2367 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2368 {
2369         struct rte_eth_dev *dev;
2370
2371         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2372         dev = &rte_eth_devices[port_id];
2373
2374         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2375         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2376                                                         fw_version, fw_size));
2377 }
2378
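/*
 * Fill in the device info: the structure is zeroed and the descriptor
 * limits are preset to permissive defaults before the PMD callback runs,
 * so drivers that do not report limits still yield usable values; the
 * driver name and current queue counts are then filled in generically.
 */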
2379 void
2380 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2381 {
2382         struct rte_eth_dev *dev;
2383         const struct rte_eth_desc_lim lim = {
2384                 .nb_max = UINT16_MAX,
2385                 .nb_min = 0,
2386                 .nb_align = 1,
2387         };
2388
2389         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2390         dev = &rte_eth_devices[port_id];
2391
2392         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2393         dev_info->rx_desc_lim = lim;
2394         dev_info->tx_desc_lim = lim;
2395
2396         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2397         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2398         dev_info->driver_name = dev->device->driver->name;
2399         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2400         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2401 }
2402
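/*
 * Report which packet types from ptype_mask the PMD can recognise. The
 * return value is the total number of matches even if it exceeds "num",
 * so a caller can size its array in two passes.
 *
 * Example (sketch):
 *
 *     int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *                                              NULL, 0);
 *     if (n > 0) {
 *         uint32_t ptypes[n];
 *
 *         rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *                                          ptypes, n);
 *     }
 */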
2403 int
2404 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2405                                  uint32_t *ptypes, int num)
2406 {
2407         int i, j;
2408         struct rte_eth_dev *dev;
2409         const uint32_t *all_ptypes;
2410
2411         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2412         dev = &rte_eth_devices[port_id];
2413         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2414         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2415
2416         if (!all_ptypes)
2417                 return 0;
2418
2419         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2420                 if (all_ptypes[i] & ptype_mask) {
2421                         if (j < num)
2422                                 ptypes[j] = all_ptypes[i];
2423                         j++;
2424                 }
2425
2426         return j;
2427 }
2428
2429 void
2430 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2431 {
2432         struct rte_eth_dev *dev;
2433
2434         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2435         dev = &rte_eth_devices[port_id];
2436         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2437 }
2438
2439
2440 int
2441 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2442 {
2443         struct rte_eth_dev *dev;
2444
2445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2446
2447         dev = &rte_eth_devices[port_id];
2448         *mtu = dev->data->mtu;
2449         return 0;
2450 }
2451
2452 int
2453 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2454 {
2455         int ret;
2456         struct rte_eth_dev *dev;
2457
2458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2459         dev = &rte_eth_devices[port_id];
2460         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2461
2462         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2463         if (!ret)
2464                 dev->data->mtu = mtu;
2465
2466         return eth_err(port_id, ret);
2467 }
2468
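/*
 * Add or remove a VLAN id from the port's filter. The port must have been
 * configured with DEV_RX_OFFLOAD_VLAN_FILTER; on success the shadow bitmap
 * in dev->data->vlan_filter_conf is updated as well (one bit per VLAN id,
 * 64 ids per array entry).
 */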
2469 int
2470 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2471 {
2472         struct rte_eth_dev *dev;
2473         int ret;
2474
2475         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2476         dev = &rte_eth_devices[port_id];
2477         if (!(dev->data->dev_conf.rxmode.offloads &
2478               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2479                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2480                 return -ENOSYS;
2481         }
2482
2483         if (vlan_id > 4095) {
2484                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2485                                 port_id, (unsigned) vlan_id);
2486                 return -EINVAL;
2487         }
2488         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2489
2490         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2491         if (ret == 0) {
2492                 struct rte_vlan_filter_conf *vfc;
2493                 int vidx;
2494                 int vbit;
2495
2496                 vfc = &dev->data->vlan_filter_conf;
2497                 vidx = vlan_id / 64;
2498                 vbit = vlan_id % 64;
2499
2500                 if (on)
2501                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2502                 else
2503                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2504         }
2505
2506         return eth_err(port_id, ret);
2507 }
2508
2509 int
2510 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2511                                     int on)
2512 {
2513         struct rte_eth_dev *dev;
2514
2515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2516         dev = &rte_eth_devices[port_id];
2517         if (rx_queue_id >= dev->data->nb_rx_queues) {
2518                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2519                 return -EINVAL;
2520         }
2521
2522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2523         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2524
2525         return 0;
2526 }
2527
2528 int
2529 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2530                                 enum rte_vlan_type vlan_type,
2531                                 uint16_t tpid)
2532 {
2533         struct rte_eth_dev *dev;
2534
2535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2536         dev = &rte_eth_devices[port_id];
2537         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2538
2539         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2540                                                                tpid));
2541 }
2542
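/*
 * Apply the requested VLAN offload settings (strip / filter / extend):
 * compare each flag with the current rxmode.offloads, build a mask of the
 * ones that actually change, and only then call the PMD. If the PMD call
 * fails, the previously saved offload configuration is restored.
 */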
2543 int
2544 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2545 {
2546         struct rte_eth_dev *dev;
2547         int ret = 0;
2548         int mask = 0;
2549         int cur, org = 0;
2550         uint64_t orig_offloads;
2551
2552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2553         dev = &rte_eth_devices[port_id];
2554
2555         /* save original values in case of failure */
2556         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2557
2558         /* check which options were changed by the application */
2559         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2560         org = !!(dev->data->dev_conf.rxmode.offloads &
2561                  DEV_RX_OFFLOAD_VLAN_STRIP);
2562         if (cur != org) {
2563                 if (cur)
2564                         dev->data->dev_conf.rxmode.offloads |=
2565                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2566                 else
2567                         dev->data->dev_conf.rxmode.offloads &=
2568                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2569                 mask |= ETH_VLAN_STRIP_MASK;
2570         }
2571
2572         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2573         org = !!(dev->data->dev_conf.rxmode.offloads &
2574                  DEV_RX_OFFLOAD_VLAN_FILTER);
2575         if (cur != org) {
2576                 if (cur)
2577                         dev->data->dev_conf.rxmode.offloads |=
2578                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2579                 else
2580                         dev->data->dev_conf.rxmode.offloads &=
2581                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2582                 mask |= ETH_VLAN_FILTER_MASK;
2583         }
2584
2585         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2586         org = !!(dev->data->dev_conf.rxmode.offloads &
2587                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2588         if (cur != org) {
2589                 if (cur)
2590                         dev->data->dev_conf.rxmode.offloads |=
2591                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2592                 else
2593                         dev->data->dev_conf.rxmode.offloads &=
2594                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2595                 mask |= ETH_VLAN_EXTEND_MASK;
2596         }
2597
2598         /* no change */
2599         if (mask == 0)
2600                 return ret;
2601
2602         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2603
2604         /*
2605          * Convert to the offload bitfield API just in case the underlying PMD
2606          * still supports it.
2607          */
2608         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2609                                     &dev->data->dev_conf.rxmode);
2610         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2611         if (ret) {
2612                 /* hit an error, restore the original values */
2613                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2614                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2615                                             &dev->data->dev_conf.rxmode);
2616         }
2617
2618         return eth_err(port_id, ret);
2619 }
2620
2621 int
2622 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2623 {
2624         struct rte_eth_dev *dev;
2625         int ret = 0;
2626
2627         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2628         dev = &rte_eth_devices[port_id];
2629
2630         if (dev->data->dev_conf.rxmode.offloads &
2631             DEV_RX_OFFLOAD_VLAN_STRIP)
2632                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2633
2634         if (dev->data->dev_conf.rxmode.offloads &
2635             DEV_RX_OFFLOAD_VLAN_FILTER)
2636                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2637
2638         if (dev->data->dev_conf.rxmode.offloads &
2639             DEV_RX_OFFLOAD_VLAN_EXTEND)
2640                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2641
2642         return ret;
2643 }
2644
2645 int
2646 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2647 {
2648         struct rte_eth_dev *dev;
2649
2650         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2651         dev = &rte_eth_devices[port_id];
2652         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2653
2654         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2655 }
2656
2657 int
2658 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2659 {
2660         struct rte_eth_dev *dev;
2661
2662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2663         dev = &rte_eth_devices[port_id];
2664         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2665         memset(fc_conf, 0, sizeof(*fc_conf));
2666         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2667 }
2668
2669 int
2670 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2671 {
2672         struct rte_eth_dev *dev;
2673
2674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2675         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2676                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2677                 return -EINVAL;
2678         }
2679
2680         dev = &rte_eth_devices[port_id];
2681         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2682         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2683 }
2684
2685 int
2686 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2687                                    struct rte_eth_pfc_conf *pfc_conf)
2688 {
2689         struct rte_eth_dev *dev;
2690
2691         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2692         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2693                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2694                 return -EINVAL;
2695         }
2696
2697         dev = &rte_eth_devices[port_id];
2698         /* High water / low water validation is device specific */
2699         if (*dev->dev_ops->priority_flow_ctrl_set)
2700                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2701                                         (dev, pfc_conf));
2702         return -ENOTSUP;
2703 }
2704
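/*
 * RSS redirection table helpers: reta_conf is an array of
 * rte_eth_rss_reta_entry64, each covering RTE_RETA_GROUP_SIZE consecutive
 * table entries; the per-group "mask" selects which of those entries the
 * caller intends to update or query.
 */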
2705 static int
2706 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2707                         uint16_t reta_size)
2708 {
2709         uint16_t i, num;
2710
2711         if (!reta_conf)
2712                 return -EINVAL;
2713
2714         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2715         for (i = 0; i < num; i++) {
2716                 if (reta_conf[i].mask)
2717                         return 0;
2718         }
2719
2720         return -EINVAL;
2721 }
2722
2723 static int
2724 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2725                          uint16_t reta_size,
2726                          uint16_t max_rxq)
2727 {
2728         uint16_t i, idx, shift;
2729
2730         if (!reta_conf)
2731                 return -EINVAL;
2732
2733         if (max_rxq == 0) {
2734                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2735                 return -EINVAL;
2736         }
2737
2738         for (i = 0; i < reta_size; i++) {
2739                 idx = i / RTE_RETA_GROUP_SIZE;
2740                 shift = i % RTE_RETA_GROUP_SIZE;
2741                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2742                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2743                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2744                                 "the maximum rxq index: %u\n", idx, shift,
2745                                 reta_conf[idx].reta[shift], max_rxq);
2746                         return -EINVAL;
2747                 }
2748         }
2749
2750         return 0;
2751 }
2752
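/*
 * Example of preparing reta_conf for rte_eth_dev_rss_reta_update()
 * (a sketch only; reta_size and nb_rx_queues come from the application
 * and reta_size is assumed to be a multiple of RTE_RETA_GROUP_SIZE):
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
 *                                               RTE_RETA_GROUP_SIZE];
 *     uint16_t i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < reta_size; i++) {
 *         uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *         uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *         reta_conf[idx].mask |= 1ULL << shift;
 *         reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */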
2753 int
2754 rte_eth_dev_rss_reta_update(uint16_t port_id,
2755                             struct rte_eth_rss_reta_entry64 *reta_conf,
2756                             uint16_t reta_size)
2757 {
2758         struct rte_eth_dev *dev;
2759         int ret;
2760
2761         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2762         /* Check mask bits */
2763         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2764         if (ret < 0)
2765                 return ret;
2766
2767         dev = &rte_eth_devices[port_id];
2768
2769         /* Check entry value */
2770         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2771                                 dev->data->nb_rx_queues);
2772         if (ret < 0)
2773                 return ret;
2774
2775         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2776         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2777                                                              reta_size));
2778 }
2779
2780 int
2781 rte_eth_dev_rss_reta_query(uint16_t port_id,
2782                            struct rte_eth_rss_reta_entry64 *reta_conf,
2783                            uint16_t reta_size)
2784 {
2785         struct rte_eth_dev *dev;
2786         int ret;
2787
2788         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2789
2790         /* Check mask bits */
2791         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2792         if (ret < 0)
2793                 return ret;
2794
2795         dev = &rte_eth_devices[port_id];
2796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2797         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2798                                                             reta_size));
2799 }
2800
2801 int
2802 rte_eth_dev_rss_hash_update(uint16_t port_id,
2803                             struct rte_eth_rss_conf *rss_conf)
2804 {
2805         struct rte_eth_dev *dev;
2806
2807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2808         dev = &rte_eth_devices[port_id];
2809         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2810         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2811                                                                  rss_conf));
2812 }
2813
2814 int
2815 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2816                               struct rte_eth_rss_conf *rss_conf)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2821         dev = &rte_eth_devices[port_id];
2822         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2823         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2824                                                                    rss_conf));
2825 }
2826
2827 int
2828 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2829                                 struct rte_eth_udp_tunnel *udp_tunnel)
2830 {
2831         struct rte_eth_dev *dev;
2832
2833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2834         if (udp_tunnel == NULL) {
2835                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2836                 return -EINVAL;
2837         }
2838
2839         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2840                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2841                 return -EINVAL;
2842         }
2843
2844         dev = &rte_eth_devices[port_id];
2845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2846         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2847                                                                 udp_tunnel));
2848 }
2849
2850 int
2851 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2852                                    struct rte_eth_udp_tunnel *udp_tunnel)
2853 {
2854         struct rte_eth_dev *dev;
2855
2856         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857         dev = &rte_eth_devices[port_id];
2858
2859         if (udp_tunnel == NULL) {
2860                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2861                 return -EINVAL;
2862         }
2863
2864         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2865                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2866                 return -EINVAL;
2867         }
2868
2869         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2870         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2871                                                                 udp_tunnel));
2872 }
2873
2874 int
2875 rte_eth_led_on(uint16_t port_id)
2876 {
2877         struct rte_eth_dev *dev;
2878
2879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2880         dev = &rte_eth_devices[port_id];
2881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2882         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2883 }
2884
2885 int
2886 rte_eth_led_off(uint16_t port_id)
2887 {
2888         struct rte_eth_dev *dev;
2889
2890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2891         dev = &rte_eth_devices[port_id];
2892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2893         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2894 }
2895
2896 /*
2897  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2898  * an empty spot.
2899  */
2900 static int
2901 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2902 {
2903         struct rte_eth_dev_info dev_info;
2904         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2905         unsigned i;
2906
2907         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2908         rte_eth_dev_info_get(port_id, &dev_info);
2909
2910         for (i = 0; i < dev_info.max_mac_addrs; i++)
2911                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2912                         return i;
2913
2914         return -1;
2915 }
2916
2917 static const struct ether_addr null_mac_addr;
2918
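/*
 * Add a MAC address to the port, associated with a VMDq pool. If the
 * address is already present the pool bit is simply added to its pool
 * mask (a no-op when the bit is already set); otherwise the first free
 * slot in the address array is used. The shadow copy in dev->data is
 * only updated when the PMD accepts the address.
 */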
2919 int
2920 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2921                         uint32_t pool)
2922 {
2923         struct rte_eth_dev *dev;
2924         int index;
2925         uint64_t pool_mask;
2926         int ret;
2927
2928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2929         dev = &rte_eth_devices[port_id];
2930         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2931
2932         if (is_zero_ether_addr(addr)) {
2933                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2934                         port_id);
2935                 return -EINVAL;
2936         }
2937         if (pool >= ETH_64_POOLS) {
2938                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2939                 return -EINVAL;
2940         }
2941
2942         index = get_mac_addr_index(port_id, addr);
2943         if (index < 0) {
2944                 index = get_mac_addr_index(port_id, &null_mac_addr);
2945                 if (index < 0) {
2946                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2947                                 port_id);
2948                         return -ENOSPC;
2949                 }
2950         } else {
2951                 pool_mask = dev->data->mac_pool_sel[index];
2952
2953                 /* If both the MAC address and pool are already set, do nothing */
2954                 if (pool_mask & (1ULL << pool))
2955                         return 0;
2956         }
2957
2958         /* Update NIC */
2959         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2960
2961         if (ret == 0) {
2962                 /* Update address in NIC data structure */
2963                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2964
2965                 /* Update pool bitmap in NIC data structure */
2966                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2967         }
2968
2969         return eth_err(port_id, ret);
2970 }
2971
2972 int
2973 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2974 {
2975         struct rte_eth_dev *dev;
2976         int index;
2977
2978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2979         dev = &rte_eth_devices[port_id];
2980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2981
2982         index = get_mac_addr_index(port_id, addr);
2983         if (index == 0) {
2984                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2985                 return -EADDRINUSE;
2986         } else if (index < 0)
2987                 return 0;  /* Do nothing if address wasn't found */
2988
2989         /* Update NIC */
2990         (*dev->dev_ops->mac_addr_remove)(dev, index);
2991
2992         /* Update address in NIC data structure */
2993         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2994
2995         /* reset pool bitmap */
2996         dev->data->mac_pool_sel[index] = 0;
2997
2998         return 0;
2999 }
3000
3001 int
3002 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3003 {
3004         struct rte_eth_dev *dev;
3005
3006         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3007
3008         if (!is_valid_assigned_ether_addr(addr))
3009                 return -EINVAL;
3010
3011         dev = &rte_eth_devices[port_id];
3012         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3013
3014         /* Update default address in NIC data structure */
3015         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3016
3017         (*dev->dev_ops->mac_addr_set)(dev, addr);
3018
3019         return 0;
3020 }
3021
3022
3023 /*
3024  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3025  * an empty spot.
3026  */
3027 static int
3028 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3029 {
3030         struct rte_eth_dev_info dev_info;
3031         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3032         unsigned i;
3033
3034         rte_eth_dev_info_get(port_id, &dev_info);
3035         if (!dev->data->hash_mac_addrs)
3036                 return -1;
3037
3038         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3039                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3040                         ETHER_ADDR_LEN) == 0)
3041                         return i;
3042
3043         return -1;
3044 }
3045
3046 int
3047 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3048                                 uint8_t on)
3049 {
3050         int index;
3051         int ret;
3052         struct rte_eth_dev *dev;
3053
3054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3055
3056         dev = &rte_eth_devices[port_id];
3057         if (is_zero_ether_addr(addr)) {
3058                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3059                         port_id);
3060                 return -EINVAL;
3061         }
3062
3063         index = get_hash_mac_addr_index(port_id, addr);
3064         /* Check if it's already there, and do nothing */
3065         if ((index >= 0) && on)
3066                 return 0;
3067
3068         if (index < 0) {
3069                 if (!on) {
3070                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3071                                 "set in UTA\n", port_id);
3072                         return -EINVAL;
3073                 }
3074
3075                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3076                 if (index < 0) {
3077                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3078                                         port_id);
3079                         return -ENOSPC;
3080                 }
3081         }
3082
3083         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3084         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3085         if (ret == 0) {
3086                 /* Update address in NIC data structure */
3087                 if (on)
3088                         ether_addr_copy(addr,
3089                                         &dev->data->hash_mac_addrs[index]);
3090                 else
3091                         ether_addr_copy(&null_mac_addr,
3092                                         &dev->data->hash_mac_addrs[index]);
3093         }
3094
3095         return eth_err(port_id, ret);
3096 }
3097
3098 int
3099 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3100 {
3101         struct rte_eth_dev *dev;
3102
3103         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3104
3105         dev = &rte_eth_devices[port_id];
3106
3107         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3108         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3109                                                                        on));
3110 }
3111
3112 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3113                                         uint16_t tx_rate)
3114 {
3115         struct rte_eth_dev *dev;
3116         struct rte_eth_dev_info dev_info;
3117         struct rte_eth_link link;
3118
3119         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3120
3121         dev = &rte_eth_devices[port_id];
3122         rte_eth_dev_info_get(port_id, &dev_info);
3123         link = dev->data->dev_link;
3124
3125         if (queue_idx >= dev_info.max_tx_queues) {
3126                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
3127                                 "invalid queue id=%d\n", port_id, queue_idx);
3128                 return -EINVAL;
3129         }
3130
3131         if (tx_rate > link.link_speed) {
3132                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
3133                                 "bigger than link speed= %d\n",
3134                         tx_rate, link.link_speed);
3135                 return -EINVAL;
3136         }
3137
3138         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3139         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3140                                                         queue_idx, tx_rate));
3141 }
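
/*
 * Illustrative sketch; the port, queue and rate below are placeholders. The
 * rate is expressed in Mbit/s and is rejected when it exceeds the current
 * link speed. Cap TX queue 1 of port 0 at 1000 Mbit/s:
 *
 *     int ret = rte_eth_set_queue_rate_limit(0, 1, 1000);
 *
 *     if (ret == -ENOTSUP)
 *             printf("PMD has no per-queue rate limiting\n");
 */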
3142
3143 int
3144 rte_eth_mirror_rule_set(uint16_t port_id,
3145                         struct rte_eth_mirror_conf *mirror_conf,
3146                         uint8_t rule_id, uint8_t on)
3147 {
3148         struct rte_eth_dev *dev;
3149
3150         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3151         if (mirror_conf->rule_type == 0) {
3152                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3153                 return -EINVAL;
3154         }
3155
3156         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3157                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3158                                 ETH_64_POOLS - 1);
3159                 return -EINVAL;
3160         }
3161
3162         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3163              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3164             (mirror_conf->pool_mask == 0)) {
3165                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
3166                 return -EINVAL;
3167         }
3168
3169         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3170             mirror_conf->vlan.vlan_mask == 0) {
3171                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
3172                 return -EINVAL;
3173         }
3174
3175         dev = &rte_eth_devices[port_id];
3176         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3177
3178         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3179                                                 mirror_conf, rule_id, on));
3180 }
3181
3182 int
3183 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3184 {
3185         struct rte_eth_dev *dev;
3186
3187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3188
3189         dev = &rte_eth_devices[port_id];
3190         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3191
3192         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3193                                                                    rule_id));
3194 }
3195
3196 RTE_INIT(eth_dev_init_cb_lists)
3197 {
3198         int i;
3199
3200         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3201                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3202 }
3203
3204 int
3205 rte_eth_dev_callback_register(uint16_t port_id,
3206                         enum rte_eth_event_type event,
3207                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3208 {
3209         struct rte_eth_dev *dev;
3210         struct rte_eth_dev_callback *user_cb;
3211         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3212         uint16_t last_port;
3213
3214         if (!cb_fn)
3215                 return -EINVAL;
3216
3217         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3218                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3219                 return -EINVAL;
3220         }
3221
3222         if (port_id == RTE_ETH_ALL) {
3223                 next_port = 0;
3224                 last_port = RTE_MAX_ETHPORTS - 1;
3225         } else {
3226                 next_port = last_port = port_id;
3227         }
3228
3229         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3230
3231         do {
3232                 dev = &rte_eth_devices[next_port];
3233
3234                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3235                         if (user_cb->cb_fn == cb_fn &&
3236                                 user_cb->cb_arg == cb_arg &&
3237                                 user_cb->event == event) {
3238                                 break;
3239                         }
3240                 }
3241
3242                 /* create a new callback. */
3243                 if (user_cb == NULL) {
3244                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3245                                 sizeof(struct rte_eth_dev_callback), 0);
3246                         if (user_cb != NULL) {
3247                                 user_cb->cb_fn = cb_fn;
3248                                 user_cb->cb_arg = cb_arg;
3249                                 user_cb->event = event;
3250                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3251                                                   user_cb, next);
3252                         } else {
3253                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3254                                 rte_eth_dev_callback_unregister(port_id, event,
3255                                                                 cb_fn, cb_arg);
3256                                 return -ENOMEM;
3257                         }
3258
3259                 }
3260         } while (++next_port <= last_port);
3261
3262         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3263         return 0;
3264 }
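
/*
 * Illustrative sketch: register a link-status-change callback on every port
 * (RTE_ETH_ALL). The callback is invoked from the interrupt thread, so it
 * should not block. lsc_handler is an application-side placeholder name.
 *
 *     static int
 *     lsc_handler(uint16_t port_id, enum rte_eth_event_type event,
 *                 void *cb_arg, void *ret_param)
 *     {
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             printf("port %u: event %d\n", port_id, event);
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                                   lsc_handler, NULL);
 */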
3265
3266 int
3267 rte_eth_dev_callback_unregister(uint16_t port_id,
3268                         enum rte_eth_event_type event,
3269                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3270 {
3271         int ret;
3272         struct rte_eth_dev *dev;
3273         struct rte_eth_dev_callback *cb, *next;
3274         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3275         uint16_t last_port;
3276
3277         if (!cb_fn)
3278                 return -EINVAL;
3279
3280         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3281                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3282                 return -EINVAL;
3283         }
3284
3285         if (port_id == RTE_ETH_ALL) {
3286                 next_port = 0;
3287                 last_port = RTE_MAX_ETHPORTS - 1;
3288         } else {
3289                 next_port = last_port = port_id;
3290         }
3291
3292         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3293
3294         do {
3295                 dev = &rte_eth_devices[next_port];
3296                 ret = 0;
3297                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3298                      cb = next) {
3299
3300                         next = TAILQ_NEXT(cb, next);
3301
3302                         if (cb->cb_fn != cb_fn || cb->event != event ||
3303                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3304                                 continue;
3305
3306                         /*
3307                          * if this callback is not executing right now,
3308                          * then remove it.
3309                          */
3310                         if (cb->active == 0) {
3311                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3312                                 rte_free(cb);
3313                         } else {
3314                                 ret = -EAGAIN;
3315                         }
3316                 }
3317         } while (++next_port <= last_port);
3318
3319         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3320         return ret;
3321 }
3322
3323 int
3324 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3325         enum rte_eth_event_type event, void *ret_param)
3326 {
3327         struct rte_eth_dev_callback *cb_lst;
3328         struct rte_eth_dev_callback dev_cb;
3329         int rc = 0;
3330
3331         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3332         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3333                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3334                         continue;
3335                 dev_cb = *cb_lst;
3336                 cb_lst->active = 1;
3337                 if (ret_param != NULL)
3338                         dev_cb.ret_param = ret_param;
3339
3340                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3341                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3342                                 dev_cb.cb_arg, dev_cb.ret_param);
3343                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3344                 cb_lst->active = 0;
3345         }
3346         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3347         return rc;
3348 }
3349
3350 int
3351 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3352 {
3353         uint32_t vec;
3354         struct rte_eth_dev *dev;
3355         struct rte_intr_handle *intr_handle;
3356         uint16_t qid;
3357         int rc;
3358
3359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3360
3361         dev = &rte_eth_devices[port_id];
3362
3363         if (!dev->intr_handle) {
3364                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3365                 return -ENOTSUP;
3366         }
3367
3368         intr_handle = dev->intr_handle;
3369         if (!intr_handle->intr_vec) {
3370                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3371                 return -EPERM;
3372         }
3373
3374         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3375                 vec = intr_handle->intr_vec[qid];
3376                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3377                 if (rc && rc != -EEXIST) {
3378                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3379                                         " op %d epfd %d vec %u\n",
3380                                         port_id, qid, op, epfd, vec);
3381                 }
3382         }
3383
3384         return 0;
3385 }
3386
3387 const struct rte_memzone *
3388 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3389                          uint16_t queue_id, size_t size, unsigned align,
3390                          int socket_id)
3391 {
3392         char z_name[RTE_MEMZONE_NAMESIZE];
3393         const struct rte_memzone *mz;
3394
3395         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3396                  dev->device->driver->name, ring_name,
3397                  dev->data->port_id, queue_id);
3398
3399         mz = rte_memzone_lookup(z_name);
3400         if (mz)
3401                 return mz;
3402
3403         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3404 }
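
/*
 * Illustrative PMD-side sketch; ring_size, align and socket_id are
 * placeholders. The memzone name is built from the driver name, ring name,
 * port and queue, so calling again with the same tuple returns the zone
 * reserved earlier instead of allocating a new one.
 *
 *     const struct rte_memzone *mz;
 *
 *     mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id, ring_size,
 *                                   RTE_CACHE_LINE_SIZE, socket_id);
 *     if (mz == NULL)
 *             return -ENOMEM;
 */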
3405
3406 int
3407 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3408                           int epfd, int op, void *data)
3409 {
3410         uint32_t vec;
3411         struct rte_eth_dev *dev;
3412         struct rte_intr_handle *intr_handle;
3413         int rc;
3414
3415         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3416
3417         dev = &rte_eth_devices[port_id];
3418         if (queue_id >= dev->data->nb_rx_queues) {
3419                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3420                 return -EINVAL;
3421         }
3422
3423         if (!dev->intr_handle) {
3424                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3425                 return -ENOTSUP;
3426         }
3427
3428         intr_handle = dev->intr_handle;
3429         if (!intr_handle->intr_vec) {
3430                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3431                 return -EPERM;
3432         }
3433
3434         vec = intr_handle->intr_vec[queue_id];
3435         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3436         if (rc && rc != -EEXIST) {
3437                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3438                                 " op %d epfd %d vec %u\n",
3439                                 port_id, queue_id, op, epfd, vec);
3440                 return rc;
3441         }
3442
3443         return 0;
3444 }
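
/*
 * Illustrative sketch; port 0 / queue 0 are placeholders and the port must
 * have been configured with intr_conf.rxq = 1. Map the RX interrupt of one
 * queue into the calling thread's epoll instance, then sleep until traffic
 * arrives:
 *
 *     struct rte_epoll_event ev;
 *
 *     rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
 *                               RTE_INTR_EVENT_ADD, NULL);
 *     rte_eth_dev_rx_intr_enable(0, 0);
 *     rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *     rte_eth_dev_rx_intr_disable(0, 0);
 */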
3445
3446 int
3447 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3448                            uint16_t queue_id)
3449 {
3450         struct rte_eth_dev *dev;
3451
3452         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3453
3454         dev = &rte_eth_devices[port_id];
3455
3456         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3457         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3458                                                                 queue_id));
3459 }
3460
3461 int
3462 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3463                             uint16_t queue_id)
3464 {
3465         struct rte_eth_dev *dev;
3466
3467         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3468
3469         dev = &rte_eth_devices[port_id];
3470
3471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3472         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3473                                                                 queue_id));
3474 }
3475
3476
3477 int
3478 rte_eth_dev_filter_supported(uint16_t port_id,
3479                              enum rte_filter_type filter_type)
3480 {
3481         struct rte_eth_dev *dev;
3482
3483         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3484
3485         dev = &rte_eth_devices[port_id];
3486         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3487         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3488                                 RTE_ETH_FILTER_NOP, NULL);
3489 }
3490
3491 int
3492 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3493                             enum rte_filter_type filter_type,
3494                             enum rte_filter_op filter_op, void *arg);
3495
3496 int
3497 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3498                             enum rte_filter_type filter_type,
3499                             enum rte_filter_op filter_op, void *arg)
3500 {
3501         struct rte_eth_fdir_info_v22 {
3502                 enum rte_fdir_mode mode;
3503                 struct rte_eth_fdir_masks mask;
3504                 struct rte_eth_fdir_flex_conf flex_conf;
3505                 uint32_t guarant_spc;
3506                 uint32_t best_spc;
3507                 uint32_t flow_types_mask[1];
3508                 uint32_t max_flexpayload;
3509                 uint32_t flex_payload_unit;
3510                 uint32_t max_flex_payload_segment_num;
3511                 uint16_t flex_payload_limit;
3512                 uint32_t flex_bitmask_unit;
3513                 uint32_t max_flex_bitmask_num;
3514         };
3515
3516         struct rte_eth_hash_global_conf_v22 {
3517                 enum rte_eth_hash_function hash_func;
3518                 uint32_t sym_hash_enable_mask[1];
3519                 uint32_t valid_bit_mask[1];
3520         };
3521
3522         struct rte_eth_hash_filter_info_v22 {
3523                 enum rte_eth_hash_filter_info_type info_type;
3524                 union {
3525                         uint8_t enable;
3526                         struct rte_eth_hash_global_conf_v22 global_conf;
3527                         struct rte_eth_input_set_conf input_set_conf;
3528                 } info;
3529         };
3530
3531         struct rte_eth_dev *dev;
3532
3533         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3534
3535         dev = &rte_eth_devices[port_id];
3536         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3537         if (filter_op == RTE_ETH_FILTER_INFO) {
3538                 int retval;
3539                 struct rte_eth_fdir_info_v22 *fdir_info_v22;
3540                 struct rte_eth_fdir_info fdir_info;
3541
3542                 fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
3543
3544                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3545                           filter_op, (void *)&fdir_info);
3546                 fdir_info_v22->mode = fdir_info.mode;
3547                 fdir_info_v22->mask = fdir_info.mask;
3548                 fdir_info_v22->flex_conf = fdir_info.flex_conf;
3549                 fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
3550                 fdir_info_v22->best_spc = fdir_info.best_spc;
3551                 fdir_info_v22->flow_types_mask[0] =
3552                         (uint32_t)fdir_info.flow_types_mask[0];
3553                 fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
3554                 fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
3555                 fdir_info_v22->max_flex_payload_segment_num =
3556                         fdir_info.max_flex_payload_segment_num;
3557                 fdir_info_v22->flex_payload_limit =
3558                         fdir_info.flex_payload_limit;
3559                 fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
3560                 fdir_info_v22->max_flex_bitmask_num =
3561                         fdir_info.max_flex_bitmask_num;
3562                 return retval;
3563         } else if (filter_op == RTE_ETH_FILTER_GET) {
3564                 int retval;
3565                 struct rte_eth_hash_filter_info f_info;
3566                 struct rte_eth_hash_filter_info_v22 *f_info_v22 =
3567                         (struct rte_eth_hash_filter_info_v22 *)arg;
3568
3569                 f_info.info_type = f_info_v22->info_type;
3570                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3571                           filter_op, (void *)&f_info);
3572
3573                 switch (f_info_v22->info_type) {
3574                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3575                         f_info_v22->info.enable = f_info.info.enable;
3576                         break;
3577                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3578                         f_info_v22->info.global_conf.hash_func =
3579                                 f_info.info.global_conf.hash_func;
3580                         f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
3581                                 (uint32_t)
3582                                 f_info.info.global_conf.sym_hash_enable_mask[0];
3583                         f_info_v22->info.global_conf.valid_bit_mask[0] =
3584                                 (uint32_t)
3585                                 f_info.info.global_conf.valid_bit_mask[0];
3586                         break;
3587                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3588                         f_info_v22->info.input_set_conf =
3589                                 f_info.info.input_set_conf;
3590                         break;
3591                 default:
3592                         break;
3593                 }
3594                 return retval;
3595         } else if (filter_op == RTE_ETH_FILTER_SET) {
3596                 struct rte_eth_hash_filter_info f_info;
3597                 struct rte_eth_hash_filter_info_v22 *f_v22 =
3598                         (struct rte_eth_hash_filter_info_v22 *)arg;
3599
3600                 f_info.info_type = f_v22->info_type;
3601                 switch (f_v22->info_type) {
3602                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3603                         f_info.info.enable = f_v22->info.enable;
3604                         break;
3605                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3606                         f_info.info.global_conf.hash_func =
3607                                 f_v22->info.global_conf.hash_func;
3608                         f_info.info.global_conf.sym_hash_enable_mask[0] =
3609                                 (uint32_t)
3610                                 f_v22->info.global_conf.sym_hash_enable_mask[0];
3611                         f_info.info.global_conf.valid_bit_mask[0] =
3612                                 (uint32_t)
3613                                 f_v22->info.global_conf.valid_bit_mask[0];
3614                         break;
3615                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3616                         f_info.info.input_set_conf =
3617                                 f_v22->info.input_set_conf;
3618                         break;
3619                 default:
3620                         break;
3621                 }
3622                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3623                                                     (void *)&f_info);
3624         } else
3625                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3626                                                     arg);
3627 }
3628 VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
3629
3630 int
3631 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3632                               enum rte_filter_type filter_type,
3633                               enum rte_filter_op filter_op, void *arg);
3634
3635 int
3636 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3637                               enum rte_filter_type filter_type,
3638                               enum rte_filter_op filter_op, void *arg)
3639 {
3640         struct rte_eth_dev *dev;
3641
3642         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3643
3644         dev = &rte_eth_devices[port_id];
3645         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3646         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3647                                                              filter_op, arg));
3648 }
3649 BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
3650 MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
3651                   enum rte_filter_type filter_type,
3652                   enum rte_filter_op filter_op, void *arg),
3653                   rte_eth_dev_filter_ctrl_v1802);
3654
3655 void *
3656 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3657                 rte_rx_callback_fn fn, void *user_param)
3658 {
3659 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3660         rte_errno = ENOTSUP;
3661         return NULL;
3662 #endif
3663         /* check input parameters */
3664         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3665                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3666                 rte_errno = EINVAL;
3667                 return NULL;
3668         }
3669         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3670
3671         if (cb == NULL) {
3672                 rte_errno = ENOMEM;
3673                 return NULL;
3674         }
3675
3676         cb->fn.rx = fn;
3677         cb->param = user_param;
3678
3679         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3680         /* Add the callbacks in fifo order. */
3681         struct rte_eth_rxtx_callback *tail =
3682                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3683
3684         if (!tail) {
3685                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3686
3687         } else {
3688                 while (tail->next)
3689                         tail = tail->next;
3690                 tail->next = cb;
3691         }
3692         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3693
3694         return cb;
3695 }
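
/*
 * Illustrative sketch: attach a post-RX callback that counts packets on
 * port 0, queue 0. The callback runs inside rte_eth_rx_burst() on the
 * caller's lcore; count_cb and rx_count are application-side placeholders.
 *
 *     static uint64_t rx_count;
 *
 *     static uint16_t
 *     count_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *              uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *     {
 *             RTE_SET_USED(port_id);
 *             RTE_SET_USED(queue);
 *             RTE_SET_USED(max_pkts);
 *             RTE_SET_USED(pkts);
 *             *(uint64_t *)user_param += nb_pkts;
 *             return nb_pkts;
 *     }
 *
 *     void *cb = rte_eth_add_rx_callback(0, 0, count_cb, &rx_count);
 */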
3696
3697 void *
3698 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3699                 rte_rx_callback_fn fn, void *user_param)
3700 {
3701 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3702         rte_errno = ENOTSUP;
3703         return NULL;
3704 #endif
3705         /* check input parameters */
3706         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3707                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3708                 rte_errno = EINVAL;
3709                 return NULL;
3710         }
3711
3712         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3713
3714         if (cb == NULL) {
3715                 rte_errno = ENOMEM;
3716                 return NULL;
3717         }
3718
3719         cb->fn.rx = fn;
3720         cb->param = user_param;
3721
3722         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3723         /* Add the callbacks at first position. */
3724         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3725         rte_smp_wmb();
3726         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3727         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3728
3729         return cb;
3730 }
3731
3732 void *
3733 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3734                 rte_tx_callback_fn fn, void *user_param)
3735 {
3736 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3737         rte_errno = ENOTSUP;
3738         return NULL;
3739 #endif
3740         /* check input parameters */
3741         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3742                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3743                 rte_errno = EINVAL;
3744                 return NULL;
3745         }
3746
3747         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3748
3749         if (cb == NULL) {
3750                 rte_errno = ENOMEM;
3751                 return NULL;
3752         }
3753
3754         cb->fn.tx = fn;
3755         cb->param = user_param;
3756
3757         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3758         /* Add the callbacks in fifo order. */
3759         struct rte_eth_rxtx_callback *tail =
3760                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3761
3762         if (!tail) {
3763                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3764
3765         } else {
3766                 while (tail->next)
3767                         tail = tail->next;
3768                 tail->next = cb;
3769         }
3770         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3771
3772         return cb;
3773 }
3774
3775 int
3776 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3777                 struct rte_eth_rxtx_callback *user_cb)
3778 {
3779 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3780         return -ENOTSUP;
3781 #endif
3782         /* Check input parameters. */
3783         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3784         if (user_cb == NULL ||
3785                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3786                 return -EINVAL;
3787
3788         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3789         struct rte_eth_rxtx_callback *cb;
3790         struct rte_eth_rxtx_callback **prev_cb;
3791         int ret = -EINVAL;
3792
3793         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3794         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3795         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3796                 cb = *prev_cb;
3797                 if (cb == user_cb) {
3798                         /* Remove the user cb from the callback list. */
3799                         *prev_cb = cb->next;
3800                         ret = 0;
3801                         break;
3802                 }
3803         }
3804         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3805
3806         return ret;
3807 }
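
/*
 * Note: removal only unlinks the callback from the queue's list; the memory
 * is not released here because a concurrent rte_eth_rx_burst() may still be
 * walking that list. Illustrative teardown (the quiescence step is
 * application-specific):
 *
 *     rte_eth_remove_rx_callback(port_id, queue_id, cb);
 *     // wait until no lcore is inside rte_eth_rx_burst() for this queue
 *     rte_free(cb);
 */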
3808
3809 int
3810 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3811                 struct rte_eth_rxtx_callback *user_cb)
3812 {
3813 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3814         return -ENOTSUP;
3815 #endif
3816         /* Check input parameters. */
3817         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3818         if (user_cb == NULL ||
3819                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3820                 return -EINVAL;
3821
3822         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3823         int ret = -EINVAL;
3824         struct rte_eth_rxtx_callback *cb;
3825         struct rte_eth_rxtx_callback **prev_cb;
3826
3827         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3828         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3829         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3830                 cb = *prev_cb;
3831                 if (cb == user_cb) {
3832                         /* Remove the user cb from the callback list. */
3833                         *prev_cb = cb->next;
3834                         ret = 0;
3835                         break;
3836                 }
3837         }
3838         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3839
3840         return ret;
3841 }
3842
3843 int
3844 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3845         struct rte_eth_rxq_info *qinfo)
3846 {
3847         struct rte_eth_dev *dev;
3848
3849         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3850
3851         if (qinfo == NULL)
3852                 return -EINVAL;
3853
3854         dev = &rte_eth_devices[port_id];
3855         if (queue_id >= dev->data->nb_rx_queues) {
3856                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3857                 return -EINVAL;
3858         }
3859
3860         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3861
3862         memset(qinfo, 0, sizeof(*qinfo));
3863         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3864         return 0;
3865 }
3866
3867 int
3868 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3869         struct rte_eth_txq_info *qinfo)
3870 {
3871         struct rte_eth_dev *dev;
3872
3873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3874
3875         if (qinfo == NULL)
3876                 return -EINVAL;
3877
3878         dev = &rte_eth_devices[port_id];
3879         if (queue_id >= dev->data->nb_tx_queues) {
3880                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3881                 return -EINVAL;
3882         }
3883
3884         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3885
3886         memset(qinfo, 0, sizeof(*qinfo));
3887         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3888         return 0;
3889 }
3890
3891 int
3892 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3893                              struct ether_addr *mc_addr_set,
3894                              uint32_t nb_mc_addr)
3895 {
3896         struct rte_eth_dev *dev;
3897
3898         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3899
3900         dev = &rte_eth_devices[port_id];
3901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3902         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3903                                                 mc_addr_set, nb_mc_addr));
3904 }
3905
3906 int
3907 rte_eth_timesync_enable(uint16_t port_id)
3908 {
3909         struct rte_eth_dev *dev;
3910
3911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3912         dev = &rte_eth_devices[port_id];
3913
3914         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3915         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3916 }
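
/*
 * Illustrative sketch; port 0 is a placeholder. Start IEEE 1588 timestamping
 * on a port and read back the device clock; per-packet timestamps are then
 * retrieved with the read_rx/tx_timestamp calls below.
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(0);
 *     if (rte_eth_timesync_read_time(0, &ts) == 0)
 *             printf("device time: %ld.%09ld\n",
 *                    (long)ts.tv_sec, ts.tv_nsec);
 */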
3917
3918 int
3919 rte_eth_timesync_disable(uint16_t port_id)
3920 {
3921         struct rte_eth_dev *dev;
3922
3923         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3924         dev = &rte_eth_devices[port_id];
3925
3926         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3927         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3928 }
3929
3930 int
3931 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3932                                    uint32_t flags)
3933 {
3934         struct rte_eth_dev *dev;
3935
3936         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3937         dev = &rte_eth_devices[port_id];
3938
3939         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3940         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3941                                 (dev, timestamp, flags));
3942 }
3943
3944 int
3945 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3946                                    struct timespec *timestamp)
3947 {
3948         struct rte_eth_dev *dev;
3949
3950         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3951         dev = &rte_eth_devices[port_id];
3952
3953         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3954         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3955                                 (dev, timestamp));
3956 }
3957
3958 int
3959 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3960 {
3961         struct rte_eth_dev *dev;
3962
3963         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3964         dev = &rte_eth_devices[port_id];
3965
3966         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3967         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3968                                                                       delta));
3969 }
3970
3971 int
3972 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3973 {
3974         struct rte_eth_dev *dev;
3975
3976         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3977         dev = &rte_eth_devices[port_id];
3978
3979         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3980         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3981                                                                 timestamp));
3982 }
3983
3984 int
3985 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3986 {
3987         struct rte_eth_dev *dev;
3988
3989         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3990         dev = &rte_eth_devices[port_id];
3991
3992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3993         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3994                                                                 timestamp));
3995 }
3996
3997 int
3998 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3999 {
4000         struct rte_eth_dev *dev;
4001
4002         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4003
4004         dev = &rte_eth_devices[port_id];
4005         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4006         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4007 }
4008
4009 int
4010 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4011 {
4012         struct rte_eth_dev *dev;
4013
4014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4015
4016         dev = &rte_eth_devices[port_id];
4017         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4018         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4019 }
4020
4021 int
4022 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4023 {
4024         struct rte_eth_dev *dev;
4025
4026         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4027
4028         dev = &rte_eth_devices[port_id];
4029         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4030         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4031 }
4032
4033 int
4034 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4035 {
4036         struct rte_eth_dev *dev;
4037
4038         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4039
4040         dev = &rte_eth_devices[port_id];
4041         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4042         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4043 }
4044
4045 int
4046 rte_eth_dev_get_dcb_info(uint16_t port_id,
4047                              struct rte_eth_dcb_info *dcb_info)
4048 {
4049         struct rte_eth_dev *dev;
4050
4051         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4052
4053         dev = &rte_eth_devices[port_id];
4054         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4055
4056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4057         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4058 }
4059
4060 int
4061 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4062                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4063 {
4064         struct rte_eth_dev *dev;
4065
4066         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4067         if (l2_tunnel == NULL) {
4068                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4069                 return -EINVAL;
4070         }
4071
4072         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4073                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
4074                 return -EINVAL;
4075         }
4076
4077         dev = &rte_eth_devices[port_id];
4078         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4079                                 -ENOTSUP);
4080         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4081                                                                 l2_tunnel));
4082 }
4083
4084 int
4085 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4086                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4087                                   uint32_t mask,
4088                                   uint8_t en)
4089 {
4090         struct rte_eth_dev *dev;
4091
4092         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4093
4094         if (l2_tunnel == NULL) {
4095                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4096                 return -EINVAL;
4097         }
4098
4099         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4100                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4101                 return -EINVAL;
4102         }
4103
4104         if (mask == 0) {
4105                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4106                 return -EINVAL;
4107         }
4108
4109         dev = &rte_eth_devices[port_id];
4110         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4111                                 -ENOTSUP);
4112         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4113                                                         l2_tunnel, mask, en));
4114 }
4115
4116 static void
4117 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4118                            const struct rte_eth_desc_lim *desc_lim)
4119 {
4120         if (desc_lim->nb_align != 0)
4121                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4122
4123         if (desc_lim->nb_max != 0)
4124                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4125
4126         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4127 }
4128
4129 int
4130 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4131                                  uint16_t *nb_rx_desc,
4132                                  uint16_t *nb_tx_desc)
4133 {
4134         struct rte_eth_dev *dev;
4135         struct rte_eth_dev_info dev_info;
4136
4137         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4138
4139         dev = &rte_eth_devices[port_id];
4140         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4141
4142         rte_eth_dev_info_get(port_id, &dev_info);
4143
4144         if (nb_rx_desc != NULL)
4145                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4146
4147         if (nb_tx_desc != NULL)
4148                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4149
4150         return 0;
4151 }
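
/*
 * Illustrative sketch; ring sizes, port_id, socket_id and mb_pool are
 * placeholders. Clamp the application's preferred ring sizes to the limits
 * the driver reports in rx_desc_lim/tx_desc_lim before setting up queues:
 *
 *     uint16_t nb_rxd = 1024;
 *     uint16_t nb_txd = 1024;
 *
 *     rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *     rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mb_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */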
4152
4153 int
4154 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4155 {
4156         struct rte_eth_dev *dev;
4157
4158         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4159
4160         if (pool == NULL)
4161                 return -EINVAL;
4162
4163         dev = &rte_eth_devices[port_id];
4164
4165         if (*dev->dev_ops->pool_ops_supported == NULL)
4166                 return 1; /* all pools are supported */
4167
4168         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4169 }
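
/*
 * Illustrative sketch; port 0 and the handler name are placeholders. Check
 * whether the port can work with the "ring_mp_mc" mempool handler before
 * creating the packet pool; a negative return means the handler cannot be
 * used with this port, a non-negative one means it can (1 covers the case
 * above where the PMD does not implement the check).
 *
 *     if (rte_eth_dev_pool_ops_supported(0, "ring_mp_mc") < 0)
 *             printf("ring_mp_mc not usable on port 0\n");
 */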