ethdev: add port ownership
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_compat.h>
38
39 #include "rte_ether.h"
40 #include "rte_ethdev.h"
41 #include "rte_ethdev_driver.h"
42 #include "ethdev_profile.h"
43
44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
46 static uint8_t eth_dev_last_created_port;
47
48 /* spinlock for eth device callbacks */
49 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
50
51 /* spinlock for add/remove rx callbacks */
52 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
53
54 /* spinlock for add/remove tx callbacks */
55 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
56
57 /* spinlock for shared data allocation */
58 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
59
60 /* store statistics names and their offsets in the stats structure */
61 struct rte_eth_xstats_name_off {
62         char name[RTE_ETH_XSTATS_NAME_SIZE];
63         unsigned offset;
64 };
65
66 /* Shared memory between primary and secondary processes. */
67 static struct {
68         uint64_t next_owner_id;
69         rte_spinlock_t ownership_lock;
70         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
71 } *rte_eth_dev_shared_data;
72
73 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
74         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
75         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
76         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
77         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
78         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
79         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
80         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
81         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
82                 rx_nombuf)},
83 };
84
85 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
86
87 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
88         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
89         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
90         {"errors", offsetof(struct rte_eth_stats, q_errors)},
91 };
92
93 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
94                 sizeof(rte_rxq_stats_strings[0]))
95
96 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
97         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
98         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
99 };
100 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
101                 sizeof(rte_txq_stats_strings[0]))
102
103 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
104         { DEV_RX_OFFLOAD_##_name, #_name }
105
106 static const struct {
107         uint64_t offload;
108         const char *name;
109 } rte_rx_offload_names[] = {
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
111         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
112         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
113         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
114         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
115         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
116         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
118         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
119         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
120         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
121         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
122         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
123         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
124         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
125         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
126 };
127
128 #undef RTE_RX_OFFLOAD_BIT2STR
129
130 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
131         { DEV_TX_OFFLOAD_##_name, #_name }
132
133 static const struct {
134         uint64_t offload;
135         const char *name;
136 } rte_tx_offload_names[] = {
137         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
138         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
139         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
140         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
141         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
142         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
143         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
144         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
146         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
147         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
150         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
151         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
152         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
153         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
154         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
155 };
156
157 #undef RTE_TX_OFFLOAD_BIT2STR
158
159 /**
160  * The user application callback description.
161  *
162  * It contains the callback address registered by the user application,
163  * a pointer to the callback's parameter, and the event type.
164  */
165 struct rte_eth_dev_callback {
166         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
167         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
168         void *cb_arg;                           /**< Parameter for callback */
169         void *ret_param;                        /**< Return parameter */
170         enum rte_eth_event_type event;          /**< Interrupt event type */
171         uint32_t active;                        /**< Callback is executing */
172 };
173
174 enum {
175         STAT_QMAP_TX = 0,
176         STAT_QMAP_RX
177 };
178
179 uint16_t
180 rte_eth_find_next(uint16_t port_id)
181 {
182         while (port_id < RTE_MAX_ETHPORTS &&
183                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
184                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
185                 port_id++;
186
187         if (port_id >= RTE_MAX_ETHPORTS)
188                 return RTE_MAX_ETHPORTS;
189
190         return port_id;
191 }
192
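/*
 * Usage sketch (illustrative, not part of this file): an application can
 * walk every valid port either with rte_eth_find_next() directly or, more
 * commonly, through the RTE_ETH_FOREACH_DEV() iteration macro:
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is valid\n", pid);
 */
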
193 static void
194 rte_eth_dev_shared_data_prepare(void)
195 {
196         const unsigned flags = 0;
197         const struct rte_memzone *mz;
198
199         rte_spinlock_lock(&rte_eth_shared_data_lock);
200
201         if (rte_eth_dev_shared_data == NULL) {
202                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
203                         /* Allocate port data and ownership shared memory. */
204                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
205                                         sizeof(*rte_eth_dev_shared_data),
206                                         rte_socket_id(), flags);
207                 } else
208                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
209                 if (mz == NULL)
210                         rte_panic("Cannot allocate ethdev shared data\n");
211
212                 rte_eth_dev_shared_data = mz->addr;
213                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
214                         rte_eth_dev_shared_data->next_owner_id =
215                                         RTE_ETH_DEV_NO_OWNER + 1;
216                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
217                         memset(rte_eth_dev_shared_data->data, 0,
218                                sizeof(rte_eth_dev_shared_data->data));
219                 }
220         }
221
222         rte_spinlock_unlock(&rte_eth_shared_data_lock);
223 }
224
225 struct rte_eth_dev *
226 rte_eth_dev_allocated(const char *name)
227 {
228         unsigned i;
229
230         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
231                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
232                     strcmp(rte_eth_devices[i].data->name, name) == 0)
233                         return &rte_eth_devices[i];
234         }
235         return NULL;
236 }
237
238 static uint16_t
239 rte_eth_dev_find_free_port(void)
240 {
241         unsigned i;
242
243         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
244                 /* Using shared name field to find a free port. */
245                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
246                         RTE_ASSERT(rte_eth_devices[i].state ==
247                                    RTE_ETH_DEV_UNUSED);
248                         return i;
249                 }
250         }
251         return RTE_MAX_ETHPORTS;
252 }
253
254 static struct rte_eth_dev *
255 eth_dev_get(uint16_t port_id)
256 {
257         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
258
259         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
260         eth_dev->state = RTE_ETH_DEV_ATTACHED;
261
262         eth_dev_last_created_port = port_id;
263
264         return eth_dev;
265 }
266
267 struct rte_eth_dev *
268 rte_eth_dev_allocate(const char *name)
269 {
270         uint16_t port_id;
271         struct rte_eth_dev *eth_dev;
272
273         rte_eth_dev_shared_data_prepare();
274
275         port_id = rte_eth_dev_find_free_port();
276         if (port_id == RTE_MAX_ETHPORTS) {
277                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
278                 return NULL;
279         }
280
281         if (rte_eth_dev_allocated(name) != NULL) {
282                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
283                                 name);
284                 return NULL;
285         }
286
287         eth_dev = eth_dev_get(port_id);
288         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
289         eth_dev->data->port_id = port_id;
290         eth_dev->data->mtu = ETHER_MTU;
291
292         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
293
294         return eth_dev;
295 }
296
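/*
 * Driver-side sketch (illustrative only): a PMD probe routine typically
 * allocates its port with rte_eth_dev_allocate() and then fills in the ops
 * table and burst functions. The device name and the example_* symbols
 * below are hypothetical:
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->dev_ops = &example_dev_ops;
 *	eth_dev->rx_pkt_burst = example_rx_burst;
 *	eth_dev->tx_pkt_burst = example_tx_burst;
 */
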
297 /*
298  * Attach to a port already registered by the primary process, which
299  * ensures that the same device gets the same port id in both the
300  * primary and secondary processes.
301  */
302 struct rte_eth_dev *
303 rte_eth_dev_attach_secondary(const char *name)
304 {
305         uint16_t i;
306         struct rte_eth_dev *eth_dev;
307
308         rte_eth_dev_shared_data_prepare();
309
310         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
311                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
312                         break;
313         }
314         if (i == RTE_MAX_ETHPORTS) {
315                 RTE_PMD_DEBUG_TRACE(
316                         "device %s is not driven by the primary process\n",
317                         name);
318                 return NULL;
319         }
320
321         eth_dev = eth_dev_get(i);
322         RTE_ASSERT(eth_dev->data->port_id == i);
323
324         return eth_dev;
325 }
326
327 int
328 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
329 {
330         if (eth_dev == NULL)
331                 return -EINVAL;
332
333         rte_eth_dev_shared_data_prepare();
334
335         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
336
337         eth_dev->state = RTE_ETH_DEV_UNUSED;
338
339         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
340
341         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
342
343         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
344
345         return 0;
346 }
347
348 int
349 rte_eth_dev_is_valid_port(uint16_t port_id)
350 {
351         if (port_id >= RTE_MAX_ETHPORTS ||
352             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
353                 return 0;
354         else
355                 return 1;
356 }
357
358 static int
359 rte_eth_is_valid_owner_id(uint64_t owner_id)
360 {
361         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
362             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
363                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
364                 return 0;
365         }
366         return 1;
367 }
368
369 uint64_t __rte_experimental
370 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
371 {
372         while (port_id < RTE_MAX_ETHPORTS &&
373                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
374                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
375                rte_eth_devices[port_id].data->owner.id != owner_id))
376                 port_id++;
377
378         if (port_id >= RTE_MAX_ETHPORTS)
379                 return RTE_MAX_ETHPORTS;
380
381         return port_id;
382 }
383
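/*
 * Usage sketch (illustrative): ports claimed with a given owner identifier
 * can be iterated with the RTE_ETH_FOREACH_DEV_OWNED_BY() macro, which is
 * built on this function; my_owner_id is assumed to come from
 * rte_eth_dev_owner_new():
 *
 *	uint16_t pid;
 *
 *	RTE_ETH_FOREACH_DEV_OWNED_BY(pid, my_owner_id)
 *		printf("port %u is owned by %016lX\n", pid, my_owner_id);
 */
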
384 int __rte_experimental
385 rte_eth_dev_owner_new(uint64_t *owner_id)
386 {
387         rte_eth_dev_shared_data_prepare();
388
389         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
390
391         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
392
393         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
394         return 0;
395 }
396
397 static int
398 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
399                        const struct rte_eth_dev_owner *new_owner)
400 {
401         struct rte_eth_dev_owner *port_owner;
402         int sret;
403
404         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
405
406         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
407             !rte_eth_is_valid_owner_id(old_owner_id))
408                 return -EINVAL;
409
410         port_owner = &rte_eth_devices[port_id].data->owner;
411         if (port_owner->id != old_owner_id) {
412                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
413                                     " by %s_%016lX.\n", port_id,
414                                     port_owner->name, port_owner->id);
415                 return -EPERM;
416         }
417
418         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
419                         new_owner->name);
420         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
421                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
422                                     port_id);
423
424         port_owner->id = new_owner->id;
425
426         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
427                             new_owner->name, new_owner->id);
428
429         return 0;
430 }
431
432 int __rte_experimental
433 rte_eth_dev_owner_set(const uint16_t port_id,
434                       const struct rte_eth_dev_owner *owner)
435 {
436         int ret;
437
438         rte_eth_dev_shared_data_prepare();
439
440         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
441
442         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
443
444         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
445         return ret;
446 }
447
448 int __rte_experimental
449 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
450 {
451         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
452                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
453         int ret;
454
455         rte_eth_dev_shared_data_prepare();
456
457         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
458
459         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
460
461         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
462         return ret;
463 }
464
465 void __rte_experimental
466 rte_eth_dev_owner_delete(const uint64_t owner_id)
467 {
468         uint16_t port_id;
469
470         rte_eth_dev_shared_data_prepare();
471
472         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
473
474         if (rte_eth_is_valid_owner_id(owner_id)) {
475                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
476                         memset(&rte_eth_devices[port_id].data->owner, 0,
477                                sizeof(struct rte_eth_dev_owner));
478                 RTE_PMD_DEBUG_TRACE("All ports owned by the %016lX identifier"
479                                     " have been released.\n", owner_id);
480         }
481
482         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
483 }
484
485 int __rte_experimental
486 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
487 {
488         int ret = 0;
489
490         rte_eth_dev_shared_data_prepare();
491
492         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
493
494         if (!rte_eth_dev_is_valid_port(port_id)) {
495                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
496                 ret = -ENODEV;
497         } else {
498                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
499                            sizeof(*owner));
500         }
501
502         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
503         return ret;
504 }
505
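/*
 * Usage sketch (illustrative, error handling trimmed): a typical lifecycle
 * of the experimental port ownership API above. port_id refers to an
 * already probed port and the owner name "my_app" is only an example:
 *
 *	struct rte_eth_dev_owner owner;
 *	uint64_t my_owner_id;
 *
 *	rte_eth_dev_owner_new(&my_owner_id);
 *	owner.id = my_owner_id;
 *	snprintf(owner.name, RTE_ETH_MAX_OWNER_NAME_LEN, "my_app");
 *
 *	if (rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... use the port; it is now skipped by RTE_ETH_FOREACH_DEV() ...
 *		rte_eth_dev_owner_unset(port_id, my_owner_id);
 *	}
 *
 *	Alternatively, rte_eth_dev_owner_delete(my_owner_id) releases every
 *	port still owned by the identifier at once.
 */
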
506 int
507 rte_eth_dev_socket_id(uint16_t port_id)
508 {
509         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
510         return rte_eth_devices[port_id].data->numa_node;
511 }
512
513 void *
514 rte_eth_dev_get_sec_ctx(uint8_t port_id)
515 {
516         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
517         return rte_eth_devices[port_id].security_ctx;
518 }
519
520 uint16_t
521 rte_eth_dev_count(void)
522 {
523         uint16_t p;
524         uint16_t count;
525
526         count = 0;
527
528         RTE_ETH_FOREACH_DEV(p)
529                 count++;
530
531         return count;
532 }
533
534 int
535 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
536 {
537         char *tmp;
538
539         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
540
541         if (name == NULL) {
542                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
543                 return -EINVAL;
544         }
545
546         /* Don't check 'rte_eth_devices[port_id].data' here,
547          * because it might be overwritten by a VDEV PMD. */
548         tmp = rte_eth_dev_shared_data->data[port_id].name;
549         strcpy(name, tmp);
550         return 0;
551 }
552
553 int
554 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
555 {
556         uint32_t pid;
557
558         if (name == NULL) {
559                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
560                 return -EINVAL;
561         }
562
563         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
564                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
565                     !strncmp(name, rte_eth_dev_shared_data->data[pid].name,
566                              strlen(name))) {
567                         *port_id = pid;
568                         return 0;
569                 }
570         }
571
572         return -ENODEV;
573 }
574
575 static int
576 eth_err(uint16_t port_id, int ret)
577 {
578         if (ret == 0)
579                 return 0;
580         if (rte_eth_dev_is_removed(port_id))
581                 return -EIO;
582         return ret;
583 }
584
585 /* attach the new device, then store port_id of the device */
586 int
587 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
588 {
589         int ret = -1;
590         int current = rte_eth_dev_count();
591         char *name = NULL;
592         char *args = NULL;
593
594         if ((devargs == NULL) || (port_id == NULL)) {
595                 ret = -EINVAL;
596                 goto err;
597         }
598
599         /* parse devargs, then retrieve device name and args */
600         if (rte_eal_parse_devargs_str(devargs, &name, &args))
601                 goto err;
602
603         ret = rte_eal_dev_attach(name, args);
604         if (ret < 0)
605                 goto err;
606
607         /* no point looking at the port count if no port exists */
608         if (!rte_eth_dev_count()) {
609                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
610                 ret = -1;
611                 goto err;
612         }
613
614         /* if nothing happened, there is a bug here, since some driver told us
615          * it did attach a device, but did not create a port.
616          */
617         if (current == rte_eth_dev_count()) {
618                 ret = -1;
619                 goto err;
620         }
621
622         *port_id = eth_dev_last_created_port;
623         ret = 0;
624
625 err:
626         free(name);
627         free(args);
628         return ret;
629 }
630
631 /* detach the device, then store the name of the device */
632 int
633 rte_eth_dev_detach(uint16_t port_id, char *name)
634 {
635         uint32_t dev_flags;
636         int ret = -1;
637
638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
639
640         if (name == NULL) {
641                 ret = -EINVAL;
642                 goto err;
643         }
644
645         dev_flags = rte_eth_devices[port_id].data->dev_flags;
646         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
647                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
648                         port_id);
649                 ret = -ENOTSUP;
650                 goto err;
651         }
652
653         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
654                  "%s", rte_eth_devices[port_id].data->name);
655
656         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
657         if (ret < 0)
658                 goto err;
659
660         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
661         return 0;
662
663 err:
664         return ret;
665 }
666
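/*
 * Usage sketch (illustrative): hot-plugging a device from a devargs string
 * and detaching it again; the "net_ring0" vdev name is only an example and
 * the name buffer must be large enough to hold the port name:
 *
 *	uint16_t new_port;
 *	char detached_name[64];
 *
 *	if (rte_eth_dev_attach("net_ring0", &new_port) == 0) {
 *		...
 *		rte_eth_dev_detach(new_port, detached_name);
 *	}
 */
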
667 static int
668 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
669 {
670         uint16_t old_nb_queues = dev->data->nb_rx_queues;
671         void **rxq;
672         unsigned i;
673
674         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
675                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
676                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
677                                 RTE_CACHE_LINE_SIZE);
678                 if (dev->data->rx_queues == NULL) {
679                         dev->data->nb_rx_queues = 0;
680                         return -(ENOMEM);
681                 }
682         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
683                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
684
685                 rxq = dev->data->rx_queues;
686
687                 for (i = nb_queues; i < old_nb_queues; i++)
688                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
689                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
690                                 RTE_CACHE_LINE_SIZE);
691                 if (rxq == NULL)
692                         return -(ENOMEM);
693                 if (nb_queues > old_nb_queues) {
694                         uint16_t new_qs = nb_queues - old_nb_queues;
695
696                         memset(rxq + old_nb_queues, 0,
697                                 sizeof(rxq[0]) * new_qs);
698                 }
699
700                 dev->data->rx_queues = rxq;
701
702         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
703                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
704
705                 rxq = dev->data->rx_queues;
706
707                 for (i = nb_queues; i < old_nb_queues; i++)
708                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
709
710                 rte_free(dev->data->rx_queues);
711                 dev->data->rx_queues = NULL;
712         }
713         dev->data->nb_rx_queues = nb_queues;
714         return 0;
715 }
716
717 int
718 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
719 {
720         struct rte_eth_dev *dev;
721
722         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
723
724         dev = &rte_eth_devices[port_id];
725         if (rx_queue_id >= dev->data->nb_rx_queues) {
726                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
727                 return -EINVAL;
728         }
729
730         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
731
732         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
733                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
734                         " already started\n",
735                         rx_queue_id, port_id);
736                 return 0;
737         }
738
739         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
740                                                              rx_queue_id));
741
742 }
743
744 int
745 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
746 {
747         struct rte_eth_dev *dev;
748
749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
750
751         dev = &rte_eth_devices[port_id];
752         if (rx_queue_id >= dev->data->nb_rx_queues) {
753                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
754                 return -EINVAL;
755         }
756
757         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
758
759         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
760                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
761                         " already stopped\n",
762                         rx_queue_id, port_id);
763                 return 0;
764         }
765
766         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
767
768 }
769
770 int
771 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
772 {
773         struct rte_eth_dev *dev;
774
775         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
776
777         dev = &rte_eth_devices[port_id];
778         if (tx_queue_id >= dev->data->nb_tx_queues) {
779                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
780                 return -EINVAL;
781         }
782
783         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
784
785         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
786                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
787                         " already started\n",
788                         tx_queue_id, port_id);
789                 return 0;
790         }
791
792         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
793                                                              tx_queue_id));
794
795 }
796
797 int
798 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
799 {
800         struct rte_eth_dev *dev;
801
802         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
803
804         dev = &rte_eth_devices[port_id];
805         if (tx_queue_id >= dev->data->nb_tx_queues) {
806                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
807                 return -EINVAL;
808         }
809
810         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
811
812         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
813                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
814                         " already stopped\n",
815                         tx_queue_id, port_id);
816                 return 0;
817         }
818
819         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
820
821 }
822
823 static int
824 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
825 {
826         uint16_t old_nb_queues = dev->data->nb_tx_queues;
827         void **txq;
828         unsigned i;
829
830         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
831                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
832                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
833                                                    RTE_CACHE_LINE_SIZE);
834                 if (dev->data->tx_queues == NULL) {
835                         dev->data->nb_tx_queues = 0;
836                         return -(ENOMEM);
837                 }
838         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
839                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
840
841                 txq = dev->data->tx_queues;
842
843                 for (i = nb_queues; i < old_nb_queues; i++)
844                         (*dev->dev_ops->tx_queue_release)(txq[i]);
845                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
846                                   RTE_CACHE_LINE_SIZE);
847                 if (txq == NULL)
848                         return -ENOMEM;
849                 if (nb_queues > old_nb_queues) {
850                         uint16_t new_qs = nb_queues - old_nb_queues;
851
852                         memset(txq + old_nb_queues, 0,
853                                sizeof(txq[0]) * new_qs);
854                 }
855
856                 dev->data->tx_queues = txq;
857
858         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
859                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
860
861                 txq = dev->data->tx_queues;
862
863                 for (i = nb_queues; i < old_nb_queues; i++)
864                         (*dev->dev_ops->tx_queue_release)(txq[i]);
865
866                 rte_free(dev->data->tx_queues);
867                 dev->data->tx_queues = NULL;
868         }
869         dev->data->nb_tx_queues = nb_queues;
870         return 0;
871 }
872
873 uint32_t
874 rte_eth_speed_bitflag(uint32_t speed, int duplex)
875 {
876         switch (speed) {
877         case ETH_SPEED_NUM_10M:
878                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
879         case ETH_SPEED_NUM_100M:
880                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
881         case ETH_SPEED_NUM_1G:
882                 return ETH_LINK_SPEED_1G;
883         case ETH_SPEED_NUM_2_5G:
884                 return ETH_LINK_SPEED_2_5G;
885         case ETH_SPEED_NUM_5G:
886                 return ETH_LINK_SPEED_5G;
887         case ETH_SPEED_NUM_10G:
888                 return ETH_LINK_SPEED_10G;
889         case ETH_SPEED_NUM_20G:
890                 return ETH_LINK_SPEED_20G;
891         case ETH_SPEED_NUM_25G:
892                 return ETH_LINK_SPEED_25G;
893         case ETH_SPEED_NUM_40G:
894                 return ETH_LINK_SPEED_40G;
895         case ETH_SPEED_NUM_50G:
896                 return ETH_LINK_SPEED_50G;
897         case ETH_SPEED_NUM_56G:
898                 return ETH_LINK_SPEED_56G;
899         case ETH_SPEED_NUM_100G:
900                 return ETH_LINK_SPEED_100G;
901         default:
902                 return 0;
903         }
904 }
905
906 /**
907  * A conversion function from rxmode bitfield API.
908  */
909 static void
910 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
911                                     uint64_t *rx_offloads)
912 {
913         uint64_t offloads = 0;
914
915         if (rxmode->header_split == 1)
916                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
917         if (rxmode->hw_ip_checksum == 1)
918                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
919         if (rxmode->hw_vlan_filter == 1)
920                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
921         if (rxmode->hw_vlan_strip == 1)
922                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
923         if (rxmode->hw_vlan_extend == 1)
924                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
925         if (rxmode->jumbo_frame == 1)
926                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
927         if (rxmode->hw_strip_crc == 1)
928                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
929         if (rxmode->enable_scatter == 1)
930                 offloads |= DEV_RX_OFFLOAD_SCATTER;
931         if (rxmode->enable_lro == 1)
932                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
933         if (rxmode->hw_timestamp == 1)
934                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
935         if (rxmode->security == 1)
936                 offloads |= DEV_RX_OFFLOAD_SECURITY;
937
938         *rx_offloads = offloads;
939 }
940
941 /**
942  * A conversion function from rxmode offloads API.
943  */
944 static void
945 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
946                             struct rte_eth_rxmode *rxmode)
947 {
948
949         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
950                 rxmode->header_split = 1;
951         else
952                 rxmode->header_split = 0;
953         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
954                 rxmode->hw_ip_checksum = 1;
955         else
956                 rxmode->hw_ip_checksum = 0;
957         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
958                 rxmode->hw_vlan_filter = 1;
959         else
960                 rxmode->hw_vlan_filter = 0;
961         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
962                 rxmode->hw_vlan_strip = 1;
963         else
964                 rxmode->hw_vlan_strip = 0;
965         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
966                 rxmode->hw_vlan_extend = 1;
967         else
968                 rxmode->hw_vlan_extend = 0;
969         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
970                 rxmode->jumbo_frame = 1;
971         else
972                 rxmode->jumbo_frame = 0;
973         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
974                 rxmode->hw_strip_crc = 1;
975         else
976                 rxmode->hw_strip_crc = 0;
977         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
978                 rxmode->enable_scatter = 1;
979         else
980                 rxmode->enable_scatter = 0;
981         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
982                 rxmode->enable_lro = 1;
983         else
984                 rxmode->enable_lro = 0;
985         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
986                 rxmode->hw_timestamp = 1;
987         else
988                 rxmode->hw_timestamp = 0;
989         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
990                 rxmode->security = 1;
991         else
992                 rxmode->security = 0;
993 }
994
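/*
 * Configuration sketch (illustrative): the two Rx configuration styles the
 * helpers above translate between. Legacy bitfield flags:
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.rxmode.hw_ip_checksum = 1;
 *	conf.rxmode.hw_strip_crc = 1;
 *
 * and the equivalent per-port offload flags, which new applications are
 * expected to use instead:
 *
 *	conf.rxmode.ignore_offload_bitfield = 1;
 *	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
 *			       DEV_RX_OFFLOAD_CRC_STRIP;
 */
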
995 const char * __rte_experimental
996 rte_eth_dev_rx_offload_name(uint64_t offload)
997 {
998         const char *name = "UNKNOWN";
999         unsigned int i;
1000
1001         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1002                 if (offload == rte_rx_offload_names[i].offload) {
1003                         name = rte_rx_offload_names[i].name;
1004                         break;
1005                 }
1006         }
1007
1008         return name;
1009 }
1010
1011 const char * __rte_experimental
1012 rte_eth_dev_tx_offload_name(uint64_t offload)
1013 {
1014         const char *name = "UNKNOWN";
1015         unsigned int i;
1016
1017         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1018                 if (offload == rte_tx_offload_names[i].offload) {
1019                         name = rte_tx_offload_names[i].name;
1020                         break;
1021                 }
1022         }
1023
1024         return name;
1025 }
1026
1027 int
1028 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1029                       const struct rte_eth_conf *dev_conf)
1030 {
1031         struct rte_eth_dev *dev;
1032         struct rte_eth_dev_info dev_info;
1033         struct rte_eth_conf local_conf = *dev_conf;
1034         int diag;
1035
1036         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1037
1038         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1039                 RTE_PMD_DEBUG_TRACE(
1040                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1041                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1042                 return -EINVAL;
1043         }
1044
1045         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1046                 RTE_PMD_DEBUG_TRACE(
1047                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1048                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1049                 return -EINVAL;
1050         }
1051
1052         dev = &rte_eth_devices[port_id];
1053
1054         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1055         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1056
1057         if (dev->data->dev_started) {
1058                 RTE_PMD_DEBUG_TRACE(
1059                     "port %d must be stopped to allow configuration\n", port_id);
1060                 return -EBUSY;
1061         }
1062
1063         /*
1064          * Convert between the offloads API to enable PMDs to support
1065          * only one of them.
1066          */
1067         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
1068                 rte_eth_convert_rx_offload_bitfield(
1069                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1070         } else {
1071                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
1072                                             &local_conf.rxmode);
1073         }
1074
1075         /* Copy the dev_conf parameter into the dev structure */
1076         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1077
1078         /*
1079          * Check that the numbers of RX and TX queues are not greater
1080          * than the maximum number of RX and TX queues supported by the
1081          * configured device.
1082          */
1083         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1084
1085         if (nb_rx_q == 0 && nb_tx_q == 0) {
1086                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: number of RX and TX queues cannot both be 0\n", port_id);
1087                 return -EINVAL;
1088         }
1089
1090         if (nb_rx_q > dev_info.max_rx_queues) {
1091                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1092                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1093                 return -EINVAL;
1094         }
1095
1096         if (nb_tx_q > dev_info.max_tx_queues) {
1097                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1098                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1099                 return -EINVAL;
1100         }
1101
1102         /* Check that the device supports requested interrupts */
1103         if ((dev_conf->intr_conf.lsc == 1) &&
1104                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1105                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1106                                         dev->device->driver->name);
1107                         return -EINVAL;
1108         }
1109         if ((dev_conf->intr_conf.rmv == 1) &&
1110             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1111                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1112                                     dev->device->driver->name);
1113                 return -EINVAL;
1114         }
1115
1116         /*
1117          * If jumbo frames are enabled, check that the maximum RX packet
1118          * length is supported by the configured device.
1119          */
1120         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1121                 if (dev_conf->rxmode.max_rx_pkt_len >
1122                     dev_info.max_rx_pktlen) {
1123                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1124                                 " > max valid value %u\n",
1125                                 port_id,
1126                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1127                                 (unsigned)dev_info.max_rx_pktlen);
1128                         return -EINVAL;
1129                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1130                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1131                                 " < min valid value %u\n",
1132                                 port_id,
1133                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1134                                 (unsigned)ETHER_MIN_LEN);
1135                         return -EINVAL;
1136                 }
1137         } else {
1138                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1139                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1140                         /* Use default value */
1141                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1142                                                         ETHER_MAX_LEN;
1143         }
1144
1145         /*
1146          * Setup new number of RX/TX queues and reconfigure device.
1147          */
1148         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1149         if (diag != 0) {
1150                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1151                                 port_id, diag);
1152                 return diag;
1153         }
1154
1155         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1156         if (diag != 0) {
1157                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1158                                 port_id, diag);
1159                 rte_eth_dev_rx_queue_config(dev, 0);
1160                 return diag;
1161         }
1162
1163         diag = (*dev->dev_ops->dev_configure)(dev);
1164         if (diag != 0) {
1165                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1166                                 port_id, diag);
1167                 rte_eth_dev_rx_queue_config(dev, 0);
1168                 rte_eth_dev_tx_queue_config(dev, 0);
1169                 return eth_err(port_id, diag);
1170         }
1171
1172         /* Initialize Rx profiling if enabled at compilation time. */
1173         diag = __rte_eth_profile_rx_init(port_id, dev);
1174         if (diag != 0) {
1175                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1176                                 port_id, diag);
1177                 rte_eth_dev_rx_queue_config(dev, 0);
1178                 rte_eth_dev_tx_queue_config(dev, 0);
1179                 return eth_err(port_id, diag);
1180         }
1181
1182         return 0;
1183 }
1184
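/*
 * Usage sketch (illustrative, error handling trimmed): a minimal port
 * configuration with one Rx and one Tx queue using the offloads API:
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	port_conf.rxmode.ignore_offload_bitfield = 1;
 *	port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */
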
1185 void
1186 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1187 {
1188         if (dev->data->dev_started) {
1189                 RTE_PMD_DEBUG_TRACE(
1190                         "port %d must be stopped to allow reset\n",
1191                         dev->data->port_id);
1192                 return;
1193         }
1194
1195         rte_eth_dev_rx_queue_config(dev, 0);
1196         rte_eth_dev_tx_queue_config(dev, 0);
1197
1198         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1199 }
1200
1201 static void
1202 rte_eth_dev_config_restore(uint16_t port_id)
1203 {
1204         struct rte_eth_dev *dev;
1205         struct rte_eth_dev_info dev_info;
1206         struct ether_addr *addr;
1207         uint16_t i;
1208         uint32_t pool = 0;
1209         uint64_t pool_mask;
1210
1211         dev = &rte_eth_devices[port_id];
1212
1213         rte_eth_dev_info_get(port_id, &dev_info);
1214
1215         /* replay MAC address configuration including default MAC */
1216         addr = &dev->data->mac_addrs[0];
1217         if (*dev->dev_ops->mac_addr_set != NULL)
1218                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1219         else if (*dev->dev_ops->mac_addr_add != NULL)
1220                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1221
1222         if (*dev->dev_ops->mac_addr_add != NULL) {
1223                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1224                         addr = &dev->data->mac_addrs[i];
1225
1226                         /* skip zero address */
1227                         if (is_zero_ether_addr(addr))
1228                                 continue;
1229
1230                         pool = 0;
1231                         pool_mask = dev->data->mac_pool_sel[i];
1232
1233                         do {
1234                                 if (pool_mask & 1ULL)
1235                                         (*dev->dev_ops->mac_addr_add)(dev,
1236                                                 addr, i, pool);
1237                                 pool_mask >>= 1;
1238                                 pool++;
1239                         } while (pool_mask);
1240                 }
1241         }
1242
1243         /* replay promiscuous configuration */
1244         if (rte_eth_promiscuous_get(port_id) == 1)
1245                 rte_eth_promiscuous_enable(port_id);
1246         else if (rte_eth_promiscuous_get(port_id) == 0)
1247                 rte_eth_promiscuous_disable(port_id);
1248
1249         /* replay all multicast configuration */
1250         if (rte_eth_allmulticast_get(port_id) == 1)
1251                 rte_eth_allmulticast_enable(port_id);
1252         else if (rte_eth_allmulticast_get(port_id) == 0)
1253                 rte_eth_allmulticast_disable(port_id);
1254 }
1255
1256 int
1257 rte_eth_dev_start(uint16_t port_id)
1258 {
1259         struct rte_eth_dev *dev;
1260         int diag;
1261
1262         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1263
1264         dev = &rte_eth_devices[port_id];
1265
1266         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1267
1268         if (dev->data->dev_started != 0) {
1269                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1270                         " already started\n",
1271                         port_id);
1272                 return 0;
1273         }
1274
1275         diag = (*dev->dev_ops->dev_start)(dev);
1276         if (diag == 0)
1277                 dev->data->dev_started = 1;
1278         else
1279                 return eth_err(port_id, diag);
1280
1281         rte_eth_dev_config_restore(port_id);
1282
1283         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1284                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1285                 (*dev->dev_ops->link_update)(dev, 0);
1286         }
1287         return 0;
1288 }
1289
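/*
 * Bring-up sketch (illustrative): the usual call order is configure, per
 * queue setup, then start; mb_pool and port_conf are assumed to have been
 * created as in the other sketches in this file:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */
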
1290 void
1291 rte_eth_dev_stop(uint16_t port_id)
1292 {
1293         struct rte_eth_dev *dev;
1294
1295         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1296         dev = &rte_eth_devices[port_id];
1297
1298         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1299
1300         if (dev->data->dev_started == 0) {
1301                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1302                         " already stopped\n",
1303                         port_id);
1304                 return;
1305         }
1306
1307         dev->data->dev_started = 0;
1308         (*dev->dev_ops->dev_stop)(dev);
1309 }
1310
1311 int
1312 rte_eth_dev_set_link_up(uint16_t port_id)
1313 {
1314         struct rte_eth_dev *dev;
1315
1316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1317
1318         dev = &rte_eth_devices[port_id];
1319
1320         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1321         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1322 }
1323
1324 int
1325 rte_eth_dev_set_link_down(uint16_t port_id)
1326 {
1327         struct rte_eth_dev *dev;
1328
1329         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1330
1331         dev = &rte_eth_devices[port_id];
1332
1333         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1334         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1335 }
1336
1337 void
1338 rte_eth_dev_close(uint16_t port_id)
1339 {
1340         struct rte_eth_dev *dev;
1341
1342         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1343         dev = &rte_eth_devices[port_id];
1344
1345         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1346         dev->data->dev_started = 0;
1347         (*dev->dev_ops->dev_close)(dev);
1348
1349         dev->data->nb_rx_queues = 0;
1350         rte_free(dev->data->rx_queues);
1351         dev->data->rx_queues = NULL;
1352         dev->data->nb_tx_queues = 0;
1353         rte_free(dev->data->tx_queues);
1354         dev->data->tx_queues = NULL;
1355 }
1356
1357 int
1358 rte_eth_dev_reset(uint16_t port_id)
1359 {
1360         struct rte_eth_dev *dev;
1361         int ret;
1362
1363         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1364         dev = &rte_eth_devices[port_id];
1365
1366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1367
1368         rte_eth_dev_stop(port_id);
1369         ret = dev->dev_ops->dev_reset(dev);
1370
1371         return eth_err(port_id, ret);
1372 }
1373
1374 int __rte_experimental
1375 rte_eth_dev_is_removed(uint16_t port_id)
1376 {
1377         struct rte_eth_dev *dev;
1378         int ret;
1379
1380         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1381
1382         dev = &rte_eth_devices[port_id];
1383
1384         if (dev->state == RTE_ETH_DEV_REMOVED)
1385                 return 1;
1386
1387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1388
1389         ret = dev->dev_ops->is_removed(dev);
1390         if (ret != 0)
1391                 /* Device is physically removed. */
1392                 dev->state = RTE_ETH_DEV_REMOVED;
1393
1394         return ret;
1395 }
1396
1397 int
1398 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1399                        uint16_t nb_rx_desc, unsigned int socket_id,
1400                        const struct rte_eth_rxconf *rx_conf,
1401                        struct rte_mempool *mp)
1402 {
1403         int ret;
1404         uint32_t mbp_buf_size;
1405         struct rte_eth_dev *dev;
1406         struct rte_eth_dev_info dev_info;
1407         struct rte_eth_rxconf local_conf;
1408         void **rxq;
1409
1410         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1411
1412         dev = &rte_eth_devices[port_id];
1413         if (rx_queue_id >= dev->data->nb_rx_queues) {
1414                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1415                 return -EINVAL;
1416         }
1417
1418         if (dev->data->dev_started) {
1419                 RTE_PMD_DEBUG_TRACE(
1420                     "port %d must be stopped to allow configuration\n", port_id);
1421                 return -EBUSY;
1422         }
1423
1424         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1425         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1426
1427         /*
1428          * Check the size of the mbuf data buffer.
1429          * This value must be provided in the private data of the memory pool.
1430          * First check that the memory pool has a valid private data.
1431          */
1432         rte_eth_dev_info_get(port_id, &dev_info);
1433         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1434                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1435                                 mp->name, (int) mp->private_data_size,
1436                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1437                 return -ENOSPC;
1438         }
1439         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1440
1441         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1442                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1443                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1444                                 "=%d)\n",
1445                                 mp->name,
1446                                 (int)mbp_buf_size,
1447                                 (int)(RTE_PKTMBUF_HEADROOM +
1448                                       dev_info.min_rx_bufsize),
1449                                 (int)RTE_PKTMBUF_HEADROOM,
1450                                 (int)dev_info.min_rx_bufsize);
1451                 return -EINVAL;
1452         }
1453
1454         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1455                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1456                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1457
1458                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1459                         "should be: <= %hu, >= %hu, and a product of %hu\n",
1460                         nb_rx_desc,
1461                         dev_info.rx_desc_lim.nb_max,
1462                         dev_info.rx_desc_lim.nb_min,
1463                         dev_info.rx_desc_lim.nb_align);
1464                 return -EINVAL;
1465         }
1466
1467         rxq = dev->data->rx_queues;
1468         if (rxq[rx_queue_id]) {
1469                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1470                                         -ENOTSUP);
1471                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1472                 rxq[rx_queue_id] = NULL;
1473         }
1474
1475         if (rx_conf == NULL)
1476                 rx_conf = &dev_info.default_rxconf;
1477
1478         local_conf = *rx_conf;
1479         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1480                 /**
1481                  * Reflect port offloads to queue offloads in order for
1482                  * offloads to not be discarded.
1483                  */
1484                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1485                                                     &local_conf.offloads);
1486         }
1487
1488         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1489                                               socket_id, &local_conf, mp);
1490         if (!ret) {
1491                 if (!dev->data->min_rx_buf_size ||
1492                     dev->data->min_rx_buf_size > mbp_buf_size)
1493                         dev->data->min_rx_buf_size = mbp_buf_size;
1494         }
1495
1496         return eth_err(port_id, ret);
1497 }
1498
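/*
 * Usage sketch (illustrative, error handling trimmed): after
 * rte_eth_dev_configure(), each Rx queue gets a descriptor ring and a
 * mempool; the pool sizes below are arbitrary example values and a NULL
 * rx_conf selects the driver defaults:
 *
 *	struct rte_mempool *mb_pool;
 *
 *	mb_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_eth_dev_socket_id(port_id));
 *	if (mb_pool == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mb_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */
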
1499 /**
1500  * A conversion function from txq_flags API.
1501  */
1502 static void
1503 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1504 {
1505         uint64_t offloads = 0;
1506
1507         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1508                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1509         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1510                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1511         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1512                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1513         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1514                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1515         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1516                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1517         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1518             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1519                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1520
1521         *tx_offloads = offloads;
1522 }
1523
1524 /**
1525  * A conversion function from offloads API.
1526  */
1527 static void
1528 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1529 {
1530         uint32_t flags = 0;
1531
1532         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1533                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1534         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1535                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1536         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1537                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1538         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1539                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1540         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1541                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1542         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1543                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1544
1545         *txq_flags = flags;
1546 }
1547
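/*
 * Configure a Tx queue: validate the descriptor count against the PMD
 * limits, release any previously set up queue, convert between the
 * txq_flags and offloads APIs and invoke the PMD setup callback.
 */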
1548 int
1549 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1550                        uint16_t nb_tx_desc, unsigned int socket_id,
1551                        const struct rte_eth_txconf *tx_conf)
1552 {
1553         struct rte_eth_dev *dev;
1554         struct rte_eth_dev_info dev_info;
1555         struct rte_eth_txconf local_conf;
1556         void **txq;
1557
1558         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1559
1560         dev = &rte_eth_devices[port_id];
1561         if (tx_queue_id >= dev->data->nb_tx_queues) {
1562                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1563                 return -EINVAL;
1564         }
1565
1566         if (dev->data->dev_started) {
1567                 RTE_PMD_DEBUG_TRACE(
1568                     "port %d must be stopped to allow configuration\n", port_id);
1569                 return -EBUSY;
1570         }
1571
1572         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1573         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1574
1575         rte_eth_dev_info_get(port_id, &dev_info);
1576
1577         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1578             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1579             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1580                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1581                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1582                                 nb_tx_desc,
1583                                 dev_info.tx_desc_lim.nb_max,
1584                                 dev_info.tx_desc_lim.nb_min,
1585                                 dev_info.tx_desc_lim.nb_align);
1586                 return -EINVAL;
1587         }
1588
1589         txq = dev->data->tx_queues;
1590         if (txq[tx_queue_id]) {
1591                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1592                                         -ENOTSUP);
1593                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1594                 txq[tx_queue_id] = NULL;
1595         }
1596
1597         if (tx_conf == NULL)
1598                 tx_conf = &dev_info.default_txconf;
1599
1600         /*
1601          * Convert between the two Tx offload APIs (txq_flags and offloads)
1602          * so that a PMD needs to support only one of them.
1603          */
1604         local_conf = *tx_conf;
1605         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1606                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1607                                              &local_conf.txq_flags);
1608                 /* Keep the ignore flag. */
1609                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1610         } else {
1611                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1612                                           &local_conf.offloads);
1613         }
1614
1615         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1616                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1617 }
1618
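/* Default error callback for buffered Tx: silently free the unsent packets. */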
1619 void
1620 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1621                 void *userdata __rte_unused)
1622 {
1623         unsigned i;
1624
1625         for (i = 0; i < unsent; i++)
1626                 rte_pktmbuf_free(pkts[i]);
1627 }
1628
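/*
 * Error callback for buffered Tx: free the unsent packets and add their
 * number to the counter passed as userdata.
 */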
1629 void
1630 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1631                 void *userdata)
1632 {
1633         uint64_t *count = userdata;
1634         unsigned i;
1635
1636         for (i = 0; i < unsent; i++)
1637                 rte_pktmbuf_free(pkts[i]);
1638
1639         *count += unsent;
1640 }
1641
1642 int
1643 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1644                 buffer_tx_error_fn cbfn, void *userdata)
1645 {
1646         buffer->error_callback = cbfn;
1647         buffer->error_userdata = userdata;
1648         return 0;
1649 }
1650
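/*
 * Initialize a Tx buffer of the given size and install the drop callback
 * if no error callback has been set yet.
 */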
1651 int
1652 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1653 {
1654         int ret = 0;
1655
1656         if (buffer == NULL)
1657                 return -EINVAL;
1658
1659         buffer->size = size;
1660         if (buffer->error_callback == NULL) {
1661                 ret = rte_eth_tx_buffer_set_err_callback(
1662                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1663         }
1664
1665         return ret;
1666 }
1667
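/* Request the driver to free mbufs already sent on the given Tx queue. */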
1668 int
1669 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1670 {
1671         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1672         int ret;
1673
1674         /* Validate Input Data. Bail if not valid or not supported. */
1675         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1676         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1677
1678         /* Call driver to free pending mbufs. */
1679         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1680                                                free_cnt);
1681         return eth_err(port_id, ret);
1682 }
1683
1684 void
1685 rte_eth_promiscuous_enable(uint16_t port_id)
1686 {
1687         struct rte_eth_dev *dev;
1688
1689         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1690         dev = &rte_eth_devices[port_id];
1691
1692         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1693         (*dev->dev_ops->promiscuous_enable)(dev);
1694         dev->data->promiscuous = 1;
1695 }
1696
1697 void
1698 rte_eth_promiscuous_disable(uint16_t port_id)
1699 {
1700         struct rte_eth_dev *dev;
1701
1702         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1703         dev = &rte_eth_devices[port_id];
1704
1705         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1706         dev->data->promiscuous = 0;
1707         (*dev->dev_ops->promiscuous_disable)(dev);
1708 }
1709
1710 int
1711 rte_eth_promiscuous_get(uint16_t port_id)
1712 {
1713         struct rte_eth_dev *dev;
1714
1715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1716
1717         dev = &rte_eth_devices[port_id];
1718         return dev->data->promiscuous;
1719 }
1720
1721 void
1722 rte_eth_allmulticast_enable(uint16_t port_id)
1723 {
1724         struct rte_eth_dev *dev;
1725
1726         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1727         dev = &rte_eth_devices[port_id];
1728
1729         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1730         (*dev->dev_ops->allmulticast_enable)(dev);
1731         dev->data->all_multicast = 1;
1732 }
1733
1734 void
1735 rte_eth_allmulticast_disable(uint16_t port_id)
1736 {
1737         struct rte_eth_dev *dev;
1738
1739         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1740         dev = &rte_eth_devices[port_id];
1741
1742         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1743         dev->data->all_multicast = 0;
1744         (*dev->dev_ops->allmulticast_disable)(dev);
1745 }
1746
1747 int
1748 rte_eth_allmulticast_get(uint16_t port_id)
1749 {
1750         struct rte_eth_dev *dev;
1751
1752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1753
1754         dev = &rte_eth_devices[port_id];
1755         return dev->data->all_multicast;
1756 }
1757
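/*
 * Copy the link status cached in dev->data as one atomic 64-bit operation
 * so that a half-updated structure is never observed. Returns -1 if the
 * compare-and-set fails because of a concurrent update.
 */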
1758 static inline int
1759 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1760                                 struct rte_eth_link *link)
1761 {
1762         struct rte_eth_link *dst = link;
1763         struct rte_eth_link *src = &(dev->data->dev_link);
1764
1765         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1766                                         *(uint64_t *)src) == 0)
1767                 return -1;
1768
1769         return 0;
1770 }
1771
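/*
 * Retrieve the link status: use the cached value when link state change
 * interrupts are enabled, otherwise query the PMD and wait for completion.
 */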
1772 void
1773 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1774 {
1775         struct rte_eth_dev *dev;
1776
1777         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1778         dev = &rte_eth_devices[port_id];
1779
1780         if (dev->data->dev_conf.intr_conf.lsc != 0)
1781                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1782         else {
1783                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1784                 (*dev->dev_ops->link_update)(dev, 1);
1785                 *eth_link = dev->data->dev_link;
1786         }
1787 }
1788
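/*
 * Same as rte_eth_link_get() but never waits: the PMD link_update callback
 * is called with wait_to_complete set to 0.
 */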
1789 void
1790 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1791 {
1792         struct rte_eth_dev *dev;
1793
1794         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1795         dev = &rte_eth_devices[port_id];
1796
1797         if (dev->data->dev_conf.intr_conf.lsc != 0)
1798                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1799         else {
1800                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1801                 (*dev->dev_ops->link_update)(dev, 0);
1802                 *eth_link = dev->data->dev_link;
1803         }
1804 }
1805
1806 int
1807 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1808 {
1809         struct rte_eth_dev *dev;
1810
1811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1812
1813         dev = &rte_eth_devices[port_id];
1814         memset(stats, 0, sizeof(*stats));
1815
1816         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1817         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1818         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1819 }
1820
1821 int
1822 rte_eth_stats_reset(uint16_t port_id)
1823 {
1824         struct rte_eth_dev *dev;
1825
1826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1827         dev = &rte_eth_devices[port_id];
1828
1829         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1830         (*dev->dev_ops->stats_reset)(dev);
1831         dev->data->rx_mbuf_alloc_failed = 0;
1832
1833         return 0;
1834 }
1835
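/*
 * Number of generic (non driver-specific) xstats: global stats plus the
 * per-queue Rx/Tx stats, with the number of queues capped at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */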
1836 static inline int
1837 get_xstats_basic_count(struct rte_eth_dev *dev)
1838 {
1839         uint16_t nb_rxqs, nb_txqs;
1840         int count;
1841
1842         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1843         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1844
1845         count = RTE_NB_STATS;
1846         count += nb_rxqs * RTE_NB_RXQ_STATS;
1847         count += nb_txqs * RTE_NB_TXQ_STATS;
1848
1849         return count;
1850 }
1851
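/* Total number of xstats: generic stats plus those reported by the driver. */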
1852 static int
1853 get_xstats_count(uint16_t port_id)
1854 {
1855         struct rte_eth_dev *dev;
1856         int count;
1857
1858         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1859         dev = &rte_eth_devices[port_id];
1860         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1861                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1862                                 NULL, 0);
1863                 if (count < 0)
1864                         return eth_err(port_id, count);
1865         }
1866         if (dev->dev_ops->xstats_get_names != NULL) {
1867                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1868                 if (count < 0)
1869                         return eth_err(port_id, count);
1870         } else
1871                 count = 0;
1872
1873
1874         count += get_xstats_basic_count(dev);
1875
1876         return count;
1877 }
1878
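/*
 * Find the xstat id matching the given name by scanning the full id-name
 * lookup table of the port.
 */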
1879 int
1880 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1881                 uint64_t *id)
1882 {
1883         int cnt_xstats, idx_xstat;
1884
1885         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1886
1887         if (!id) {
1888                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1889                 return -ENOMEM;
1890         }
1891
1892         if (!xstat_name) {
1893                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1894                 return -ENOMEM;
1895         }
1896
1897         /* Get count */
1898         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1899         if (cnt_xstats < 0) {
1900                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1901                 return -ENODEV;
1902         }
1903
1904         /* Get id-name lookup table */
1905         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1906
1907         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1908                         port_id, xstats_names, cnt_xstats, NULL)) {
1909                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1910                 return -1;
1911         }
1912
1913         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1914                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1915                         *id = idx_xstat;
1916                         return 0;
1917                 }
1918         }
1919
1920         return -EINVAL;
1921 }
1922
1923 /* retrieve basic stats names */
1924 static int
1925 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1926         struct rte_eth_xstat_name *xstats_names)
1927 {
1928         int cnt_used_entries = 0;
1929         uint32_t idx, id_queue;
1930         uint16_t num_q;
1931
1932         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1933                 snprintf(xstats_names[cnt_used_entries].name,
1934                         sizeof(xstats_names[0].name),
1935                         "%s", rte_stats_strings[idx].name);
1936                 cnt_used_entries++;
1937         }
1938         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1939         for (id_queue = 0; id_queue < num_q; id_queue++) {
1940                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1941                         snprintf(xstats_names[cnt_used_entries].name,
1942                                 sizeof(xstats_names[0].name),
1943                                 "rx_q%u%s",
1944                                 id_queue, rte_rxq_stats_strings[idx].name);
1945                         cnt_used_entries++;
1946                 }
1947
1948         }
1949         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1950         for (id_queue = 0; id_queue < num_q; id_queue++) {
1951                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1952                         snprintf(xstats_names[cnt_used_entries].name,
1953                                 sizeof(xstats_names[0].name),
1954                                 "tx_q%u%s",
1955                                 id_queue, rte_txq_stats_strings[idx].name);
1956                         cnt_used_entries++;
1957                 }
1958         }
1959         return cnt_used_entries;
1960 }
1961
1962 /* retrieve ethdev extended statistics names */
1963 int
1964 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1965         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1966         uint64_t *ids)
1967 {
1968         struct rte_eth_xstat_name *xstats_names_copy;
1969         unsigned int no_basic_stat_requested = 1;
1970         unsigned int no_ext_stat_requested = 1;
1971         unsigned int expected_entries;
1972         unsigned int basic_count;
1973         struct rte_eth_dev *dev;
1974         unsigned int i;
1975         int ret;
1976
1977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1978         dev = &rte_eth_devices[port_id];
1979
1980         basic_count = get_xstats_basic_count(dev);
1981         ret = get_xstats_count(port_id);
1982         if (ret < 0)
1983                 return ret;
1984         expected_entries = (unsigned int)ret;
1985
1986         /* Return max number of stats if no ids given */
1987         if (!ids) {
1988                 if (!xstats_names)
1989                         return expected_entries;
1990                 else if (xstats_names && size < expected_entries)
1991                         return expected_entries;
1992         }
1993
1994         if (ids && !xstats_names)
1995                 return -EINVAL;
1996
1997         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1998                 uint64_t ids_copy[size];
1999
2000                 for (i = 0; i < size; i++) {
2001                         if (ids[i] < basic_count) {
2002                                 no_basic_stat_requested = 0;
2003                                 break;
2004                         }
2005
2006                         /*
2007                          * Convert ids to xstats ids that PMD knows.
2008                          * ids known by user are basic + extended stats.
2009                          */
2010                         ids_copy[i] = ids[i] - basic_count;
2011                 }
2012
2013                 if (no_basic_stat_requested)
2014                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2015                                         xstats_names, ids_copy, size);
2016         }
2017
2018         /* Retrieve all stats */
2019         if (!ids) {
2020                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2021                                 expected_entries);
2022                 if (num_stats < 0 || num_stats > (int)expected_entries)
2023                         return num_stats;
2024                 else
2025                         return expected_entries;
2026         }
2027
2028         xstats_names_copy = calloc(expected_entries,
2029                 sizeof(struct rte_eth_xstat_name));
2030
2031         if (!xstats_names_copy) {
2032                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2033                 return -ENOMEM;
2034         }
2035
2036         if (ids) {
2037                 for (i = 0; i < size; i++) {
2038                         if (ids[i] >= basic_count) {
2039                                 no_ext_stat_requested = 0;
2040                                 break;
2041                         }
2042                 }
2043         }
2044
2045         /* Fill xstats_names_copy structure */
2046         if (ids && no_ext_stat_requested) {
2047                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2048         } else {
2049                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2050                         expected_entries);
2051                 if (ret < 0) {
2052                         free(xstats_names_copy);
2053                         return ret;
2054                 }
2055         }
2056
2057         /* Filter stats */
2058         for (i = 0; i < size; i++) {
2059                 if (ids[i] >= expected_entries) {
2060                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2061                         free(xstats_names_copy);
2062                         return -1;
2063                 }
2064                 xstats_names[i] = xstats_names_copy[ids[i]];
2065         }
2066
2067         free(xstats_names_copy);
2068         return size;
2069 }
2070
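/*
 * Retrieve all xstats names: the generic names first, then any
 * driver-specific names appended by the PMD.
 */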
2071 int
2072 rte_eth_xstats_get_names(uint16_t port_id,
2073         struct rte_eth_xstat_name *xstats_names,
2074         unsigned int size)
2075 {
2076         struct rte_eth_dev *dev;
2077         int cnt_used_entries;
2078         int cnt_expected_entries;
2079         int cnt_driver_entries;
2080
2081         cnt_expected_entries = get_xstats_count(port_id);
2082         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2083                         (int)size < cnt_expected_entries)
2084                 return cnt_expected_entries;
2085
2086         /* port_id checked in get_xstats_count() */
2087         dev = &rte_eth_devices[port_id];
2088
2089         cnt_used_entries = rte_eth_basic_stats_get_names(
2090                 dev, xstats_names);
2091
2092         if (dev->dev_ops->xstats_get_names != NULL) {
2093                 /* If there are any driver-specific xstats, append them
2094                  * to end of list.
2095                  */
2096                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2097                         dev,
2098                         xstats_names + cnt_used_entries,
2099                         size - cnt_used_entries);
2100                 if (cnt_driver_entries < 0)
2101                         return eth_err(port_id, cnt_driver_entries);
2102                 cnt_used_entries += cnt_driver_entries;
2103         }
2104
2105         return cnt_used_entries;
2106 }
2107
2108
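/*
 * Fill the generic part of an xstats array from rte_eth_stats; return the
 * number of entries written.
 */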
2109 static int
2110 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2111 {
2112         struct rte_eth_dev *dev;
2113         struct rte_eth_stats eth_stats;
2114         unsigned int count = 0, i, q;
2115         uint64_t val, *stats_ptr;
2116         uint16_t nb_rxqs, nb_txqs;
2117         int ret;
2118
2119         ret = rte_eth_stats_get(port_id, &eth_stats);
2120         if (ret < 0)
2121                 return ret;
2122
2123         dev = &rte_eth_devices[port_id];
2124
2125         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2126         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2127
2128         /* global stats */
2129         for (i = 0; i < RTE_NB_STATS; i++) {
2130                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2131                                         rte_stats_strings[i].offset);
2132                 val = *stats_ptr;
2133                 xstats[count++].value = val;
2134         }
2135
2136         /* per-rxq stats */
2137         for (q = 0; q < nb_rxqs; q++) {
2138                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2139                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2140                                         rte_rxq_stats_strings[i].offset +
2141                                         q * sizeof(uint64_t));
2142                         val = *stats_ptr;
2143                         xstats[count++].value = val;
2144                 }
2145         }
2146
2147         /* per-txq stats */
2148         for (q = 0; q < nb_txqs; q++) {
2149                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2150                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2151                                         rte_txq_stats_strings[i].offset +
2152                                         q * sizeof(uint64_t));
2153                         val = *stats_ptr;
2154                         xstats[count++].value = val;
2155                 }
2156         }
2157         return count;
2158 }
2159
2160 /* retrieve ethdev extended statistics */
2161 int
2162 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2163                          uint64_t *values, unsigned int size)
2164 {
2165         unsigned int no_basic_stat_requested = 1;
2166         unsigned int no_ext_stat_requested = 1;
2167         unsigned int num_xstats_filled;
2168         unsigned int basic_count;
2169         uint16_t expected_entries;
2170         struct rte_eth_dev *dev;
2171         unsigned int i;
2172         int ret;
2173
2174         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2175         ret = get_xstats_count(port_id);
2176         if (ret < 0)
2177                 return ret;
2178         expected_entries = (uint16_t)ret;
2179         struct rte_eth_xstat xstats[expected_entries];
2180         dev = &rte_eth_devices[port_id];
2181         basic_count = get_xstats_basic_count(dev);
2182
2183         /* Return max number of stats if no ids given */
2184         if (!ids) {
2185                 if (!values)
2186                         return expected_entries;
2187                 else if (values && size < expected_entries)
2188                         return expected_entries;
2189         }
2190
2191         if (ids && !values)
2192                 return -EINVAL;
2193
2194         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2195                 unsigned int basic_count = get_xstats_basic_count(dev);
2196                 uint64_t ids_copy[size];
2197
2198                 for (i = 0; i < size; i++) {
2199                         if (ids[i] < basic_count) {
2200                                 no_basic_stat_requested = 0;
2201                                 break;
2202                         }
2203
2204                         /*
2205                          * Convert ids to xstats ids that PMD knows.
2206                          * ids known by user are basic + extended stats.
2207                          */
2208                         ids_copy[i] = ids[i] - basic_count;
2209                 }
2210
2211                 if (no_basic_stat_requested)
2212                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2213                                         values, size);
2214         }
2215
2216         if (ids) {
2217                 for (i = 0; i < size; i++) {
2218                         if (ids[i] >= basic_count) {
2219                                 no_ext_stat_requested = 0;
2220                                 break;
2221                         }
2222                 }
2223         }
2224
2225         /* Fill the xstats structure */
2226         if (ids && no_ext_stat_requested)
2227                 ret = rte_eth_basic_stats_get(port_id, xstats);
2228         else
2229                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2230
2231         if (ret < 0)
2232                 return ret;
2233         num_xstats_filled = (unsigned int)ret;
2234
2235         /* Return all stats */
2236         if (!ids) {
2237                 for (i = 0; i < num_xstats_filled; i++)
2238                         values[i] = xstats[i].value;
2239                 return expected_entries;
2240         }
2241
2242         /* Filter stats */
2243         for (i = 0; i < size; i++) {
2244                 if (ids[i] >= expected_entries) {
2245                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2246                         return -1;
2247                 }
2248                 values[i] = xstats[ids[i]].value;
2249         }
2250         return size;
2251 }
2252
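/*
 * Retrieve all xstats: generic statistics first, then driver-specific ones
 * with their ids offset past the generic entries.
 */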
2253 int
2254 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2255         unsigned int n)
2256 {
2257         struct rte_eth_dev *dev;
2258         unsigned int count = 0, i;
2259         signed int xcount = 0;
2260         uint16_t nb_rxqs, nb_txqs;
2261         int ret;
2262
2263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2264
2265         dev = &rte_eth_devices[port_id];
2266
2267         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2268         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2269
2270         /* Return generic statistics */
2271         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2272                 (nb_txqs * RTE_NB_TXQ_STATS);
2273
2274         /* implemented by the driver */
2275         if (dev->dev_ops->xstats_get != NULL) {
2276                 /* Retrieve the xstats from the driver at the end of the
2277                  * xstats struct.
2278                  */
2279                 xcount = (*dev->dev_ops->xstats_get)(dev,
2280                                      xstats ? xstats + count : NULL,
2281                                      (n > count) ? n - count : 0);
2282
2283                 if (xcount < 0)
2284                         return eth_err(port_id, xcount);
2285         }
2286
2287         if (n < count + xcount || xstats == NULL)
2288                 return count + xcount;
2289
2290         /* now fill the xstats structure */
2291         ret = rte_eth_basic_stats_get(port_id, xstats);
2292         if (ret < 0)
2293                 return ret;
2294         count = ret;
2295
2296         for (i = 0; i < count; i++)
2297                 xstats[i].id = i;
2298         /* add an offset to driver-specific stats */
2299         for ( ; i < count + xcount; i++)
2300                 xstats[i].id += count;
2301
2302         return count + xcount;
2303 }
2304
2305 /* reset ethdev extended statistics */
2306 void
2307 rte_eth_xstats_reset(uint16_t port_id)
2308 {
2309         struct rte_eth_dev *dev;
2310
2311         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2312         dev = &rte_eth_devices[port_id];
2313
2314         /* implemented by the driver */
2315         if (dev->dev_ops->xstats_reset != NULL) {
2316                 (*dev->dev_ops->xstats_reset)(dev);
2317                 return;
2318         }
2319
2320         /* fallback to default */
2321         rte_eth_stats_reset(port_id);
2322 }
2323
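/*
 * Map an Rx or Tx queue to a per-queue statistics counter index through
 * the PMD callback.
 */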
2324 static int
2325 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2326                 uint8_t is_rx)
2327 {
2328         struct rte_eth_dev *dev;
2329
2330         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2331
2332         dev = &rte_eth_devices[port_id];
2333
2334         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2335         return (*dev->dev_ops->queue_stats_mapping_set)
2336                         (dev, queue_id, stat_idx, is_rx);
2337 }
2338
2339
2340 int
2341 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2342                 uint8_t stat_idx)
2343 {
2344         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2345                                                 stat_idx, STAT_QMAP_TX));
2346 }
2347
2348
2349 int
2350 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2351                 uint8_t stat_idx)
2352 {
2353         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2354                                                 stat_idx, STAT_QMAP_RX));
2355 }
2356
2357 int
2358 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363         dev = &rte_eth_devices[port_id];
2364
2365         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2366         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2367                                                         fw_version, fw_size));
2368 }
2369
2370 void
2371 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2372 {
2373         struct rte_eth_dev *dev;
2374         const struct rte_eth_desc_lim lim = {
2375                 .nb_max = UINT16_MAX,
2376                 .nb_min = 0,
2377                 .nb_align = 1,
2378         };
2379
2380         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2381         dev = &rte_eth_devices[port_id];
2382
2383         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2384         dev_info->rx_desc_lim = lim;
2385         dev_info->tx_desc_lim = lim;
2386
2387         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2388         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2389         dev_info->driver_name = dev->device->driver->name;
2390         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2391         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2392 }
2393
2394 int
2395 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2396                                  uint32_t *ptypes, int num)
2397 {
2398         int i, j;
2399         struct rte_eth_dev *dev;
2400         const uint32_t *all_ptypes;
2401
2402         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2403         dev = &rte_eth_devices[port_id];
2404         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2405         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2406
2407         if (!all_ptypes)
2408                 return 0;
2409
2410         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2411                 if (all_ptypes[i] & ptype_mask) {
2412                         if (j < num)
2413                                 ptypes[j] = all_ptypes[i];
2414                         j++;
2415                 }
2416
2417         return j;
2418 }
2419
2420 void
2421 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2422 {
2423         struct rte_eth_dev *dev;
2424
2425         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2426         dev = &rte_eth_devices[port_id];
2427         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2428 }
2429
2430
2431 int
2432 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2433 {
2434         struct rte_eth_dev *dev;
2435
2436         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2437
2438         dev = &rte_eth_devices[port_id];
2439         *mtu = dev->data->mtu;
2440         return 0;
2441 }
2442
2443 int
2444 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2445 {
2446         int ret;
2447         struct rte_eth_dev *dev;
2448
2449         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2450         dev = &rte_eth_devices[port_id];
2451         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2452
2453         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2454         if (!ret)
2455                 dev->data->mtu = mtu;
2456
2457         return eth_err(port_id, ret);
2458 }
2459
2460 int
2461 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2462 {
2463         struct rte_eth_dev *dev;
2464         int ret;
2465
2466         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2467         dev = &rte_eth_devices[port_id];
2468         if (!(dev->data->dev_conf.rxmode.offloads &
2469               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2470                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2471                 return -ENOSYS;
2472         }
2473
2474         if (vlan_id > 4095) {
2475                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2476                                 port_id, (unsigned) vlan_id);
2477                 return -EINVAL;
2478         }
2479         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2480
2481         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2482         if (ret == 0) {
2483                 struct rte_vlan_filter_conf *vfc;
2484                 int vidx;
2485                 int vbit;
2486
2487                 vfc = &dev->data->vlan_filter_conf;
2488                 vidx = vlan_id / 64;
2489                 vbit = vlan_id % 64;
2490
2491                 if (on)
2492                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2493                 else
2494                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2495         }
2496
2497         return eth_err(port_id, ret);
2498 }
2499
2500 int
2501 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2502                                     int on)
2503 {
2504         struct rte_eth_dev *dev;
2505
2506         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2507         dev = &rte_eth_devices[port_id];
2508         if (rx_queue_id >= dev->data->nb_rx_queues) {
2509                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2510                 return -EINVAL;
2511         }
2512
2513         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2514         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2515
2516         return 0;
2517 }
2518
2519 int
2520 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2521                                 enum rte_vlan_type vlan_type,
2522                                 uint16_t tpid)
2523 {
2524         struct rte_eth_dev *dev;
2525
2526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2527         dev = &rte_eth_devices[port_id];
2528         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2529
2530         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2531                                                                tpid));
2532 }
2533
2534 int
2535 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2536 {
2537         struct rte_eth_dev *dev;
2538         int ret = 0;
2539         int mask = 0;
2540         int cur, org = 0;
2541         uint64_t orig_offloads;
2542
2543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2544         dev = &rte_eth_devices[port_id];
2545
2546         /* save original values in case of failure */
2547         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2548
2549         /*check which option changed by application*/
2550         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2551         org = !!(dev->data->dev_conf.rxmode.offloads &
2552                  DEV_RX_OFFLOAD_VLAN_STRIP);
2553         if (cur != org) {
2554                 if (cur)
2555                         dev->data->dev_conf.rxmode.offloads |=
2556                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2557                 else
2558                         dev->data->dev_conf.rxmode.offloads &=
2559                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2560                 mask |= ETH_VLAN_STRIP_MASK;
2561         }
2562
2563         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2564         org = !!(dev->data->dev_conf.rxmode.offloads &
2565                  DEV_RX_OFFLOAD_VLAN_FILTER);
2566         if (cur != org) {
2567                 if (cur)
2568                         dev->data->dev_conf.rxmode.offloads |=
2569                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2570                 else
2571                         dev->data->dev_conf.rxmode.offloads &=
2572                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2573                 mask |= ETH_VLAN_FILTER_MASK;
2574         }
2575
2576         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2577         org = !!(dev->data->dev_conf.rxmode.offloads &
2578                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2579         if (cur != org) {
2580                 if (cur)
2581                         dev->data->dev_conf.rxmode.offloads |=
2582                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2583                 else
2584                         dev->data->dev_conf.rxmode.offloads &=
2585                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2586                 mask |= ETH_VLAN_EXTEND_MASK;
2587         }
2588
2589         /*no change*/
2590         if (mask == 0)
2591                 return ret;
2592
2593         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2594
2595         /*
2596          * Convert to the offload bitfield API just in case the underlying PMD
2597          * still supports it.
2598          */
2599         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2600                                     &dev->data->dev_conf.rxmode);
2601         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2602         if (ret) {
2603                 /* hit an error, restore original values */
2604                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2605                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2606                                             &dev->data->dev_conf.rxmode);
2607         }
2608
2609         return eth_err(port_id, ret);
2610 }
2611
2612 int
2613 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2614 {
2615         struct rte_eth_dev *dev;
2616         int ret = 0;
2617
2618         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2619         dev = &rte_eth_devices[port_id];
2620
2621         if (dev->data->dev_conf.rxmode.offloads &
2622             DEV_RX_OFFLOAD_VLAN_STRIP)
2623                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2624
2625         if (dev->data->dev_conf.rxmode.offloads &
2626             DEV_RX_OFFLOAD_VLAN_FILTER)
2627                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2628
2629         if (dev->data->dev_conf.rxmode.offloads &
2630             DEV_RX_OFFLOAD_VLAN_EXTEND)
2631                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2632
2633         return ret;
2634 }
2635
2636 int
2637 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2638 {
2639         struct rte_eth_dev *dev;
2640
2641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2642         dev = &rte_eth_devices[port_id];
2643         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2644
2645         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2646 }
2647
2648 int
2649 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2650 {
2651         struct rte_eth_dev *dev;
2652
2653         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2654         dev = &rte_eth_devices[port_id];
2655         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2656         memset(fc_conf, 0, sizeof(*fc_conf));
2657         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2658 }
2659
2660 int
2661 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2662 {
2663         struct rte_eth_dev *dev;
2664
2665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2666         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2667                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2668                 return -EINVAL;
2669         }
2670
2671         dev = &rte_eth_devices[port_id];
2672         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2673         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2674 }
2675
2676 int
2677 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2678                                    struct rte_eth_pfc_conf *pfc_conf)
2679 {
2680         struct rte_eth_dev *dev;
2681
2682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2683         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2684                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2685                 return -EINVAL;
2686         }
2687
2688         dev = &rte_eth_devices[port_id];
2689         /* High water, low water validation are device specific */
2690         if (*dev->dev_ops->priority_flow_ctrl_set)
2691                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2692                                         (dev, pfc_conf));
2693         return -ENOTSUP;
2694 }
2695
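/*
 * Sanity check of a RETA request: at least one mask bit must be set across
 * the redirection table groups.
 */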
2696 static int
2697 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2698                         uint16_t reta_size)
2699 {
2700         uint16_t i, num;
2701
2702         if (!reta_conf)
2703                 return -EINVAL;
2704
2705         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2706         for (i = 0; i < num; i++) {
2707                 if (reta_conf[i].mask)
2708                         return 0;
2709         }
2710
2711         return -EINVAL;
2712 }
2713
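/*
 * Check that every RETA entry selected by the mask refers to an existing
 * Rx queue.
 */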
2714 static int
2715 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2716                          uint16_t reta_size,
2717                          uint16_t max_rxq)
2718 {
2719         uint16_t i, idx, shift;
2720
2721         if (!reta_conf)
2722                 return -EINVAL;
2723
2724         if (max_rxq == 0) {
2725                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2726                 return -EINVAL;
2727         }
2728
2729         for (i = 0; i < reta_size; i++) {
2730                 idx = i / RTE_RETA_GROUP_SIZE;
2731                 shift = i % RTE_RETA_GROUP_SIZE;
2732                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2733                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2734                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2735                                 "the maximum rxq index: %u\n", idx, shift,
2736                                 reta_conf[idx].reta[shift], max_rxq);
2737                         return -EINVAL;
2738                 }
2739         }
2740
2741         return 0;
2742 }
2743
2744 int
2745 rte_eth_dev_rss_reta_update(uint16_t port_id,
2746                             struct rte_eth_rss_reta_entry64 *reta_conf,
2747                             uint16_t reta_size)
2748 {
2749         struct rte_eth_dev *dev;
2750         int ret;
2751
2752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2753         /* Check mask bits */
2754         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2755         if (ret < 0)
2756                 return ret;
2757
2758         dev = &rte_eth_devices[port_id];
2759
2760         /* Check entry value */
2761         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2762                                 dev->data->nb_rx_queues);
2763         if (ret < 0)
2764                 return ret;
2765
2766         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2767         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2768                                                              reta_size));
2769 }
2770
2771 int
2772 rte_eth_dev_rss_reta_query(uint16_t port_id,
2773                            struct rte_eth_rss_reta_entry64 *reta_conf,
2774                            uint16_t reta_size)
2775 {
2776         struct rte_eth_dev *dev;
2777         int ret;
2778
2779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2780
2781         /* Check mask bits */
2782         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2783         if (ret < 0)
2784                 return ret;
2785
2786         dev = &rte_eth_devices[port_id];
2787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2788         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2789                                                             reta_size));
2790 }
2791
2792 int
2793 rte_eth_dev_rss_hash_update(uint16_t port_id,
2794                             struct rte_eth_rss_conf *rss_conf)
2795 {
2796         struct rte_eth_dev *dev;
2797
2798         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2799         dev = &rte_eth_devices[port_id];
2800         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2801         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2802                                                                  rss_conf));
2803 }
2804
2805 int
2806 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2807                               struct rte_eth_rss_conf *rss_conf)
2808 {
2809         struct rte_eth_dev *dev;
2810
2811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2812         dev = &rte_eth_devices[port_id];
2813         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2814         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2815                                                                    rss_conf));
2816 }
2817
2818 int
2819 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2820                                 struct rte_eth_udp_tunnel *udp_tunnel)
2821 {
2822         struct rte_eth_dev *dev;
2823
2824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825         if (udp_tunnel == NULL) {
2826                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2827                 return -EINVAL;
2828         }
2829
2830         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2831                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2832                 return -EINVAL;
2833         }
2834
2835         dev = &rte_eth_devices[port_id];
2836         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2837         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2838                                                                 udp_tunnel));
2839 }
2840
2841 int
2842 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2843                                    struct rte_eth_udp_tunnel *udp_tunnel)
2844 {
2845         struct rte_eth_dev *dev;
2846
2847         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2848         dev = &rte_eth_devices[port_id];
2849
2850         if (udp_tunnel == NULL) {
2851                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2852                 return -EINVAL;
2853         }
2854
2855         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2856                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2857                 return -EINVAL;
2858         }
2859
2860         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2861         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2862                                                                 udp_tunnel));
2863 }
2864
2865 int
2866 rte_eth_led_on(uint16_t port_id)
2867 {
2868         struct rte_eth_dev *dev;
2869
2870         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2871         dev = &rte_eth_devices[port_id];
2872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2873         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2874 }
2875
2876 int
2877 rte_eth_led_off(uint16_t port_id)
2878 {
2879         struct rte_eth_dev *dev;
2880
2881         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2882         dev = &rte_eth_devices[port_id];
2883         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2884         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2885 }
2886
2887 /*
2888  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2889  * an empty spot.
2890  */
2891 static int
2892 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2893 {
2894         struct rte_eth_dev_info dev_info;
2895         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2896         unsigned i;
2897
2898         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2899         rte_eth_dev_info_get(port_id, &dev_info);
2900
2901         for (i = 0; i < dev_info.max_mac_addrs; i++)
2902                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2903                         return i;
2904
2905         return -1;
2906 }
2907
2908 static const struct ether_addr null_mac_addr;
2909
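/*
 * Add a MAC address to a port, reusing its slot if the address is already
 * present, and record the VMDq pool it belongs to.
 */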
2910 int
2911 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2912                         uint32_t pool)
2913 {
2914         struct rte_eth_dev *dev;
2915         int index;
2916         uint64_t pool_mask;
2917         int ret;
2918
2919         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2920         dev = &rte_eth_devices[port_id];
2921         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2922
2923         if (is_zero_ether_addr(addr)) {
2924                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2925                         port_id);
2926                 return -EINVAL;
2927         }
2928         if (pool >= ETH_64_POOLS) {
2929                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2930                 return -EINVAL;
2931         }
2932
2933         index = get_mac_addr_index(port_id, addr);
2934         if (index < 0) {
2935                 index = get_mac_addr_index(port_id, &null_mac_addr);
2936                 if (index < 0) {
2937                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2938                                 port_id);
2939                         return -ENOSPC;
2940                 }
2941         } else {
2942                 pool_mask = dev->data->mac_pool_sel[index];
2943
2944                 /* Check if both MAC address and pool are already there; if so, do nothing */
2945                 if (pool_mask & (1ULL << pool))
2946                         return 0;
2947         }
2948
2949         /* Update NIC */
2950         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2951
2952         if (ret == 0) {
2953                 /* Update address in NIC data structure */
2954                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2955
2956                 /* Update pool bitmap in NIC data structure */
2957                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2958         }
2959
2960         return eth_err(port_id, ret);
2961 }
2962
2963 int
2964 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2965 {
2966         struct rte_eth_dev *dev;
2967         int index;
2968
2969         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2970         dev = &rte_eth_devices[port_id];
2971         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2972
2973         index = get_mac_addr_index(port_id, addr);
2974         if (index == 0) {
2975                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2976                 return -EADDRINUSE;
2977         } else if (index < 0)
2978                 return 0;  /* Do nothing if address wasn't found */
2979
2980         /* Update NIC */
2981         (*dev->dev_ops->mac_addr_remove)(dev, index);
2982
2983         /* Update address in NIC data structure */
2984         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2985
2986         /* reset pool bitmap */
2987         dev->data->mac_pool_sel[index] = 0;
2988
2989         return 0;
2990 }
2991
2992 int
2993 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2994 {
2995         struct rte_eth_dev *dev;
2996
2997         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2998
2999         if (!is_valid_assigned_ether_addr(addr))
3000                 return -EINVAL;
3001
3002         dev = &rte_eth_devices[port_id];
3003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3004
3005         /* Update default address in NIC data structure */
3006         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3007
3008         (*dev->dev_ops->mac_addr_set)(dev, addr);
3009
3010         return 0;
3011 }
3012
3013
3014 /*
3015  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3016  * an empty spot.
3017  */
3018 static int
3019 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3020 {
3021         struct rte_eth_dev_info dev_info;
3022         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3023         unsigned i;
3024
3025         rte_eth_dev_info_get(port_id, &dev_info);
3026         if (!dev->data->hash_mac_addrs)
3027                 return -1;
3028
3029         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3030                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3031                         ETHER_ADDR_LEN) == 0)
3032                         return i;
3033
3034         return -1;
3035 }
3036
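/*
 * Add or remove a unicast MAC address in the device hash filter (UTA) and
 * mirror the change in the local shadow table.
 */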
3037 int
3038 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3039                                 uint8_t on)
3040 {
3041         int index;
3042         int ret;
3043         struct rte_eth_dev *dev;
3044
3045         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3046
3047         dev = &rte_eth_devices[port_id];
3048         if (is_zero_ether_addr(addr)) {
3049                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3050                         port_id);
3051                 return -EINVAL;
3052         }
3053
3054         index = get_hash_mac_addr_index(port_id, addr);
3055         /* Check if it's already there, and do nothing */
3056         if ((index >= 0) && on)
3057                 return 0;
3058
3059         if (index < 0) {
3060                 if (!on) {
3061                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3062                                 "set in UTA\n", port_id);
3063                         return -EINVAL;
3064                 }
3065
3066                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3067                 if (index < 0) {
3068                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3069                                         port_id);
3070                         return -ENOSPC;
3071                 }
3072         }
3073
3074         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3075         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3076         if (ret == 0) {
3077                 /* Update address in NIC data structure */
3078                 if (on)
3079                         ether_addr_copy(addr,
3080                                         &dev->data->hash_mac_addrs[index]);
3081                 else
3082                         ether_addr_copy(&null_mac_addr,
3083                                         &dev->data->hash_mac_addrs[index]);
3084         }
3085
3086         return eth_err(port_id, ret);
3087 }
3088
3089 int
3090 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3091 {
3092         struct rte_eth_dev *dev;
3093
3094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3095
3096         dev = &rte_eth_devices[port_id];
3097
3098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3099         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3100                                                                        on));
3101 }
3102
3103 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3104                                         uint16_t tx_rate)
3105 {
3106         struct rte_eth_dev *dev;
3107         struct rte_eth_dev_info dev_info;
3108         struct rte_eth_link link;
3109
3110         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3111
3112         dev = &rte_eth_devices[port_id];
3113         rte_eth_dev_info_get(port_id, &dev_info);
3114         link = dev->data->dev_link;
3115
3116         if (queue_idx >= dev_info.max_tx_queues) {
3117                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3118                                 "invalid queue id=%d\n", port_id, queue_idx);
3119                 return -EINVAL;
3120         }
3121
3122         if (tx_rate > link.link_speed) {
3123                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
3124                                 "bigger than link speed=%d\n",
3125                         tx_rate, link.link_speed);
3126                 return -EINVAL;
3127         }
3128
3129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3130         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3131                                                         queue_idx, tx_rate));
3132 }
3133
3134 int
3135 rte_eth_mirror_rule_set(uint16_t port_id,
3136                         struct rte_eth_mirror_conf *mirror_conf,
3137                         uint8_t rule_id, uint8_t on)
3138 {
3139         struct rte_eth_dev *dev;
3140
3141         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3142         if (mirror_conf->rule_type == 0) {
3143                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
3144                 return -EINVAL;
3145         }
3146
3147         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3148                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3149                                 ETH_64_POOLS - 1);
3150                 return -EINVAL;
3151         }
3152
3153         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3154              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3155             (mirror_conf->pool_mask == 0)) {
3156                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
3157                 return -EINVAL;
3158         }
3159
3160         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3161             mirror_conf->vlan.vlan_mask == 0) {
3162                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
3163                 return -EINVAL;
3164         }
3165
3166         dev = &rte_eth_devices[port_id];
3167         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3168
3169         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3170                                                 mirror_conf, rule_id, on));
3171 }
3172
3173 int
3174 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3175 {
3176         struct rte_eth_dev *dev;
3177
3178         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179
3180         dev = &rte_eth_devices[port_id];
3181         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3182
3183         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3184                                                                    rule_id));
3185 }
3186
3187 RTE_INIT(eth_dev_init_cb_lists)
3188 {
3189         int i;
3190
3191         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3192                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3193 }
3194
3195 int
3196 rte_eth_dev_callback_register(uint16_t port_id,
3197                         enum rte_eth_event_type event,
3198                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3199 {
3200         struct rte_eth_dev *dev;
3201         struct rte_eth_dev_callback *user_cb;
3202         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3203         uint16_t last_port;
3204
3205         if (!cb_fn)
3206                 return -EINVAL;
3207
3208         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3209                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3210                 return -EINVAL;
3211         }
3212
3213         if (port_id == RTE_ETH_ALL) {
3214                 next_port = 0;
3215                 last_port = RTE_MAX_ETHPORTS - 1;
3216         } else {
3217                 next_port = last_port = port_id;
3218         }
3219
3220         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3221
3222         do {
3223                 dev = &rte_eth_devices[next_port];
3224
3225                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3226                         if (user_cb->cb_fn == cb_fn &&
3227                                 user_cb->cb_arg == cb_arg &&
3228                                 user_cb->event == event) {
3229                                 break;
3230                         }
3231                 }
3232
3233                 /* create a new callback. */
3234                 if (user_cb == NULL) {
3235                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3236                                 sizeof(struct rte_eth_dev_callback), 0);
3237                         if (user_cb != NULL) {
3238                                 user_cb->cb_fn = cb_fn;
3239                                 user_cb->cb_arg = cb_arg;
3240                                 user_cb->event = event;
3241                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3242                                                   user_cb, next);
3243                         } else {
3244                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3245                                 rte_eth_dev_callback_unregister(port_id, event,
3246                                                                 cb_fn, cb_arg);
3247                                 return -ENOMEM;
3248                         }
3249
3250                 }
3251         } while (++next_port <= last_port);
3252
3253         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3254         return 0;
3255 }
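
/*
 * Illustrative sketch (not part of this file): registering a link status
 * change callback on every port. "lsc_event_cb" is a hypothetical
 * application function matching rte_eth_dev_cb_fn.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */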
3256
3257 int
3258 rte_eth_dev_callback_unregister(uint16_t port_id,
3259                         enum rte_eth_event_type event,
3260                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3261 {
3262         int ret;
3263         struct rte_eth_dev *dev;
3264         struct rte_eth_dev_callback *cb, *next;
3265         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3266         uint16_t last_port;
3267
3268         if (!cb_fn)
3269                 return -EINVAL;
3270
3271         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3272                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3273                 return -EINVAL;
3274         }
3275
3276         if (port_id == RTE_ETH_ALL) {
3277                 next_port = 0;
3278                 last_port = RTE_MAX_ETHPORTS - 1;
3279         } else {
3280                 next_port = last_port = port_id;
3281         }
3282
3283         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3284
3285         do {
3286                 dev = &rte_eth_devices[next_port];
3287                 ret = 0;
3288                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3289                      cb = next) {
3290
3291                         next = TAILQ_NEXT(cb, next);
3292
3293                         if (cb->cb_fn != cb_fn || cb->event != event ||
3294                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3295                                 continue;
3296
3297                         /*
3298                          * if this callback is not executing right now,
3299                          * then remove it.
3300                          */
3301                         if (cb->active == 0) {
3302                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3303                                 rte_free(cb);
3304                         } else {
3305                                 ret = -EAGAIN;
3306                         }
3307                 }
3308         } while (++next_port <= last_port);
3309
3310         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3311         return ret;
3312 }
3313
3314 int
3315 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3316         enum rte_eth_event_type event, void *ret_param)
3317 {
3318         struct rte_eth_dev_callback *cb_lst;
3319         struct rte_eth_dev_callback dev_cb;
3320         int rc = 0;
3321
3322         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3323         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3324                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3325                         continue;
3326                 dev_cb = *cb_lst;
3327                 cb_lst->active = 1;
3328                 if (ret_param != NULL)
3329                         dev_cb.ret_param = ret_param;
3330
3331                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3332                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3333                                 dev_cb.cb_arg, dev_cb.ret_param);
3334                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3335                 cb_lst->active = 0;
3336         }
3337         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3338         return rc;
3339 }
3340
3341 int
3342 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3343 {
3344         uint32_t vec;
3345         struct rte_eth_dev *dev;
3346         struct rte_intr_handle *intr_handle;
3347         uint16_t qid;
3348         int rc;
3349
3350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3351
3352         dev = &rte_eth_devices[port_id];
3353
3354         if (!dev->intr_handle) {
3355                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3356                 return -ENOTSUP;
3357         }
3358
3359         intr_handle = dev->intr_handle;
3360         if (!intr_handle->intr_vec) {
3361                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3362                 return -EPERM;
3363         }
3364
3365         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3366                 vec = intr_handle->intr_vec[qid];
3367                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3368                 if (rc && rc != -EEXIST) {
3369                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3370                                         " op %d epfd %d vec %u\n",
3371                                         port_id, qid, op, epfd, vec);
3372                 }
3373         }
3374
3375         return 0;
3376 }
3377
3378 const struct rte_memzone *
3379 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3380                          uint16_t queue_id, size_t size, unsigned align,
3381                          int socket_id)
3382 {
3383         char z_name[RTE_MEMZONE_NAMESIZE];
3384         const struct rte_memzone *mz;
3385
3386         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3387                  dev->device->driver->name, ring_name,
3388                  dev->data->port_id, queue_id);
3389
3390         mz = rte_memzone_lookup(z_name);
3391         if (mz)
3392                 return mz;
3393
3394         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3395 }
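
/*
 * Note on rte_eth_dma_zone_reserve(): the memzone name is built from the
 * driver name, ring name, port id and queue id, so a queue that is set up
 * again after a stop/reconfigure cycle finds and reuses its existing
 * memzone instead of reserving a second one. The size of a reused zone is
 * not re-checked here; if a larger ring is requested under the same name,
 * the existing (possibly smaller) zone is returned unchanged.
 */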
3396
3397 int
3398 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3399                           int epfd, int op, void *data)
3400 {
3401         uint32_t vec;
3402         struct rte_eth_dev *dev;
3403         struct rte_intr_handle *intr_handle;
3404         int rc;
3405
3406         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3407
3408         dev = &rte_eth_devices[port_id];
3409         if (queue_id >= dev->data->nb_rx_queues) {
3410                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3411                 return -EINVAL;
3412         }
3413
3414         if (!dev->intr_handle) {
3415                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3416                 return -ENOTSUP;
3417         }
3418
3419         intr_handle = dev->intr_handle;
3420         if (!intr_handle->intr_vec) {
3421                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3422                 return -EPERM;
3423         }
3424
3425         vec = intr_handle->intr_vec[queue_id];
3426         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3427         if (rc && rc != -EEXIST) {
3428                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3429                                 " op %d epfd %d vec %u\n",
3430                                 port_id, queue_id, op, epfd, vec);
3431                 return rc;
3432         }
3433
3434         return 0;
3435 }
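
/*
 * Illustrative sketch (not part of this file): letting an lcore sleep until
 * RX traffic arrives on port 0, queue 0. "my_epfd", "my_data" and "ev" are
 * hypothetical application values; RTE_EPOLL_PER_THREAD may be used instead
 * of a private epoll fd.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(0, 0, my_epfd, RTE_INTR_EVENT_ADD, my_data);
 *	rte_eth_dev_rx_intr_enable(0, 0);
 *	rte_epoll_wait(my_epfd, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(0, 0);
 *
 * rte_epoll_wait() blocks until the queue interrupt fires or the timeout
 * (here -1, i.e. wait forever) expires.
 */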
3436
3437 int
3438 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3439                            uint16_t queue_id)
3440 {
3441         struct rte_eth_dev *dev;
3442
3443         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3444
3445         dev = &rte_eth_devices[port_id];
3446
3447         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3448         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3449                                                                 queue_id));
3450 }
3451
3452 int
3453 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3454                             uint16_t queue_id)
3455 {
3456         struct rte_eth_dev *dev;
3457
3458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3459
3460         dev = &rte_eth_devices[port_id];
3461
3462         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3463         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3464                                                                 queue_id));
3465 }
3466
3467
3468 int
3469 rte_eth_dev_filter_supported(uint16_t port_id,
3470                              enum rte_filter_type filter_type)
3471 {
3472         struct rte_eth_dev *dev;
3473
3474         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3475
3476         dev = &rte_eth_devices[port_id];
3477         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3478         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3479                                 RTE_ETH_FILTER_NOP, NULL);
3480 }
3481
3482 int
3483 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3484                             enum rte_filter_type filter_type,
3485                             enum rte_filter_op filter_op, void *arg);
3486
3487 int
3488 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3489                             enum rte_filter_type filter_type,
3490                             enum rte_filter_op filter_op, void *arg)
3491 {
3492         struct rte_eth_fdir_info_v22 {
3493                 enum rte_fdir_mode mode;
3494                 struct rte_eth_fdir_masks mask;
3495                 struct rte_eth_fdir_flex_conf flex_conf;
3496                 uint32_t guarant_spc;
3497                 uint32_t best_spc;
3498                 uint32_t flow_types_mask[1];
3499                 uint32_t max_flexpayload;
3500                 uint32_t flex_payload_unit;
3501                 uint32_t max_flex_payload_segment_num;
3502                 uint16_t flex_payload_limit;
3503                 uint32_t flex_bitmask_unit;
3504                 uint32_t max_flex_bitmask_num;
3505         };
3506
3507         struct rte_eth_hash_global_conf_v22 {
3508                 enum rte_eth_hash_function hash_func;
3509                 uint32_t sym_hash_enable_mask[1];
3510                 uint32_t valid_bit_mask[1];
3511         };
3512
3513         struct rte_eth_hash_filter_info_v22 {
3514                 enum rte_eth_hash_filter_info_type info_type;
3515                 union {
3516                         uint8_t enable;
3517                         struct rte_eth_hash_global_conf_v22 global_conf;
3518                         struct rte_eth_input_set_conf input_set_conf;
3519                 } info;
3520         };
3521
3522         struct rte_eth_dev *dev;
3523
3524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3525
3526         dev = &rte_eth_devices[port_id];
3527         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3528         if (filter_op == RTE_ETH_FILTER_INFO) {
3529                 int retval;
3530                 struct rte_eth_fdir_info_v22 *fdir_info_v22;
3531                 struct rte_eth_fdir_info fdir_info;
3532
3533                 fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
3534
3535                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3536                           filter_op, (void *)&fdir_info);
3537                 fdir_info_v22->mode = fdir_info.mode;
3538                 fdir_info_v22->mask = fdir_info.mask;
3539                 fdir_info_v22->flex_conf = fdir_info.flex_conf;
3540                 fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
3541                 fdir_info_v22->best_spc = fdir_info.best_spc;
3542                 fdir_info_v22->flow_types_mask[0] =
3543                         (uint32_t)fdir_info.flow_types_mask[0];
3544                 fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
3545                 fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
3546                 fdir_info_v22->max_flex_payload_segment_num =
3547                         fdir_info.max_flex_payload_segment_num;
3548                 fdir_info_v22->flex_payload_limit =
3549                         fdir_info.flex_payload_limit;
3550                 fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
3551                 fdir_info_v22->max_flex_bitmask_num =
3552                         fdir_info.max_flex_bitmask_num;
3553                 return retval;
3554         } else if (filter_op == RTE_ETH_FILTER_GET) {
3555                 int retval;
3556                 struct rte_eth_hash_filter_info f_info;
3557                 struct rte_eth_hash_filter_info_v22 *f_info_v22 =
3558                         (struct rte_eth_hash_filter_info_v22 *)arg;
3559
3560                 f_info.info_type = f_info_v22->info_type;
3561                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3562                           filter_op, (void *)&f_info);
3563
3564                 switch (f_info_v22->info_type) {
3565                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3566                         f_info_v22->info.enable = f_info.info.enable;
3567                         break;
3568                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3569                         f_info_v22->info.global_conf.hash_func =
3570                                 f_info.info.global_conf.hash_func;
3571                         f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
3572                                 (uint32_t)
3573                                 f_info.info.global_conf.sym_hash_enable_mask[0];
3574                         f_info_v22->info.global_conf.valid_bit_mask[0] =
3575                                 (uint32_t)
3576                                 f_info.info.global_conf.valid_bit_mask[0];
3577                         break;
3578                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3579                         f_info_v22->info.input_set_conf =
3580                                 f_info.info.input_set_conf;
3581                         break;
3582                 default:
3583                         break;
3584                 }
3585                 return retval;
3586         } else if (filter_op == RTE_ETH_FILTER_SET) {
3587                 struct rte_eth_hash_filter_info f_info;
3588                 struct rte_eth_hash_filter_info_v22 *f_v22 =
3589                         (struct rte_eth_hash_filter_info_v22 *)arg;
3590
3591                 f_info.info_type = f_v22->info_type;
3592                 switch (f_v22->info_type) {
3593                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3594                         f_info.info.enable = f_v22->info.enable;
3595                         break;
3596                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3597                         f_info.info.global_conf.hash_func =
3598                                 f_v22->info.global_conf.hash_func;
3599                         f_info.info.global_conf.sym_hash_enable_mask[0] =
3600                                 (uint32_t)
3601                                 f_v22->info.global_conf.sym_hash_enable_mask[0];
3602                         f_info.info.global_conf.valid_bit_mask[0] =
3603                                 (uint32_t)
3604                                 f_v22->info.global_conf.valid_bit_mask[0];
3605                         break;
3606                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3607                         f_info.info.input_set_conf =
3608                                 f_v22->info.input_set_conf;
3609                         break;
3610                 default:
3611                         break;
3612                 }
3613                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3614                                                     (void *)&f_info);
3615         } else
3616                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3617                                                     arg);
3618 }
3619 VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
3620
3621 int
3622 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3623                               enum rte_filter_type filter_type,
3624                               enum rte_filter_op filter_op, void *arg);
3625
3626 int
3627 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3628                               enum rte_filter_type filter_type,
3629                               enum rte_filter_op filter_op, void *arg)
3630 {
3631         struct rte_eth_dev *dev;
3632
3633         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3634
3635         dev = &rte_eth_devices[port_id];
3636         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3637         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3638                                                              filter_op, arg));
3639 }
3640 BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
3641 MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
3642                   enum rte_filter_type filter_type,
3643                   enum rte_filter_op filter_op, void *arg),
3644                   rte_eth_dev_filter_ctrl_v1802);
3645
3646 void *
3647 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3648                 rte_rx_callback_fn fn, void *user_param)
3649 {
3650 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3651         rte_errno = ENOTSUP;
3652         return NULL;
3653 #endif
3654         /* check input parameters */
3655         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3656                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3657                 rte_errno = EINVAL;
3658                 return NULL;
3659         }
3660         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3661
3662         if (cb == NULL) {
3663                 rte_errno = ENOMEM;
3664                 return NULL;
3665         }
3666
3667         cb->fn.rx = fn;
3668         cb->param = user_param;
3669
3670         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3671         /* Add the callback at the tail to preserve FIFO order. */
3672         struct rte_eth_rxtx_callback *tail =
3673                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3674
3675         if (!tail) {
3676                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3677
3678         } else {
3679                 while (tail->next)
3680                         tail = tail->next;
3681                 tail->next = cb;
3682         }
3683         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3684
3685         return cb;
3686 }
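
/*
 * Illustrative sketch (not part of this file): a hypothetical RX callback
 * that counts the packets delivered on port 0, queue 0. The opaque handle
 * returned by rte_eth_add_rx_callback() must be kept if the callback is to
 * be removed later with rte_eth_remove_rx_callback().
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port_id, uint16_t queue_id,
 *		    struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *		    uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *
 *	... then, in the application setup code, after the queue is configured:
 *	void *cb = rte_eth_add_rx_callback(0, 0, count_rx_cb, &rx_count);
 */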
3687
3688 void *
3689 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3690                 rte_rx_callback_fn fn, void *user_param)
3691 {
3692 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3693         rte_errno = ENOTSUP;
3694         return NULL;
3695 #endif
3696         /* check input parameters */
3697         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3698                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3699                 rte_errno = EINVAL;
3700                 return NULL;
3701         }
3702
3703         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3704
3705         if (cb == NULL) {
3706                 rte_errno = ENOMEM;
3707                 return NULL;
3708         }
3709
3710         cb->fn.rx = fn;
3711         cb->param = user_param;
3712
3713         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3714         /* Add the callback at the first position. */
3715         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3716         rte_smp_wmb();
3717         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3718         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3719
3720         return cb;
3721 }
3722
3723 void *
3724 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3725                 rte_tx_callback_fn fn, void *user_param)
3726 {
3727 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3728         rte_errno = ENOTSUP;
3729         return NULL;
3730 #endif
3731         /* check input parameters */
3732         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3733                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3734                 rte_errno = EINVAL;
3735                 return NULL;
3736         }
3737
3738         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3739
3740         if (cb == NULL) {
3741                 rte_errno = ENOMEM;
3742                 return NULL;
3743         }
3744
3745         cb->fn.tx = fn;
3746         cb->param = user_param;
3747
3748         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3749         /* Add the callback at the tail to preserve FIFO order. */
3750         struct rte_eth_rxtx_callback *tail =
3751                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3752
3753         if (!tail) {
3754                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3755
3756         } else {
3757                 while (tail->next)
3758                         tail = tail->next;
3759                 tail->next = cb;
3760         }
3761         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3762
3763         return cb;
3764 }
3765
3766 int
3767 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3768                 struct rte_eth_rxtx_callback *user_cb)
3769 {
3770 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3771         return -ENOTSUP;
3772 #endif
3773         /* Check input parameters. */
3774         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3775         if (user_cb == NULL ||
3776                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3777                 return -EINVAL;
3778
3779         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3780         struct rte_eth_rxtx_callback *cb;
3781         struct rte_eth_rxtx_callback **prev_cb;
3782         int ret = -EINVAL;
3783
3784         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3785         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3786         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3787                 cb = *prev_cb;
3788                 if (cb == user_cb) {
3789                         /* Remove the user cb from the callback list. */
3790                         *prev_cb = cb->next;
3791                         ret = 0;
3792                         break;
3793                 }
3794         }
3795         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3796
3797         return ret;
3798 }
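
/*
 * Note on rte_eth_remove_rx_callback(): the callback is only unlinked from
 * the per-queue list here, it is not freed, because a data path thread may
 * still be executing it inside rte_eth_rx_burst(). The application must
 * make sure no burst call is still using the handle before releasing it
 * with rte_free().
 */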
3799
3800 int
3801 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3802                 struct rte_eth_rxtx_callback *user_cb)
3803 {
3804 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3805         return -ENOTSUP;
3806 #endif
3807         /* Check input parameters. */
3808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3809         if (user_cb == NULL ||
3810                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3811                 return -EINVAL;
3812
3813         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3814         int ret = -EINVAL;
3815         struct rte_eth_rxtx_callback *cb;
3816         struct rte_eth_rxtx_callback **prev_cb;
3817
3818         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3819         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3820         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3821                 cb = *prev_cb;
3822                 if (cb == user_cb) {
3823                         /* Remove the user cb from the callback list. */
3824                         *prev_cb = cb->next;
3825                         ret = 0;
3826                         break;
3827                 }
3828         }
3829         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3830
3831         return ret;
3832 }
3833
3834 int
3835 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3836         struct rte_eth_rxq_info *qinfo)
3837 {
3838         struct rte_eth_dev *dev;
3839
3840         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3841
3842         if (qinfo == NULL)
3843                 return -EINVAL;
3844
3845         dev = &rte_eth_devices[port_id];
3846         if (queue_id >= dev->data->nb_rx_queues) {
3847                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3848                 return -EINVAL;
3849         }
3850
3851         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3852
3853         memset(qinfo, 0, sizeof(*qinfo));
3854         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3855         return 0;
3856 }
3857
3858 int
3859 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3860         struct rte_eth_txq_info *qinfo)
3861 {
3862         struct rte_eth_dev *dev;
3863
3864         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3865
3866         if (qinfo == NULL)
3867                 return -EINVAL;
3868
3869         dev = &rte_eth_devices[port_id];
3870         if (queue_id >= dev->data->nb_tx_queues) {
3871                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3872                 return -EINVAL;
3873         }
3874
3875         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3876
3877         memset(qinfo, 0, sizeof(*qinfo));
3878         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3879         return 0;
3880 }
3881
3882 int
3883 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3884                              struct ether_addr *mc_addr_set,
3885                              uint32_t nb_mc_addr)
3886 {
3887         struct rte_eth_dev *dev;
3888
3889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3890
3891         dev = &rte_eth_devices[port_id];
3892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3893         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3894                                                 mc_addr_set, nb_mc_addr));
3895 }
3896
3897 int
3898 rte_eth_timesync_enable(uint16_t port_id)
3899 {
3900         struct rte_eth_dev *dev;
3901
3902         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3903         dev = &rte_eth_devices[port_id];
3904
3905         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3906         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3907 }
3908
3909 int
3910 rte_eth_timesync_disable(uint16_t port_id)
3911 {
3912         struct rte_eth_dev *dev;
3913
3914         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3915         dev = &rte_eth_devices[port_id];
3916
3917         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3918         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3919 }
3920
3921 int
3922 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3923                                    uint32_t flags)
3924 {
3925         struct rte_eth_dev *dev;
3926
3927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3928         dev = &rte_eth_devices[port_id];
3929
3930         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3931         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3932                                 (dev, timestamp, flags));
3933 }
3934
3935 int
3936 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3937                                    struct timespec *timestamp)
3938 {
3939         struct rte_eth_dev *dev;
3940
3941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3942         dev = &rte_eth_devices[port_id];
3943
3944         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3945         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3946                                 (dev, timestamp));
3947 }
3948
3949 int
3950 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3951 {
3952         struct rte_eth_dev *dev;
3953
3954         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3955         dev = &rte_eth_devices[port_id];
3956
3957         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3958         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3959                                                                       delta));
3960 }
3961
3962 int
3963 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3964 {
3965         struct rte_eth_dev *dev;
3966
3967         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3968         dev = &rte_eth_devices[port_id];
3969
3970         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3971         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3972                                                                 timestamp));
3973 }
3974
3975 int
3976 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3977 {
3978         struct rte_eth_dev *dev;
3979
3980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3981         dev = &rte_eth_devices[port_id];
3982
3983         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3984         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3985                                                                 timestamp));
3986 }
3987
3988 int
3989 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3990 {
3991         struct rte_eth_dev *dev;
3992
3993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3994
3995         dev = &rte_eth_devices[port_id];
3996         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3997         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3998 }
3999
4000 int
4001 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4002 {
4003         struct rte_eth_dev *dev;
4004
4005         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4006
4007         dev = &rte_eth_devices[port_id];
4008         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4009         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4010 }
4011
4012 int
4013 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4014 {
4015         struct rte_eth_dev *dev;
4016
4017         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4018
4019         dev = &rte_eth_devices[port_id];
4020         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4021         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4022 }
4023
4024 int
4025 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4026 {
4027         struct rte_eth_dev *dev;
4028
4029         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4030
4031         dev = &rte_eth_devices[port_id];
4032         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4033         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4034 }
4035
4036 int
4037 rte_eth_dev_get_dcb_info(uint16_t port_id,
4038                              struct rte_eth_dcb_info *dcb_info)
4039 {
4040         struct rte_eth_dev *dev;
4041
4042         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4043
4044         dev = &rte_eth_devices[port_id];
4045         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4046
4047         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4048         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4049 }
4050
4051 int
4052 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4053                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4054 {
4055         struct rte_eth_dev *dev;
4056
4057         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4058         if (l2_tunnel == NULL) {
4059                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4060                 return -EINVAL;
4061         }
4062
4063         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4064                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
4065                 return -EINVAL;
4066         }
4067
4068         dev = &rte_eth_devices[port_id];
4069         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4070                                 -ENOTSUP);
4071         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4072                                                                 l2_tunnel));
4073 }
4074
4075 int
4076 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4077                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4078                                   uint32_t mask,
4079                                   uint8_t en)
4080 {
4081         struct rte_eth_dev *dev;
4082
4083         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4084
4085         if (l2_tunnel == NULL) {
4086                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4087                 return -EINVAL;
4088         }
4089
4090         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4091                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4092                 return -EINVAL;
4093         }
4094
4095         if (mask == 0) {
4096                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4097                 return -EINVAL;
4098         }
4099
4100         dev = &rte_eth_devices[port_id];
4101         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4102                                 -ENOTSUP);
4103         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4104                                                         l2_tunnel, mask, en));
4105 }
4106
4107 static void
4108 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4109                            const struct rte_eth_desc_lim *desc_lim)
4110 {
4111         if (desc_lim->nb_align != 0)
4112                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4113
4114         if (desc_lim->nb_max != 0)
4115                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4116
4117         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4118 }
4119
4120 int
4121 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4122                                  uint16_t *nb_rx_desc,
4123                                  uint16_t *nb_tx_desc)
4124 {
4125         struct rte_eth_dev *dev;
4126         struct rte_eth_dev_info dev_info;
4127
4128         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4129
4130         dev = &rte_eth_devices[port_id];
4131         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4132
4133         rte_eth_dev_info_get(port_id, &dev_info);
4134
4135         if (nb_rx_desc != NULL)
4136                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4137
4138         if (nb_tx_desc != NULL)
4139                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4140
4141         return 0;
4142 }
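
/*
 * Illustrative sketch (not part of this file): clamping requested ring
 * sizes to the limits reported by the PMD before queue setup. "port_id" is
 * an assumed application variable and the initial counts are arbitrary
 * examples.
 *
 *	uint16_t nb_rxd = 1024;
 *	uint16_t nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *
 * On return nb_rxd and nb_txd are aligned to nb_align and clamped to the
 * [nb_min, nb_max] range of the respective descriptor limits, ready to be
 * passed to rte_eth_rx_queue_setup() and rte_eth_tx_queue_setup().
 */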
4143
4144 int
4145 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4146 {
4147         struct rte_eth_dev *dev;
4148
4149         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4150
4151         if (pool == NULL)
4152                 return -EINVAL;
4153
4154         dev = &rte_eth_devices[port_id];
4155
4156         if (*dev->dev_ops->pool_ops_supported == NULL)
4157                 return 1; /* all pools are supported */
4158
4159         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4160 }