ethdev: fix port id storage
[dpdk.git] lib/librte_ether/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37
38 #include "rte_ether.h"
39 #include "rte_ethdev.h"
40 #include "rte_ethdev_driver.h"
41 #include "ethdev_profile.h"
42
43 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
44 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
45 static uint8_t eth_dev_last_created_port;
46
47 /* spinlock for eth device callbacks */
48 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
49
50 /* spinlock for add/remove rx callbacks */
51 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
52
53 /* spinlock for add/remove tx callbacks */
54 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
55
56 /* spinlock for shared data allocation */
57 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
58
59 /* store statistics names and their offsets in the stats structure */
60 struct rte_eth_xstats_name_off {
61         char name[RTE_ETH_XSTATS_NAME_SIZE];
62         unsigned offset;
63 };
64
65 /* Shared memory between primary and secondary processes. */
66 static struct {
67         uint64_t next_owner_id;
68         rte_spinlock_t ownership_lock;
69         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
70 } *rte_eth_dev_shared_data;
71
72 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
73         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
74         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
75         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
76         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
77         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
78         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
79         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
80         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
81                 rx_nombuf)},
82 };
83
84 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
85
86 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
87         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
88         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
89         {"errors", offsetof(struct rte_eth_stats, q_errors)},
90 };
91
92 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
93                 sizeof(rte_rxq_stats_strings[0]))
94
95 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
96         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
97         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
98 };
99 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
100                 sizeof(rte_txq_stats_strings[0]))
101
102 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
103         { DEV_RX_OFFLOAD_##_name, #_name }
104
105 static const struct {
106         uint64_t offload;
107         const char *name;
108 } rte_rx_offload_names[] = {
109         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
110         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
111         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
112         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
113         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
114         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
115         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
117         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
118         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
119         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
120         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
121         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
122         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
123         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
124         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
125 };
126
127 #undef RTE_RX_OFFLOAD_BIT2STR
128
129 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
130         { DEV_TX_OFFLOAD_##_name, #_name }
131
132 static const struct {
133         uint64_t offload;
134         const char *name;
135 } rte_tx_offload_names[] = {
136         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
137         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
138         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
139         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
140         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
141         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
142         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
143         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
145         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
146         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
147         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
150         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
151         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
152         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
153         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
154 };
155
156 #undef RTE_TX_OFFLOAD_BIT2STR
157
158 /**
159  * The user application callback description.
160  *
161  * It contains the callback address registered by the user application,
162  * a pointer to the callback parameters, and the event type.
163  */
164 struct rte_eth_dev_callback {
165         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
166         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
167         void *cb_arg;                           /**< Parameter for callback */
168         void *ret_param;                        /**< Return parameter */
169         enum rte_eth_event_type event;          /**< Interrupt event type */
170         uint32_t active;                        /**< Callback is executing */
171 };
172
173 enum {
174         STAT_QMAP_TX = 0,
175         STAT_QMAP_RX
176 };
177
178 uint16_t
179 rte_eth_find_next(uint16_t port_id)
180 {
181         while (port_id < RTE_MAX_ETHPORTS &&
182                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
183                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
184                 port_id++;
185
186         if (port_id >= RTE_MAX_ETHPORTS)
187                 return RTE_MAX_ETHPORTS;
188
189         return port_id;
190 }
191
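/*
 * Illustrative note (not part of the original file): applications normally
 * do not call rte_eth_find_next() directly; they iterate over valid ports
 * with the RTE_ETH_FOREACH_DEV() macro, which is built on top of it.
 * A minimal sketch, assuming at least one port is attached:
 *
 *     uint16_t pid;
 *
 *     RTE_ETH_FOREACH_DEV(pid)
 *             printf("port %u is usable\n", pid);
 */
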
192 static void
193 rte_eth_dev_shared_data_prepare(void)
194 {
195         const unsigned flags = 0;
196         const struct rte_memzone *mz;
197
198         rte_spinlock_lock(&rte_eth_shared_data_lock);
199
200         if (rte_eth_dev_shared_data == NULL) {
201                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
202                         /* Allocate port data and ownership shared memory. */
203                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
204                                         sizeof(*rte_eth_dev_shared_data),
205                                         rte_socket_id(), flags);
206                 } else
207                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
208                 if (mz == NULL)
209                         rte_panic("Cannot allocate ethdev shared data\n");
210
211                 rte_eth_dev_shared_data = mz->addr;
212                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
213                         rte_eth_dev_shared_data->next_owner_id =
214                                         RTE_ETH_DEV_NO_OWNER + 1;
215                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
216                         memset(rte_eth_dev_shared_data->data, 0,
217                                sizeof(rte_eth_dev_shared_data->data));
218                 }
219         }
220
221         rte_spinlock_unlock(&rte_eth_shared_data_lock);
222 }
223
224 struct rte_eth_dev *
225 rte_eth_dev_allocated(const char *name)
226 {
227         unsigned i;
228
229         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
230                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
231                     strcmp(rte_eth_devices[i].data->name, name) == 0)
232                         return &rte_eth_devices[i];
233         }
234         return NULL;
235 }
236
237 static uint16_t
238 rte_eth_dev_find_free_port(void)
239 {
240         unsigned i;
241
242         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
243                 /* Using shared name field to find a free port. */
244                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
245                         RTE_ASSERT(rte_eth_devices[i].state ==
246                                    RTE_ETH_DEV_UNUSED);
247                         return i;
248                 }
249         }
250         return RTE_MAX_ETHPORTS;
251 }
252
253 static struct rte_eth_dev *
254 eth_dev_get(uint16_t port_id)
255 {
256         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
257
258         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
259         eth_dev->state = RTE_ETH_DEV_ATTACHED;
260
261         eth_dev_last_created_port = port_id;
262
263         return eth_dev;
264 }
265
266 struct rte_eth_dev *
267 rte_eth_dev_allocate(const char *name)
268 {
269         uint16_t port_id;
270         struct rte_eth_dev *eth_dev = NULL;
271
272         rte_eth_dev_shared_data_prepare();
273
274         /* Synchronize port creation between primary and secondary threads. */
275         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
276
277         port_id = rte_eth_dev_find_free_port();
278         if (port_id == RTE_MAX_ETHPORTS) {
279                 RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n");
280                 goto unlock;
281         }
282
283         if (rte_eth_dev_allocated(name) != NULL) {
284                 RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n",
285                                 name);
286                 goto unlock;
287         }
288
289         eth_dev = eth_dev_get(port_id);
290         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
291         eth_dev->data->port_id = port_id;
292         eth_dev->data->mtu = ETHER_MTU;
293
294 unlock:
295         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
296
297         if (eth_dev != NULL)
298                 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
299
300         return eth_dev;
301 }
302
303 /*
304  * Attach to a port already registered by the primary process, which
305  * ensures that the same device gets the same port id in both
306  * the primary and the secondary process.
307  */
308 struct rte_eth_dev *
309 rte_eth_dev_attach_secondary(const char *name)
310 {
311         uint16_t i;
312         struct rte_eth_dev *eth_dev = NULL;
313
314         rte_eth_dev_shared_data_prepare();
315
316         /* Synchronize port attachment to primary port creation and release. */
317         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
318
319         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
320                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
321                         break;
322         }
323         if (i == RTE_MAX_ETHPORTS) {
324                 RTE_PMD_DEBUG_TRACE(
325                         "device %s is not driven by the primary process\n",
326                         name);
327         } else {
328                 eth_dev = eth_dev_get(i);
329                 RTE_ASSERT(eth_dev->data->port_id == i);
330         }
331
332         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
333         return eth_dev;
334 }
335
336 int
337 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
338 {
339         if (eth_dev == NULL)
340                 return -EINVAL;
341
342         rte_eth_dev_shared_data_prepare();
343
344         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
345
346         eth_dev->state = RTE_ETH_DEV_UNUSED;
347
348         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
349
350         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
351
352         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
353
354         return 0;
355 }
356
357 int
358 rte_eth_dev_is_valid_port(uint16_t port_id)
359 {
360         if (port_id >= RTE_MAX_ETHPORTS ||
361             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
362                 return 0;
363         else
364                 return 1;
365 }
366
367 static int
368 rte_eth_is_valid_owner_id(uint64_t owner_id)
369 {
370         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
371             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
372                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016"PRIX64".\n", owner_id);
373                 return 0;
374         }
375         return 1;
376 }
377
378 uint64_t __rte_experimental
379 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
380 {
381         while (port_id < RTE_MAX_ETHPORTS &&
382                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
383                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
384                rte_eth_devices[port_id].data->owner.id != owner_id))
385                 port_id++;
386
387         if (port_id >= RTE_MAX_ETHPORTS)
388                 return RTE_MAX_ETHPORTS;
389
390         return port_id;
391 }
392
393 int __rte_experimental
394 rte_eth_dev_owner_new(uint64_t *owner_id)
395 {
396         rte_eth_dev_shared_data_prepare();
397
398         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
399
400         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
401
402         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
403         return 0;
404 }
405
406 static int
407 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
408                        const struct rte_eth_dev_owner *new_owner)
409 {
410         struct rte_eth_dev_owner *port_owner;
411         int sret;
412
413         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
414
415         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
416             !rte_eth_is_valid_owner_id(old_owner_id))
417                 return -EINVAL;
418
419         port_owner = &rte_eth_devices[port_id].data->owner;
420         if (port_owner->id != old_owner_id) {
421                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
422                                     " by %s_%016"PRIX64".\n", port_id,
423                                     port_owner->name, port_owner->id);
424                 return -EPERM;
425         }
426
427         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
428                         new_owner->name);
429         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
430                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
431                                     port_id);
432
433         port_owner->id = new_owner->id;
434
435         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016"PRIX64".\n", port_id,
436                             new_owner->name, new_owner->id);
437
438         return 0;
439 }
440
441 int __rte_experimental
442 rte_eth_dev_owner_set(const uint16_t port_id,
443                       const struct rte_eth_dev_owner *owner)
444 {
445         int ret;
446
447         rte_eth_dev_shared_data_prepare();
448
449         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
450
451         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
452
453         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
454         return ret;
455 }
456
457 int __rte_experimental
458 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
459 {
460         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
461                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
462         int ret;
463
464         rte_eth_dev_shared_data_prepare();
465
466         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
467
468         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
469
470         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
471         return ret;
472 }
473
474 void __rte_experimental
475 rte_eth_dev_owner_delete(const uint64_t owner_id)
476 {
477         uint16_t port_id;
478
479         rte_eth_dev_shared_data_prepare();
480
481         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
482
483         if (rte_eth_is_valid_owner_id(owner_id)) {
484                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
485                         memset(&rte_eth_devices[port_id].data->owner, 0,
486                                sizeof(struct rte_eth_dev_owner));
487                 RTE_PMD_DEBUG_TRACE("All port owners owned by %016"PRIX64
488                                     " identifier have been removed.\n", owner_id);
489         }
490
491         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
492 }
493
494 int __rte_experimental
495 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
496 {
497         int ret = 0;
498
499         rte_eth_dev_shared_data_prepare();
500
501         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
502
503         if (!rte_eth_dev_is_valid_port(port_id)) {
504                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
505                 ret = -ENODEV;
506         } else {
507                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
508                            sizeof(*owner));
509         }
510
511         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
512         return ret;
513 }
514
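/*
 * Illustrative usage sketch (hypothetical application code, not part of the
 * original file): a component claims a port so that other parts of the
 * application leave it alone, and releases it when done; `pid` is assumed
 * to be a valid, currently unowned port id.
 *
 *     struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 *     if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *         rte_eth_dev_owner_set(pid, &owner) == 0) {
 *             ... use the port exclusively ...
 *             rte_eth_dev_owner_unset(pid, owner.id);
 *     }
 */
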
515 int
516 rte_eth_dev_socket_id(uint16_t port_id)
517 {
518         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
519         return rte_eth_devices[port_id].data->numa_node;
520 }
521
522 void *
523 rte_eth_dev_get_sec_ctx(uint16_t port_id)
524 {
525         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
526         return rte_eth_devices[port_id].security_ctx;
527 }
528
529 uint16_t
530 rte_eth_dev_count(void)
531 {
532         uint16_t p;
533         uint16_t count;
534
535         count = 0;
536
537         RTE_ETH_FOREACH_DEV(p)
538                 count++;
539
540         return count;
541 }
542
543 int
544 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
545 {
546         char *tmp;
547
548         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
549
550         if (name == NULL) {
551                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
552                 return -EINVAL;
553         }
554
555         /* shouldn't check 'rte_eth_devices[i].data',
556          * because it might be overwritten by VDEV PMD */
557         tmp = rte_eth_dev_shared_data->data[port_id].name;
558         strcpy(name, tmp);
559         return 0;
560 }
561
562 int
563 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
564 {
565         uint32_t pid;
566
567         if (name == NULL) {
568                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
569                 return -EINVAL;
570         }
571
572         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
573                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
574                     !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
575                         *port_id = pid;
576                         return 0;
577                 }
578         }
579
580         return -ENODEV;
581 }
582
583 static int
584 eth_err(uint16_t port_id, int ret)
585 {
586         if (ret == 0)
587                 return 0;
588         if (rte_eth_dev_is_removed(port_id))
589                 return -EIO;
590         return ret;
591 }
592
593 /* attach the new device, then store port_id of the device */
594 int
595 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
596 {
597         int ret = -1;
598         int current = rte_eth_dev_count();
599         char *name = NULL;
600         char *args = NULL;
601
602         if ((devargs == NULL) || (port_id == NULL)) {
603                 ret = -EINVAL;
604                 goto err;
605         }
606
607         /* parse devargs, then retrieve device name and args */
608         if (rte_eal_parse_devargs_str(devargs, &name, &args))
609                 goto err;
610
611         ret = rte_eal_dev_attach(name, args);
612         if (ret < 0)
613                 goto err;
614
615         /* no point looking at the port count if no port exists */
616         if (!rte_eth_dev_count()) {
617                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
618                 ret = -1;
619                 goto err;
620         }
621
622         /* if nothing happened, there is a bug here, since some driver told us
623          * it did attach a device, but did not create a port.
624          */
625         if (current == rte_eth_dev_count()) {
626                 ret = -1;
627                 goto err;
628         }
629
630         *port_id = eth_dev_last_created_port;
631         ret = 0;
632
633 err:
634         free(name);
635         free(args);
636         return ret;
637 }
638
639 /* detach the device, then store the name of the device */
640 int
641 rte_eth_dev_detach(uint16_t port_id, char *name)
642 {
643         uint32_t dev_flags;
644         int ret = -1;
645
646         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
647
648         if (name == NULL) {
649                 ret = -EINVAL;
650                 goto err;
651         }
652
653         dev_flags = rte_eth_devices[port_id].data->dev_flags;
654         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
655                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
656                         port_id);
657                 ret = -ENOTSUP;
658                 goto err;
659         }
660
661         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
662                  "%s", rte_eth_devices[port_id].data->name);
663
664         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
665         if (ret < 0)
666                 goto err;
667
668         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
669         return 0;
670
671 err:
672         return ret;
673 }
674
675 static int
676 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
677 {
678         uint16_t old_nb_queues = dev->data->nb_rx_queues;
679         void **rxq;
680         unsigned i;
681
682         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
683                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
684                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
685                                 RTE_CACHE_LINE_SIZE);
686                 if (dev->data->rx_queues == NULL) {
687                         dev->data->nb_rx_queues = 0;
688                         return -(ENOMEM);
689                 }
690         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
691                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
692
693                 rxq = dev->data->rx_queues;
694
695                 for (i = nb_queues; i < old_nb_queues; i++)
696                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
697                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
698                                 RTE_CACHE_LINE_SIZE);
699                 if (rxq == NULL)
700                         return -(ENOMEM);
701                 if (nb_queues > old_nb_queues) {
702                         uint16_t new_qs = nb_queues - old_nb_queues;
703
704                         memset(rxq + old_nb_queues, 0,
705                                 sizeof(rxq[0]) * new_qs);
706                 }
707
708                 dev->data->rx_queues = rxq;
709
710         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
711                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
712
713                 rxq = dev->data->rx_queues;
714
715                 for (i = nb_queues; i < old_nb_queues; i++)
716                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
717
718                 rte_free(dev->data->rx_queues);
719                 dev->data->rx_queues = NULL;
720         }
721         dev->data->nb_rx_queues = nb_queues;
722         return 0;
723 }
724
725 int
726 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
727 {
728         struct rte_eth_dev *dev;
729
730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
731
732         dev = &rte_eth_devices[port_id];
733         if (rx_queue_id >= dev->data->nb_rx_queues) {
734                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
735                 return -EINVAL;
736         }
737
738         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
739
740         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
741                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
742                         " already started\n",
743                         rx_queue_id, port_id);
744                 return 0;
745         }
746
747         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
748                                                              rx_queue_id));
749
750 }
751
752 int
753 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
754 {
755         struct rte_eth_dev *dev;
756
757         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
758
759         dev = &rte_eth_devices[port_id];
760         if (rx_queue_id >= dev->data->nb_rx_queues) {
761                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
762                 return -EINVAL;
763         }
764
765         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
766
767         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
768                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
769                         " already stopped\n",
770                         rx_queue_id, port_id);
771                 return 0;
772         }
773
774         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
775
776 }
777
778 int
779 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
780 {
781         struct rte_eth_dev *dev;
782
783         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
784
785         dev = &rte_eth_devices[port_id];
786         if (tx_queue_id >= dev->data->nb_tx_queues) {
787                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
788                 return -EINVAL;
789         }
790
791         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
792
793         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
794                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
795                         " already started\n",
796                         tx_queue_id, port_id);
797                 return 0;
798         }
799
800         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
801                                                              tx_queue_id));
802
803 }
804
805 int
806 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
807 {
808         struct rte_eth_dev *dev;
809
810         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
811
812         dev = &rte_eth_devices[port_id];
813         if (tx_queue_id >= dev->data->nb_tx_queues) {
814                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
815                 return -EINVAL;
816         }
817
818         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
819
820         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
821                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
822                         " already stopped\n",
823                         tx_queue_id, port_id);
824                 return 0;
825         }
826
827         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
828
829 }
830
831 static int
832 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
833 {
834         uint16_t old_nb_queues = dev->data->nb_tx_queues;
835         void **txq;
836         unsigned i;
837
838         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
839                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
840                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
841                                                    RTE_CACHE_LINE_SIZE);
842                 if (dev->data->tx_queues == NULL) {
843                         dev->data->nb_tx_queues = 0;
844                         return -(ENOMEM);
845                 }
846         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
847                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
848
849                 txq = dev->data->tx_queues;
850
851                 for (i = nb_queues; i < old_nb_queues; i++)
852                         (*dev->dev_ops->tx_queue_release)(txq[i]);
853                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
854                                   RTE_CACHE_LINE_SIZE);
855                 if (txq == NULL)
856                         return -ENOMEM;
857                 if (nb_queues > old_nb_queues) {
858                         uint16_t new_qs = nb_queues - old_nb_queues;
859
860                         memset(txq + old_nb_queues, 0,
861                                sizeof(txq[0]) * new_qs);
862                 }
863
864                 dev->data->tx_queues = txq;
865
866         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
867                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
868
869                 txq = dev->data->tx_queues;
870
871                 for (i = nb_queues; i < old_nb_queues; i++)
872                         (*dev->dev_ops->tx_queue_release)(txq[i]);
873
874                 rte_free(dev->data->tx_queues);
875                 dev->data->tx_queues = NULL;
876         }
877         dev->data->nb_tx_queues = nb_queues;
878         return 0;
879 }
880
881 uint32_t
882 rte_eth_speed_bitflag(uint32_t speed, int duplex)
883 {
884         switch (speed) {
885         case ETH_SPEED_NUM_10M:
886                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
887         case ETH_SPEED_NUM_100M:
888                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
889         case ETH_SPEED_NUM_1G:
890                 return ETH_LINK_SPEED_1G;
891         case ETH_SPEED_NUM_2_5G:
892                 return ETH_LINK_SPEED_2_5G;
893         case ETH_SPEED_NUM_5G:
894                 return ETH_LINK_SPEED_5G;
895         case ETH_SPEED_NUM_10G:
896                 return ETH_LINK_SPEED_10G;
897         case ETH_SPEED_NUM_20G:
898                 return ETH_LINK_SPEED_20G;
899         case ETH_SPEED_NUM_25G:
900                 return ETH_LINK_SPEED_25G;
901         case ETH_SPEED_NUM_40G:
902                 return ETH_LINK_SPEED_40G;
903         case ETH_SPEED_NUM_50G:
904                 return ETH_LINK_SPEED_50G;
905         case ETH_SPEED_NUM_56G:
906                 return ETH_LINK_SPEED_56G;
907         case ETH_SPEED_NUM_100G:
908                 return ETH_LINK_SPEED_100G;
909         default:
910                 return 0;
911         }
912 }
913
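/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * helper above lets an application build the rte_eth_conf.link_speeds
 * bitmap from a numeric speed, e.g. to request a fixed 10G full-duplex
 * link:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                        rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                              ETH_LINK_FULL_DUPLEX);
 */
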
914 /**
915  * A conversion function from rxmode bitfield API.
916  */
917 static void
918 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
919                                     uint64_t *rx_offloads)
920 {
921         uint64_t offloads = 0;
922
923         if (rxmode->header_split == 1)
924                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
925         if (rxmode->hw_ip_checksum == 1)
926                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
927         if (rxmode->hw_vlan_filter == 1)
928                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
929         if (rxmode->hw_vlan_strip == 1)
930                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
931         if (rxmode->hw_vlan_extend == 1)
932                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
933         if (rxmode->jumbo_frame == 1)
934                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
935         if (rxmode->hw_strip_crc == 1)
936                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
937         if (rxmode->enable_scatter == 1)
938                 offloads |= DEV_RX_OFFLOAD_SCATTER;
939         if (rxmode->enable_lro == 1)
940                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
941         if (rxmode->hw_timestamp == 1)
942                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
943         if (rxmode->security == 1)
944                 offloads |= DEV_RX_OFFLOAD_SECURITY;
945
946         *rx_offloads = offloads;
947 }
948
949 /**
950  * A conversion function from rxmode offloads API.
951  */
952 static void
953 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
954                             struct rte_eth_rxmode *rxmode)
955 {
956
957         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
958                 rxmode->header_split = 1;
959         else
960                 rxmode->header_split = 0;
961         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
962                 rxmode->hw_ip_checksum = 1;
963         else
964                 rxmode->hw_ip_checksum = 0;
965         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
966                 rxmode->hw_vlan_filter = 1;
967         else
968                 rxmode->hw_vlan_filter = 0;
969         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
970                 rxmode->hw_vlan_strip = 1;
971         else
972                 rxmode->hw_vlan_strip = 0;
973         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
974                 rxmode->hw_vlan_extend = 1;
975         else
976                 rxmode->hw_vlan_extend = 0;
977         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
978                 rxmode->jumbo_frame = 1;
979         else
980                 rxmode->jumbo_frame = 0;
981         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
982                 rxmode->hw_strip_crc = 1;
983         else
984                 rxmode->hw_strip_crc = 0;
985         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
986                 rxmode->enable_scatter = 1;
987         else
988                 rxmode->enable_scatter = 0;
989         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
990                 rxmode->enable_lro = 1;
991         else
992                 rxmode->enable_lro = 0;
993         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
994                 rxmode->hw_timestamp = 1;
995         else
996                 rxmode->hw_timestamp = 0;
997         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
998                 rxmode->security = 1;
999         else
1000                 rxmode->security = 0;
1001 }
1002
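/*
 * Illustrative sketch (hypothetical application code, not part of the
 * original file): during the transition period handled by the two
 * converters above, the same Rx checksum offload can be requested through
 * either API:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.hw_ip_checksum = 1;            // legacy bitfield API
 *
 *     // or, equivalently, with the offloads API:
 *     conf.rxmode.ignore_offload_bitfield = 1;
 *     conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
 */
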
1003 const char * __rte_experimental
1004 rte_eth_dev_rx_offload_name(uint64_t offload)
1005 {
1006         const char *name = "UNKNOWN";
1007         unsigned int i;
1008
1009         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1010                 if (offload == rte_rx_offload_names[i].offload) {
1011                         name = rte_rx_offload_names[i].name;
1012                         break;
1013                 }
1014         }
1015
1016         return name;
1017 }
1018
1019 const char * __rte_experimental
1020 rte_eth_dev_tx_offload_name(uint64_t offload)
1021 {
1022         const char *name = "UNKNOWN";
1023         unsigned int i;
1024
1025         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1026                 if (offload == rte_tx_offload_names[i].offload) {
1027                         name = rte_tx_offload_names[i].name;
1028                         break;
1029                 }
1030         }
1031
1032         return name;
1033 }
1034
1035 int
1036 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1037                       const struct rte_eth_conf *dev_conf)
1038 {
1039         struct rte_eth_dev *dev;
1040         struct rte_eth_dev_info dev_info;
1041         struct rte_eth_conf local_conf = *dev_conf;
1042         int diag;
1043
1044         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1045
1046         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1047                 RTE_PMD_DEBUG_TRACE(
1048                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1049                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1050                 return -EINVAL;
1051         }
1052
1053         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1054                 RTE_PMD_DEBUG_TRACE(
1055                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1056                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1057                 return -EINVAL;
1058         }
1059
1060         dev = &rte_eth_devices[port_id];
1061
1062         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1063         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1064
1065         if (dev->data->dev_started) {
1066                 RTE_PMD_DEBUG_TRACE(
1067                     "port %d must be stopped to allow configuration\n", port_id);
1068                 return -EBUSY;
1069         }
1070
1071         /*
1072          * Convert between the bitfield and offloads APIs so that a PMD
1073          * needs to support only one of them.
1074          */
1075         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
1076                 rte_eth_convert_rx_offload_bitfield(
1077                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1078         } else {
1079                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
1080                                             &local_conf.rxmode);
1081         }
1082
1083         /* Copy the dev_conf parameter into the dev structure */
1084         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1085
1086         /*
1087          * Check that the numbers of RX and TX queues are not greater
1088          * than the maximum number of RX and TX queues supported by the
1089          * configured device.
1090          */
1091         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1092
1093         if (nb_rx_q == 0 && nb_tx_q == 0) {
1094                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d number of rx and tx queues cannot both be 0\n", port_id);
1095                 return -EINVAL;
1096         }
1097
1098         if (nb_rx_q > dev_info.max_rx_queues) {
1099                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1100                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1101                 return -EINVAL;
1102         }
1103
1104         if (nb_tx_q > dev_info.max_tx_queues) {
1105                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1106                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1107                 return -EINVAL;
1108         }
1109
1110         /* Check that the device supports requested interrupts */
1111         if ((dev_conf->intr_conf.lsc == 1) &&
1112                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1113                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1114                                         dev->device->driver->name);
1115                         return -EINVAL;
1116         }
1117         if ((dev_conf->intr_conf.rmv == 1) &&
1118             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1119                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1120                                     dev->device->driver->name);
1121                 return -EINVAL;
1122         }
1123
1124         /*
1125          * If jumbo frames are enabled, check that the maximum RX packet
1126          * length is supported by the configured device.
1127          */
1128         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1129                 if (dev_conf->rxmode.max_rx_pkt_len >
1130                     dev_info.max_rx_pktlen) {
1131                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1132                                 " > max valid value %u\n",
1133                                 port_id,
1134                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1135                                 (unsigned)dev_info.max_rx_pktlen);
1136                         return -EINVAL;
1137                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1138                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1139                                 " < min valid value %u\n",
1140                                 port_id,
1141                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1142                                 (unsigned)ETHER_MIN_LEN);
1143                         return -EINVAL;
1144                 }
1145         } else {
1146                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1147                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1148                         /* Use default value */
1149                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1150                                                         ETHER_MAX_LEN;
1151         }
1152
1153         /*
1154          * Setup new number of RX/TX queues and reconfigure device.
1155          */
1156         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1157         if (diag != 0) {
1158                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1159                                 port_id, diag);
1160                 return diag;
1161         }
1162
1163         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1164         if (diag != 0) {
1165                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1166                                 port_id, diag);
1167                 rte_eth_dev_rx_queue_config(dev, 0);
1168                 return diag;
1169         }
1170
1171         diag = (*dev->dev_ops->dev_configure)(dev);
1172         if (diag != 0) {
1173                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1174                                 port_id, diag);
1175                 rte_eth_dev_rx_queue_config(dev, 0);
1176                 rte_eth_dev_tx_queue_config(dev, 0);
1177                 return eth_err(port_id, diag);
1178         }
1179
1180         /* Initialize Rx profiling if enabled at compilation time. */
1181         diag = __rte_eth_profile_rx_init(port_id, dev);
1182         if (diag != 0) {
1183                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1184                                 port_id, diag);
1185                 rte_eth_dev_rx_queue_config(dev, 0);
1186                 rte_eth_dev_tx_queue_config(dev, 0);
1187                 return eth_err(port_id, diag);
1188         }
1189
1190         return 0;
1191 }
1192
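/*
 * Illustrative sketch (hypothetical application code, not part of the
 * original file): a minimal one-Rx/one-Tx-queue configuration using the
 * offloads path validated above; `pid` is assumed to be a valid port id.
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.ignore_offload_bitfield = 1;
 *     conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
 *     if (rte_eth_dev_configure(pid, 1, 1, &conf) < 0)
 *             rte_exit(EXIT_FAILURE, "cannot configure port %u\n", pid);
 */
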
1193 void
1194 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1195 {
1196         if (dev->data->dev_started) {
1197                 RTE_PMD_DEBUG_TRACE(
1198                         "port %d must be stopped to allow reset\n",
1199                         dev->data->port_id);
1200                 return;
1201         }
1202
1203         rte_eth_dev_rx_queue_config(dev, 0);
1204         rte_eth_dev_tx_queue_config(dev, 0);
1205
1206         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1207 }
1208
1209 static void
1210 rte_eth_dev_config_restore(uint16_t port_id)
1211 {
1212         struct rte_eth_dev *dev;
1213         struct rte_eth_dev_info dev_info;
1214         struct ether_addr *addr;
1215         uint16_t i;
1216         uint32_t pool = 0;
1217         uint64_t pool_mask;
1218
1219         dev = &rte_eth_devices[port_id];
1220
1221         rte_eth_dev_info_get(port_id, &dev_info);
1222
1223         /* replay MAC address configuration including default MAC */
1224         addr = &dev->data->mac_addrs[0];
1225         if (*dev->dev_ops->mac_addr_set != NULL)
1226                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1227         else if (*dev->dev_ops->mac_addr_add != NULL)
1228                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1229
1230         if (*dev->dev_ops->mac_addr_add != NULL) {
1231                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1232                         addr = &dev->data->mac_addrs[i];
1233
1234                         /* skip zero address */
1235                         if (is_zero_ether_addr(addr))
1236                                 continue;
1237
1238                         pool = 0;
1239                         pool_mask = dev->data->mac_pool_sel[i];
1240
1241                         do {
1242                                 if (pool_mask & 1ULL)
1243                                         (*dev->dev_ops->mac_addr_add)(dev,
1244                                                 addr, i, pool);
1245                                 pool_mask >>= 1;
1246                                 pool++;
1247                         } while (pool_mask);
1248                 }
1249         }
1250
1251         /* replay promiscuous configuration */
1252         if (rte_eth_promiscuous_get(port_id) == 1)
1253                 rte_eth_promiscuous_enable(port_id);
1254         else if (rte_eth_promiscuous_get(port_id) == 0)
1255                 rte_eth_promiscuous_disable(port_id);
1256
1257         /* replay all multicast configuration */
1258         if (rte_eth_allmulticast_get(port_id) == 1)
1259                 rte_eth_allmulticast_enable(port_id);
1260         else if (rte_eth_allmulticast_get(port_id) == 0)
1261                 rte_eth_allmulticast_disable(port_id);
1262 }
1263
1264 int
1265 rte_eth_dev_start(uint16_t port_id)
1266 {
1267         struct rte_eth_dev *dev;
1268         int diag;
1269
1270         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1271
1272         dev = &rte_eth_devices[port_id];
1273
1274         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1275
1276         if (dev->data->dev_started != 0) {
1277                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1278                         " already started\n",
1279                         port_id);
1280                 return 0;
1281         }
1282
1283         diag = (*dev->dev_ops->dev_start)(dev);
1284         if (diag == 0)
1285                 dev->data->dev_started = 1;
1286         else
1287                 return eth_err(port_id, diag);
1288
1289         rte_eth_dev_config_restore(port_id);
1290
1291         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1292                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1293                 (*dev->dev_ops->link_update)(dev, 0);
1294         }
1295         return 0;
1296 }
1297
1298 void
1299 rte_eth_dev_stop(uint16_t port_id)
1300 {
1301         struct rte_eth_dev *dev;
1302
1303         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1304         dev = &rte_eth_devices[port_id];
1305
1306         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1307
1308         if (dev->data->dev_started == 0) {
1309                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1310                         " already stopped\n",
1311                         port_id);
1312                 return;
1313         }
1314
1315         dev->data->dev_started = 0;
1316         (*dev->dev_ops->dev_stop)(dev);
1317 }
1318
1319 int
1320 rte_eth_dev_set_link_up(uint16_t port_id)
1321 {
1322         struct rte_eth_dev *dev;
1323
1324         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1325
1326         dev = &rte_eth_devices[port_id];
1327
1328         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1329         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1330 }
1331
1332 int
1333 rte_eth_dev_set_link_down(uint16_t port_id)
1334 {
1335         struct rte_eth_dev *dev;
1336
1337         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1338
1339         dev = &rte_eth_devices[port_id];
1340
1341         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1342         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1343 }
1344
1345 void
1346 rte_eth_dev_close(uint16_t port_id)
1347 {
1348         struct rte_eth_dev *dev;
1349
1350         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1351         dev = &rte_eth_devices[port_id];
1352
1353         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1354         dev->data->dev_started = 0;
1355         (*dev->dev_ops->dev_close)(dev);
1356
1357         dev->data->nb_rx_queues = 0;
1358         rte_free(dev->data->rx_queues);
1359         dev->data->rx_queues = NULL;
1360         dev->data->nb_tx_queues = 0;
1361         rte_free(dev->data->tx_queues);
1362         dev->data->tx_queues = NULL;
1363 }
1364
1365 int
1366 rte_eth_dev_reset(uint16_t port_id)
1367 {
1368         struct rte_eth_dev *dev;
1369         int ret;
1370
1371         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1372         dev = &rte_eth_devices[port_id];
1373
1374         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1375
1376         rte_eth_dev_stop(port_id);
1377         ret = dev->dev_ops->dev_reset(dev);
1378
1379         return eth_err(port_id, ret);
1380 }
1381
1382 int __rte_experimental
1383 rte_eth_dev_is_removed(uint16_t port_id)
1384 {
1385         struct rte_eth_dev *dev;
1386         int ret;
1387
1388         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1389
1390         dev = &rte_eth_devices[port_id];
1391
1392         if (dev->state == RTE_ETH_DEV_REMOVED)
1393                 return 1;
1394
1395         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1396
1397         ret = dev->dev_ops->is_removed(dev);
1398         if (ret != 0)
1399                 /* Device is physically removed. */
1400                 dev->state = RTE_ETH_DEV_REMOVED;
1401
1402         return ret;
1403 }
1404
1405 int
1406 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1407                        uint16_t nb_rx_desc, unsigned int socket_id,
1408                        const struct rte_eth_rxconf *rx_conf,
1409                        struct rte_mempool *mp)
1410 {
1411         int ret;
1412         uint32_t mbp_buf_size;
1413         struct rte_eth_dev *dev;
1414         struct rte_eth_dev_info dev_info;
1415         struct rte_eth_rxconf local_conf;
1416         void **rxq;
1417
1418         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1419
1420         dev = &rte_eth_devices[port_id];
1421         if (rx_queue_id >= dev->data->nb_rx_queues) {
1422                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1423                 return -EINVAL;
1424         }
1425
1426         if (dev->data->dev_started) {
1427                 RTE_PMD_DEBUG_TRACE(
1428                     "port %d must be stopped to allow configuration\n", port_id);
1429                 return -EBUSY;
1430         }
1431
1432         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1433         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1434
1435         /*
1436          * Check the size of the mbuf data buffer.
1437          * This value must be provided in the private data of the memory pool.
1438          * First check that the memory pool has valid private data.
1439          */
1440         rte_eth_dev_info_get(port_id, &dev_info);
1441         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1442                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1443                                 mp->name, (int) mp->private_data_size,
1444                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1445                 return -ENOSPC;
1446         }
1447         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1448
1449         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1450                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1451                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1452                                 "=%d)\n",
1453                                 mp->name,
1454                                 (int)mbp_buf_size,
1455                                 (int)(RTE_PKTMBUF_HEADROOM +
1456                                       dev_info.min_rx_bufsize),
1457                                 (int)RTE_PKTMBUF_HEADROOM,
1458                                 (int)dev_info.min_rx_bufsize);
1459                 return -EINVAL;
1460         }
1461
1462         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1463                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1464                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1465
1466                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1467                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1468                         nb_rx_desc,
1469                         dev_info.rx_desc_lim.nb_max,
1470                         dev_info.rx_desc_lim.nb_min,
1471                         dev_info.rx_desc_lim.nb_align);
1472                 return -EINVAL;
1473         }
1474
1475         rxq = dev->data->rx_queues;
1476         if (rxq[rx_queue_id]) {
1477                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1478                                         -ENOTSUP);
1479                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1480                 rxq[rx_queue_id] = NULL;
1481         }
1482
1483         if (rx_conf == NULL)
1484                 rx_conf = &dev_info.default_rxconf;
1485
1486         local_conf = *rx_conf;
1487         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1488                 /**
1489                  * Reflect port offloads to queue offloads in order for
1490                  * offloads to not be discarded.
1491                  */
1492                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1493                                                     &local_conf.offloads);
1494         }
1495
1496         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1497                                               socket_id, &local_conf, mp);
1498         if (!ret) {
1499                 if (!dev->data->min_rx_buf_size ||
1500                     dev->data->min_rx_buf_size > mbp_buf_size)
1501                         dev->data->min_rx_buf_size = mbp_buf_size;
1502         }
1503
1504         return eth_err(port_id, ret);
1505 }
1506
1507 /**
1508  * A conversion function from txq_flags API.
1509  */
1510 static void
1511 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1512 {
1513         uint64_t offloads = 0;
1514
1515         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1516                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1517         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1518                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1519         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1520                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1521         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1522                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1523         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1524                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1525         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1526             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1527                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1528
1529         *tx_offloads = offloads;
1530 }
1531
1532 /**
1533  * Convert from the Tx offloads API to the txq_flags API.
1534  */
1535 static void
1536 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1537 {
1538         uint32_t flags = 0;
1539
1540         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1541                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1542         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1543                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1544         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1545                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1546         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1547                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1548         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1549                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1550         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1551                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1552
1553         *txq_flags = flags;
1554 }
1555
1556 int
1557 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1558                        uint16_t nb_tx_desc, unsigned int socket_id,
1559                        const struct rte_eth_txconf *tx_conf)
1560 {
1561         struct rte_eth_dev *dev;
1562         struct rte_eth_dev_info dev_info;
1563         struct rte_eth_txconf local_conf;
1564         void **txq;
1565
1566         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1567
1568         dev = &rte_eth_devices[port_id];
1569         if (tx_queue_id >= dev->data->nb_tx_queues) {
1570                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1571                 return -EINVAL;
1572         }
1573
1574         if (dev->data->dev_started) {
1575                 RTE_PMD_DEBUG_TRACE(
1576                     "port %d must be stopped to allow configuration\n", port_id);
1577                 return -EBUSY;
1578         }
1579
1580         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1581         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1582
1583         rte_eth_dev_info_get(port_id, &dev_info);
1584
1585         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1586             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1587             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1588                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1589                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1590                                 nb_tx_desc,
1591                                 dev_info.tx_desc_lim.nb_max,
1592                                 dev_info.tx_desc_lim.nb_min,
1593                                 dev_info.tx_desc_lim.nb_align);
1594                 return -EINVAL;
1595         }
1596
1597         txq = dev->data->tx_queues;
1598         if (txq[tx_queue_id]) {
1599                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1600                                         -ENOTSUP);
1601                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1602                 txq[tx_queue_id] = NULL;
1603         }
1604
1605         if (tx_conf == NULL)
1606                 tx_conf = &dev_info.default_txconf;
1607
1608         /*
1609          * Convert between the txq_flags and offloads APIs so that a PMD
1610          * needs to support only one of them.
1611          */
1612         local_conf = *tx_conf;
1613         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1614                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1615                                              &local_conf.txq_flags);
1616                 /* Keep the ignore flag. */
1617                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1618         } else {
1619                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1620                                           &local_conf.offloads);
1621         }
1622
1623         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1624                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1625 }
1626
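/*
 * Usage sketch for the Tx setup path above (illustrative; the descriptor
 * count and queue 0 are assumptions, and the PMD is assumed to advertise
 * DEV_TX_OFFLOAD_MBUF_FAST_FREE). An application using the offloads API
 * sets ETH_TXQ_FLAGS_IGNORE so the legacy txq_flags are derived for it.
 *
 *   struct rte_eth_dev_info dev_info;
 *   struct rte_eth_txconf txconf;
 *
 *   rte_eth_dev_info_get(port_id, &dev_info);
 *   txconf = dev_info.default_txconf;
 *   txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *   txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *   if (rte_eth_tx_queue_setup(port_id, 0, 1024,
 *                              rte_eth_dev_socket_id(port_id), &txconf) != 0)
 *       rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */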
1627 void
1628 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1629                 void *userdata __rte_unused)
1630 {
1631         unsigned i;
1632
1633         for (i = 0; i < unsent; i++)
1634                 rte_pktmbuf_free(pkts[i]);
1635 }
1636
1637 void
1638 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1639                 void *userdata)
1640 {
1641         uint64_t *count = userdata;
1642         unsigned i;
1643
1644         for (i = 0; i < unsent; i++)
1645                 rte_pktmbuf_free(pkts[i]);
1646
1647         *count += unsent;
1648 }
1649
1650 int
1651 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1652                 buffer_tx_error_fn cbfn, void *userdata)
1653 {
1654         buffer->error_callback = cbfn;
1655         buffer->error_userdata = userdata;
1656         return 0;
1657 }
1658
1659 int
1660 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1661 {
1662         int ret = 0;
1663
1664         if (buffer == NULL)
1665                 return -EINVAL;
1666
1667         buffer->size = size;
1668         if (buffer->error_callback == NULL) {
1669                 ret = rte_eth_tx_buffer_set_err_callback(
1670                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1671         }
1672
1673         return ret;
1674 }
1675
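/*
 * Buffered Tx sketch (illustrative; MAX_BURST, the drop counter and the
 * already-allocated mbuf are assumptions): packets are queued and flushed
 * explicitly, and packets the driver could not send are counted instead of
 * being silently dropped.
 *
 *   static uint64_t dropped;
 *   struct rte_eth_dev_tx_buffer *buffer;
 *
 *   buffer = rte_zmalloc_socket("tx_buffer",
 *                               RTE_ETH_TX_BUFFER_SIZE(MAX_BURST), 0,
 *                               rte_eth_dev_socket_id(port_id));
 *   rte_eth_tx_buffer_init(buffer, MAX_BURST);
 *   rte_eth_tx_buffer_set_err_callback(buffer,
 *                   rte_eth_tx_buffer_count_callback, &dropped);
 *
 *   rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
 *   rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */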
1676 int
1677 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1678 {
1679         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1680         int ret;
1681
1682         /* Validate Input Data. Bail if not valid or not supported. */
1683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1684         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1685
1686         /* Call driver to free pending mbufs. */
1687         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1688                                                free_cnt);
1689         return eth_err(port_id, ret);
1690 }
1691
1692 void
1693 rte_eth_promiscuous_enable(uint16_t port_id)
1694 {
1695         struct rte_eth_dev *dev;
1696
1697         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1698         dev = &rte_eth_devices[port_id];
1699
1700         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1701         (*dev->dev_ops->promiscuous_enable)(dev);
1702         dev->data->promiscuous = 1;
1703 }
1704
1705 void
1706 rte_eth_promiscuous_disable(uint16_t port_id)
1707 {
1708         struct rte_eth_dev *dev;
1709
1710         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1711         dev = &rte_eth_devices[port_id];
1712
1713         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1714         dev->data->promiscuous = 0;
1715         (*dev->dev_ops->promiscuous_disable)(dev);
1716 }
1717
1718 int
1719 rte_eth_promiscuous_get(uint16_t port_id)
1720 {
1721         struct rte_eth_dev *dev;
1722
1723         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1724
1725         dev = &rte_eth_devices[port_id];
1726         return dev->data->promiscuous;
1727 }
1728
1729 void
1730 rte_eth_allmulticast_enable(uint16_t port_id)
1731 {
1732         struct rte_eth_dev *dev;
1733
1734         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1735         dev = &rte_eth_devices[port_id];
1736
1737         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1738         (*dev->dev_ops->allmulticast_enable)(dev);
1739         dev->data->all_multicast = 1;
1740 }
1741
1742 void
1743 rte_eth_allmulticast_disable(uint16_t port_id)
1744 {
1745         struct rte_eth_dev *dev;
1746
1747         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1748         dev = &rte_eth_devices[port_id];
1749
1750         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1751         dev->data->all_multicast = 0;
1752         (*dev->dev_ops->allmulticast_disable)(dev);
1753 }
1754
1755 int
1756 rte_eth_allmulticast_get(uint16_t port_id)
1757 {
1758         struct rte_eth_dev *dev;
1759
1760         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1761
1762         dev = &rte_eth_devices[port_id];
1763         return dev->data->all_multicast;
1764 }
1765
1766 void
1767 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1768 {
1769         struct rte_eth_dev *dev;
1770
1771         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1772         dev = &rte_eth_devices[port_id];
1773
1774         if (dev->data->dev_conf.intr_conf.lsc)
1775                 rte_eth_linkstatus_get(dev, eth_link);
1776         else {
1777                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1778                 (*dev->dev_ops->link_update)(dev, 1);
1779                 *eth_link = dev->data->dev_link;
1780         }
1781 }
1782
1783 void
1784 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1785 {
1786         struct rte_eth_dev *dev;
1787
1788         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1789         dev = &rte_eth_devices[port_id];
1790
1791         if (dev->data->dev_conf.intr_conf.lsc)
1792                 rte_eth_linkstatus_get(dev, eth_link);
1793         else {
1794                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1795                 (*dev->dev_ops->link_update)(dev, 0);
1796                 *eth_link = dev->data->dev_link;
1797         }
1798 }
1799
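/*
 * Link-status sketch (illustrative): the non-blocking variant is typically
 * polled in a loop after rte_eth_dev_start() until the link comes up.
 *
 *   struct rte_eth_link link;
 *
 *   rte_eth_link_get_nowait(port_id, &link);
 *   if (link.link_status == ETH_LINK_UP)
 *       printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 *   else
 *       printf("port %u link down\n", port_id);
 */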
1800 int
1801 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1802 {
1803         struct rte_eth_dev *dev;
1804
1805         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1806
1807         dev = &rte_eth_devices[port_id];
1808         memset(stats, 0, sizeof(*stats));
1809
1810         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1811         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1812         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1813 }
1814
1815 int
1816 rte_eth_stats_reset(uint16_t port_id)
1817 {
1818         struct rte_eth_dev *dev;
1819
1820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1821         dev = &rte_eth_devices[port_id];
1822
1823         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1824         (*dev->dev_ops->stats_reset)(dev);
1825         dev->data->rx_mbuf_alloc_failed = 0;
1826
1827         return 0;
1828 }
1829
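/*
 * Basic statistics sketch (illustrative): read the aggregate counters and
 * optionally clear them afterwards.
 *
 *   struct rte_eth_stats stats;
 *
 *   if (rte_eth_stats_get(port_id, &stats) == 0)
 *       printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *              stats.ipackets, stats.opackets, stats.imissed);
 *   rte_eth_stats_reset(port_id);
 */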
1830 static inline int
1831 get_xstats_basic_count(struct rte_eth_dev *dev)
1832 {
1833         uint16_t nb_rxqs, nb_txqs;
1834         int count;
1835
1836         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1837         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1838
1839         count = RTE_NB_STATS;
1840         count += nb_rxqs * RTE_NB_RXQ_STATS;
1841         count += nb_txqs * RTE_NB_TXQ_STATS;
1842
1843         return count;
1844 }
1845
1846 static int
1847 get_xstats_count(uint16_t port_id)
1848 {
1849         struct rte_eth_dev *dev;
1850         int count;
1851
1852         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1853         dev = &rte_eth_devices[port_id];
1854         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1855                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1856                                 NULL, 0);
1857                 if (count < 0)
1858                         return eth_err(port_id, count);
1859         }
1860         if (dev->dev_ops->xstats_get_names != NULL) {
1861                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1862                 if (count < 0)
1863                         return eth_err(port_id, count);
1864         } else
1865                 count = 0;
1866
1867
1868         count += get_xstats_basic_count(dev);
1869
1870         return count;
1871 }
1872
1873 int
1874 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1875                 uint64_t *id)
1876 {
1877         int cnt_xstats, idx_xstat;
1878
1879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1880
1881         if (!id) {
1882                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1883                 return -ENOMEM;
1884         }
1885
1886         if (!xstat_name) {
1887                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1888                 return -ENOMEM;
1889         }
1890
1891         /* Get count */
1892         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1893         if (cnt_xstats  < 0) {
1894                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1895                 return -ENODEV;
1896         }
1897
1898         /* Get id-name lookup table */
1899         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1900
1901         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1902                         port_id, xstats_names, cnt_xstats, NULL)) {
1903                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1904                 return -1;
1905         }
1906
1907         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1908                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1909                         *id = idx_xstat;
1910                         return 0;
1911                 }
1912         }
1913
1914         return -EINVAL;
1915 }
1916
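/*
 * By-name lookup sketch (illustrative; "rx_good_packets" is one of the
 * basic xstat names defined at the top of this file):
 *
 *   uint64_t id, value;
 *
 *   if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *       rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *       printf("rx_good_packets = %" PRIu64 "\n", value);
 */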
1917 /* retrieve basic stats names */
1918 static int
1919 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1920         struct rte_eth_xstat_name *xstats_names)
1921 {
1922         int cnt_used_entries = 0;
1923         uint32_t idx, id_queue;
1924         uint16_t num_q;
1925
1926         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1927                 snprintf(xstats_names[cnt_used_entries].name,
1928                         sizeof(xstats_names[0].name),
1929                         "%s", rte_stats_strings[idx].name);
1930                 cnt_used_entries++;
1931         }
1932         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1933         for (id_queue = 0; id_queue < num_q; id_queue++) {
1934                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1935                         snprintf(xstats_names[cnt_used_entries].name,
1936                                 sizeof(xstats_names[0].name),
1937                                 "rx_q%u%s",
1938                                 id_queue, rte_rxq_stats_strings[idx].name);
1939                         cnt_used_entries++;
1940                 }
1941
1942         }
1943         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1944         for (id_queue = 0; id_queue < num_q; id_queue++) {
1945                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1946                         snprintf(xstats_names[cnt_used_entries].name,
1947                                 sizeof(xstats_names[0].name),
1948                                 "tx_q%u%s",
1949                                 id_queue, rte_txq_stats_strings[idx].name);
1950                         cnt_used_entries++;
1951                 }
1952         }
1953         return cnt_used_entries;
1954 }
1955
1956 /* retrieve ethdev extended statistics names */
1957 int
1958 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1959         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1960         uint64_t *ids)
1961 {
1962         struct rte_eth_xstat_name *xstats_names_copy;
1963         unsigned int no_basic_stat_requested = 1;
1964         unsigned int no_ext_stat_requested = 1;
1965         unsigned int expected_entries;
1966         unsigned int basic_count;
1967         struct rte_eth_dev *dev;
1968         unsigned int i;
1969         int ret;
1970
1971         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1972         dev = &rte_eth_devices[port_id];
1973
1974         basic_count = get_xstats_basic_count(dev);
1975         ret = get_xstats_count(port_id);
1976         if (ret < 0)
1977                 return ret;
1978         expected_entries = (unsigned int)ret;
1979
1980         /* Return max number of stats if no ids given */
1981         if (!ids) {
1982                 if (!xstats_names)
1983                         return expected_entries;
1984                 else if (xstats_names && size < expected_entries)
1985                         return expected_entries;
1986         }
1987
1988         if (ids && !xstats_names)
1989                 return -EINVAL;
1990
1991         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1992                 uint64_t ids_copy[size];
1993
1994                 for (i = 0; i < size; i++) {
1995                         if (ids[i] < basic_count) {
1996                                 no_basic_stat_requested = 0;
1997                                 break;
1998                         }
1999
2000                         /*
2001                          * Convert ids to xstats ids that PMD knows.
2002                          * ids known by user are basic + extended stats.
2003                          */
2004                         ids_copy[i] = ids[i] - basic_count;
2005                 }
2006
2007                 if (no_basic_stat_requested)
2008                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2009                                         xstats_names, ids_copy, size);
2010         }
2011
2012         /* Retrieve all stats */
2013         if (!ids) {
2014                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2015                                 expected_entries);
2016                 if (num_stats < 0 || num_stats > (int)expected_entries)
2017                         return num_stats;
2018                 else
2019                         return expected_entries;
2020         }
2021
2022         xstats_names_copy = calloc(expected_entries,
2023                 sizeof(struct rte_eth_xstat_name));
2024
2025         if (!xstats_names_copy) {
2026                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2027                 return -ENOMEM;
2028         }
2029
2030         if (ids) {
2031                 for (i = 0; i < size; i++) {
2032                         if (ids[i] >= basic_count) {
2033                                 no_ext_stat_requested = 0;
2034                                 break;
2035                         }
2036                 }
2037         }
2038
2039         /* Fill xstats_names_copy structure */
2040         if (ids && no_ext_stat_requested) {
2041                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2042         } else {
2043                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2044                         expected_entries);
2045                 if (ret < 0) {
2046                         free(xstats_names_copy);
2047                         return ret;
2048                 }
2049         }
2050
2051         /* Filter stats */
2052         for (i = 0; i < size; i++) {
2053                 if (ids[i] >= expected_entries) {
2054                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2055                         free(xstats_names_copy);
2056                         return -1;
2057                 }
2058                 xstats_names[i] = xstats_names_copy[ids[i]];
2059         }
2060
2061         free(xstats_names_copy);
2062         return size;
2063 }
2064
2065 int
2066 rte_eth_xstats_get_names(uint16_t port_id,
2067         struct rte_eth_xstat_name *xstats_names,
2068         unsigned int size)
2069 {
2070         struct rte_eth_dev *dev;
2071         int cnt_used_entries;
2072         int cnt_expected_entries;
2073         int cnt_driver_entries;
2074
2075         cnt_expected_entries = get_xstats_count(port_id);
2076         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2077                         (int)size < cnt_expected_entries)
2078                 return cnt_expected_entries;
2079
2080         /* port_id checked in get_xstats_count() */
2081         dev = &rte_eth_devices[port_id];
2082
2083         cnt_used_entries = rte_eth_basic_stats_get_names(
2084                 dev, xstats_names);
2085
2086         if (dev->dev_ops->xstats_get_names != NULL) {
2087                 /* If there are any driver-specific xstats, append them
2088                  * to end of list.
2089                  */
2090                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2091                         dev,
2092                         xstats_names + cnt_used_entries,
2093                         size - cnt_used_entries);
2094                 if (cnt_driver_entries < 0)
2095                         return eth_err(port_id, cnt_driver_entries);
2096                 cnt_used_entries += cnt_driver_entries;
2097         }
2098
2099         return cnt_used_entries;
2100 }
2101
2102
2103 static int
2104 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2105 {
2106         struct rte_eth_dev *dev;
2107         struct rte_eth_stats eth_stats;
2108         unsigned int count = 0, i, q;
2109         uint64_t val, *stats_ptr;
2110         uint16_t nb_rxqs, nb_txqs;
2111         int ret;
2112
2113         ret = rte_eth_stats_get(port_id, &eth_stats);
2114         if (ret < 0)
2115                 return ret;
2116
2117         dev = &rte_eth_devices[port_id];
2118
2119         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2120         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2121
2122         /* global stats */
2123         for (i = 0; i < RTE_NB_STATS; i++) {
2124                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2125                                         rte_stats_strings[i].offset);
2126                 val = *stats_ptr;
2127                 xstats[count++].value = val;
2128         }
2129
2130         /* per-rxq stats */
2131         for (q = 0; q < nb_rxqs; q++) {
2132                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2133                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2134                                         rte_rxq_stats_strings[i].offset +
2135                                         q * sizeof(uint64_t));
2136                         val = *stats_ptr;
2137                         xstats[count++].value = val;
2138                 }
2139         }
2140
2141         /* per-txq stats */
2142         for (q = 0; q < nb_txqs; q++) {
2143                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2144                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2145                                         rte_txq_stats_strings[i].offset +
2146                                         q * sizeof(uint64_t));
2147                         val = *stats_ptr;
2148                         xstats[count++].value = val;
2149                 }
2150         }
2151         return count;
2152 }
2153
2154 /* retrieve ethdev extended statistics */
2155 int
2156 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2157                          uint64_t *values, unsigned int size)
2158 {
2159         unsigned int no_basic_stat_requested = 1;
2160         unsigned int no_ext_stat_requested = 1;
2161         unsigned int num_xstats_filled;
2162         unsigned int basic_count;
2163         uint16_t expected_entries;
2164         struct rte_eth_dev *dev;
2165         unsigned int i;
2166         int ret;
2167
2168         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2169         ret = get_xstats_count(port_id);
2170         if (ret < 0)
2171                 return ret;
2172         expected_entries = (uint16_t)ret;
2173         struct rte_eth_xstat xstats[expected_entries];
2174         dev = &rte_eth_devices[port_id];
2175         basic_count = get_xstats_basic_count(dev);
2176
2177         /* Return max number of stats if no ids given */
2178         if (!ids) {
2179                 if (!values)
2180                         return expected_entries;
2181                 else if (values && size < expected_entries)
2182                         return expected_entries;
2183         }
2184
2185         if (ids && !values)
2186                 return -EINVAL;
2187
2188         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2190                 uint64_t ids_copy[size];
2191
2192                 for (i = 0; i < size; i++) {
2193                         if (ids[i] < basic_count) {
2194                                 no_basic_stat_requested = 0;
2195                                 break;
2196                         }
2197
2198                         /*
2199                          * Convert ids to xstats ids that PMD knows.
2200                          * ids known by user are basic + extended stats.
2201                          */
2202                         ids_copy[i] = ids[i] - basic_count;
2203                 }
2204
2205                 if (no_basic_stat_requested)
2206                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2207                                         values, size);
2208         }
2209
2210         if (ids) {
2211                 for (i = 0; i < size; i++) {
2212                         if (ids[i] >= basic_count) {
2213                                 no_ext_stat_requested = 0;
2214                                 break;
2215                         }
2216                 }
2217         }
2218
2219         /* Fill the xstats structure */
2220         if (ids && no_ext_stat_requested)
2221                 ret = rte_eth_basic_stats_get(port_id, xstats);
2222         else
2223                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2224
2225         if (ret < 0)
2226                 return ret;
2227         num_xstats_filled = (unsigned int)ret;
2228
2229         /* Return all stats */
2230         if (!ids) {
2231                 for (i = 0; i < num_xstats_filled; i++)
2232                         values[i] = xstats[i].value;
2233                 return expected_entries;
2234         }
2235
2236         /* Filter stats */
2237         for (i = 0; i < size; i++) {
2238                 if (ids[i] >= expected_entries) {
2239                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2240                         return -1;
2241                 }
2242                 values[i] = xstats[ids[i]].value;
2243         }
2244         return size;
2245 }
2246
2247 int
2248 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2249         unsigned int n)
2250 {
2251         struct rte_eth_dev *dev;
2252         unsigned int count = 0, i;
2253         signed int xcount = 0;
2254         uint16_t nb_rxqs, nb_txqs;
2255         int ret;
2256
2257         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2258
2259         dev = &rte_eth_devices[port_id];
2260
2261         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2262         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2263
2264         /* Return generic statistics */
2265         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2266                 (nb_txqs * RTE_NB_TXQ_STATS);
2267
2268         /* implemented by the driver */
2269         if (dev->dev_ops->xstats_get != NULL) {
2270                 /* Retrieve the xstats from the driver at the end of the
2271                  * xstats struct.
2272                  */
2273                 xcount = (*dev->dev_ops->xstats_get)(dev,
2274                                      xstats ? xstats + count : NULL,
2275                                      (n > count) ? n - count : 0);
2276
2277                 if (xcount < 0)
2278                         return eth_err(port_id, xcount);
2279         }
2280
2281         if (n < count + xcount || xstats == NULL)
2282                 return count + xcount;
2283
2284         /* now fill the xstats structure */
2285         ret = rte_eth_basic_stats_get(port_id, xstats);
2286         if (ret < 0)
2287                 return ret;
2288         count = ret;
2289
2290         for (i = 0; i < count; i++)
2291                 xstats[i].id = i;
2292         /* add an offset to driver-specific stats */
2293         for ( ; i < count + xcount; i++)
2294                 xstats[i].id += count;
2295
2296         return count + xcount;
2297 }
2298
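/*
 * Full xstats dump sketch (illustrative): query the required count with
 * NULL arrays, allocate, then fetch names and values in two calls.
 *
 *   int i, cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
 *
 *   if (cnt > 0) {
 *       struct rte_eth_xstat_name *names = calloc(cnt, sizeof(*names));
 *       struct rte_eth_xstat *xstats = calloc(cnt, sizeof(*xstats));
 *
 *       if (names != NULL && xstats != NULL &&
 *           rte_eth_xstats_get_names(port_id, names, cnt) == cnt &&
 *           rte_eth_xstats_get(port_id, xstats, cnt) == cnt)
 *           for (i = 0; i < cnt; i++)
 *               printf("%s: %" PRIu64 "\n",
 *                      names[xstats[i].id].name, xstats[i].value);
 *       free(names);
 *       free(xstats);
 *   }
 */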
2299 /* reset ethdev extended statistics */
2300 void
2301 rte_eth_xstats_reset(uint16_t port_id)
2302 {
2303         struct rte_eth_dev *dev;
2304
2305         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2306         dev = &rte_eth_devices[port_id];
2307
2308         /* implemented by the driver */
2309         if (dev->dev_ops->xstats_reset != NULL) {
2310                 (*dev->dev_ops->xstats_reset)(dev);
2311                 return;
2312         }
2313
2314         /* fallback to default */
2315         rte_eth_stats_reset(port_id);
2316 }
2317
2318 static int
2319 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2320                 uint8_t is_rx)
2321 {
2322         struct rte_eth_dev *dev;
2323
2324         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2325
2326         dev = &rte_eth_devices[port_id];
2327
2328         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2329         return (*dev->dev_ops->queue_stats_mapping_set)
2330                         (dev, queue_id, stat_idx, is_rx);
2331 }
2332
2333
2334 int
2335 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2336                 uint8_t stat_idx)
2337 {
2338         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2339                                                 stat_idx, STAT_QMAP_TX));
2340 }
2341
2342
2343 int
2344 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2345                 uint8_t stat_idx)
2346 {
2347         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2348                                                 stat_idx, STAT_QMAP_RX));
2349 }
2350
2351 int
2352 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2353 {
2354         struct rte_eth_dev *dev;
2355
2356         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2357         dev = &rte_eth_devices[port_id];
2358
2359         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2360         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2361                                                         fw_version, fw_size));
2362 }
2363
2364 void
2365 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2366 {
2367         struct rte_eth_dev *dev;
2368         const struct rte_eth_desc_lim lim = {
2369                 .nb_max = UINT16_MAX,
2370                 .nb_min = 0,
2371                 .nb_align = 1,
2372         };
2373
2374         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2375         dev = &rte_eth_devices[port_id];
2376
2377         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2378         dev_info->rx_desc_lim = lim;
2379         dev_info->tx_desc_lim = lim;
2380
2381         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2382         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2383         dev_info->driver_name = dev->device->driver->name;
2384         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2385         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2386 }
2387
2388 int
2389 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2390                                  uint32_t *ptypes, int num)
2391 {
2392         int i, j;
2393         struct rte_eth_dev *dev;
2394         const uint32_t *all_ptypes;
2395
2396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2397         dev = &rte_eth_devices[port_id];
2398         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2399         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2400
2401         if (!all_ptypes)
2402                 return 0;
2403
2404         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2405                 if (all_ptypes[i] & ptype_mask) {
2406                         if (j < num)
2407                                 ptypes[j] = all_ptypes[i];
2408                         j++;
2409                 }
2410
2411         return j;
2412 }
2413
2414 void
2415 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2416 {
2417         struct rte_eth_dev *dev;
2418
2419         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2420         dev = &rte_eth_devices[port_id];
2421         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2422 }
2423
2424
2425 int
2426 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2427 {
2428         struct rte_eth_dev *dev;
2429
2430         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2431
2432         dev = &rte_eth_devices[port_id];
2433         *mtu = dev->data->mtu;
2434         return 0;
2435 }
2436
2437 int
2438 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2439 {
2440         int ret;
2441         struct rte_eth_dev *dev;
2442
2443         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2444         dev = &rte_eth_devices[port_id];
2445         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2446
2447         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2448         if (!ret)
2449                 dev->data->mtu = mtu;
2450
2451         return eth_err(port_id, ret);
2452 }
2453
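/*
 * MTU sketch (illustrative; 9000 is an arbitrary jumbo value that the PMD
 * may reject with -EINVAL or -ENOTSUP):
 *
 *   uint16_t mtu;
 *
 *   if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *       rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *       printf("port %u MTU is now %u\n", port_id, mtu);
 */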
2454 int
2455 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2456 {
2457         struct rte_eth_dev *dev;
2458         int ret;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2461         dev = &rte_eth_devices[port_id];
2462         if (!(dev->data->dev_conf.rxmode.offloads &
2463               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2464                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2465                 return -ENOSYS;
2466         }
2467
2468         if (vlan_id > 4095) {
2469                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2470                                 port_id, (unsigned) vlan_id);
2471                 return -EINVAL;
2472         }
2473         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2474
2475         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2476         if (ret == 0) {
2477                 struct rte_vlan_filter_conf *vfc;
2478                 int vidx;
2479                 int vbit;
2480
2481                 vfc = &dev->data->vlan_filter_conf;
2482                 vidx = vlan_id / 64;
2483                 vbit = vlan_id % 64;
2484
2485                 if (on)
2486                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2487                 else
2488                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2489         }
2490
2491         return eth_err(port_id, ret);
2492 }
2493
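/*
 * VLAN filter sketch (illustrative; VLAN 100 is an example value):
 * DEV_RX_OFFLOAD_VLAN_FILTER must have been enabled in rxmode.offloads at
 * configure time, otherwise the function above returns -ENOSYS.
 *
 *   if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *       printf("could not add VLAN 100 to the filter table\n");
 */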
2494 int
2495 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2496                                     int on)
2497 {
2498         struct rte_eth_dev *dev;
2499
2500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2501         dev = &rte_eth_devices[port_id];
2502         if (rx_queue_id >= dev->data->nb_rx_queues) {
2503                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2504                 return -EINVAL;
2505         }
2506
2507         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2508         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2509
2510         return 0;
2511 }
2512
2513 int
2514 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2515                                 enum rte_vlan_type vlan_type,
2516                                 uint16_t tpid)
2517 {
2518         struct rte_eth_dev *dev;
2519
2520         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2521         dev = &rte_eth_devices[port_id];
2522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2523
2524         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2525                                                                tpid));
2526 }
2527
2528 int
2529 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2530 {
2531         struct rte_eth_dev *dev;
2532         int ret = 0;
2533         int mask = 0;
2534         int cur, org = 0;
2535         uint64_t orig_offloads;
2536
2537         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2538         dev = &rte_eth_devices[port_id];
2539
2540         /* save original values in case of failure */
2541         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2542
2543         /* check which options were changed by the application */
2544         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2545         org = !!(dev->data->dev_conf.rxmode.offloads &
2546                  DEV_RX_OFFLOAD_VLAN_STRIP);
2547         if (cur != org) {
2548                 if (cur)
2549                         dev->data->dev_conf.rxmode.offloads |=
2550                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2551                 else
2552                         dev->data->dev_conf.rxmode.offloads &=
2553                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2554                 mask |= ETH_VLAN_STRIP_MASK;
2555         }
2556
2557         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2558         org = !!(dev->data->dev_conf.rxmode.offloads &
2559                  DEV_RX_OFFLOAD_VLAN_FILTER);
2560         if (cur != org) {
2561                 if (cur)
2562                         dev->data->dev_conf.rxmode.offloads |=
2563                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2564                 else
2565                         dev->data->dev_conf.rxmode.offloads &=
2566                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2567                 mask |= ETH_VLAN_FILTER_MASK;
2568         }
2569
2570         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2571         org = !!(dev->data->dev_conf.rxmode.offloads &
2572                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2573         if (cur != org) {
2574                 if (cur)
2575                         dev->data->dev_conf.rxmode.offloads |=
2576                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2577                 else
2578                         dev->data->dev_conf.rxmode.offloads &=
2579                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2580                 mask |= ETH_VLAN_EXTEND_MASK;
2581         }
2582
2583         /* no change */
2584         if (mask == 0)
2585                 return ret;
2586
2587         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2588
2589         /*
2590          * Convert to the offload bitfield API in case the underlying PMD
2591          * still supports only that one.
2592          */
2593         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2594                                     &dev->data->dev_conf.rxmode);
2595         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2596         if (ret) {
2597                 /* hit an error, restore the original values */
2598                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2599                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2600                                             &dev->data->dev_conf.rxmode);
2601         }
2602
2603         return eth_err(port_id, ret);
2604 }
2605
2606 int
2607 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2608 {
2609         struct rte_eth_dev *dev;
2610         int ret = 0;
2611
2612         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2613         dev = &rte_eth_devices[port_id];
2614
2615         if (dev->data->dev_conf.rxmode.offloads &
2616             DEV_RX_OFFLOAD_VLAN_STRIP)
2617                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2618
2619         if (dev->data->dev_conf.rxmode.offloads &
2620             DEV_RX_OFFLOAD_VLAN_FILTER)
2621                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2622
2623         if (dev->data->dev_conf.rxmode.offloads &
2624             DEV_RX_OFFLOAD_VLAN_EXTEND)
2625                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2626
2627         return ret;
2628 }
2629
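/*
 * VLAN offload mask sketch (illustrative): read the current mask, enable
 * stripping and write it back; bits other than strip/filter/extend are
 * ignored by the setter above.
 *
 *   int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *   if (mask >= 0) {
 *       mask |= ETH_VLAN_STRIP_OFFLOAD;
 *       if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *           printf("could not enable VLAN stripping\n");
 *   }
 */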
2630 int
2631 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2632 {
2633         struct rte_eth_dev *dev;
2634
2635         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2636         dev = &rte_eth_devices[port_id];
2637         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2638
2639         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2640 }
2641
2642 int
2643 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2644 {
2645         struct rte_eth_dev *dev;
2646
2647         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2648         dev = &rte_eth_devices[port_id];
2649         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2650         memset(fc_conf, 0, sizeof(*fc_conf));
2651         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2652 }
2653
2654 int
2655 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2656 {
2657         struct rte_eth_dev *dev;
2658
2659         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2660         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2661                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2662                 return -EINVAL;
2663         }
2664
2665         dev = &rte_eth_devices[port_id];
2666         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2667         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2668 }
2669
2670 int
2671 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2672                                    struct rte_eth_pfc_conf *pfc_conf)
2673 {
2674         struct rte_eth_dev *dev;
2675
2676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2677         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2678                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2679                 return -EINVAL;
2680         }
2681
2682         dev = &rte_eth_devices[port_id];
2683         /* High water / low water validation is device-specific */
2684         if  (*dev->dev_ops->priority_flow_ctrl_set)
2685                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2686                                         (dev, pfc_conf));
2687         return -ENOTSUP;
2688 }
2689
2690 static int
2691 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2692                         uint16_t reta_size)
2693 {
2694         uint16_t i, num;
2695
2696         if (!reta_conf)
2697                 return -EINVAL;
2698
2699         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2700         for (i = 0; i < num; i++) {
2701                 if (reta_conf[i].mask)
2702                         return 0;
2703         }
2704
2705         return -EINVAL;
2706 }
2707
2708 static int
2709 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2710                          uint16_t reta_size,
2711                          uint16_t max_rxq)
2712 {
2713         uint16_t i, idx, shift;
2714
2715         if (!reta_conf)
2716                 return -EINVAL;
2717
2718         if (max_rxq == 0) {
2719                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2720                 return -EINVAL;
2721         }
2722
2723         for (i = 0; i < reta_size; i++) {
2724                 idx = i / RTE_RETA_GROUP_SIZE;
2725                 shift = i % RTE_RETA_GROUP_SIZE;
2726                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2727                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2728                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2729                                 "the maximum rxq index: %u\n", idx, shift,
2730                                 reta_conf[idx].reta[shift], max_rxq);
2731                         return -EINVAL;
2732                 }
2733         }
2734
2735         return 0;
2736 }
2737
2738 int
2739 rte_eth_dev_rss_reta_update(uint16_t port_id,
2740                             struct rte_eth_rss_reta_entry64 *reta_conf,
2741                             uint16_t reta_size)
2742 {
2743         struct rte_eth_dev *dev;
2744         int ret;
2745
2746         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2747         /* Check mask bits */
2748         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2749         if (ret < 0)
2750                 return ret;
2751
2752         dev = &rte_eth_devices[port_id];
2753
2754         /* Check entry value */
2755         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2756                                 dev->data->nb_rx_queues);
2757         if (ret < 0)
2758                 return ret;
2759
2760         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2761         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2762                                                              reta_size));
2763 }
2764
2765 int
2766 rte_eth_dev_rss_reta_query(uint16_t port_id,
2767                            struct rte_eth_rss_reta_entry64 *reta_conf,
2768                            uint16_t reta_size)
2769 {
2770         struct rte_eth_dev *dev;
2771         int ret;
2772
2773         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2774
2775         /* Check mask bits */
2776         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2777         if (ret < 0)
2778                 return ret;
2779
2780         dev = &rte_eth_devices[port_id];
2781         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2782         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2783                                                             reta_size));
2784 }
2785
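/*
 * RSS RETA sketch (illustrative; dev_info and nb_rx_queues are assumed from
 * earlier setup and dev_info.reta_size is assumed to be a multiple of
 * RTE_RETA_GROUP_SIZE): every touched entry needs its mask bit set.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[dev_info.reta_size /
 *                                             RTE_RETA_GROUP_SIZE];
 *   uint16_t i;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < dev_info.reta_size; i++) {
 *       reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *               UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *       reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *               i % nb_rx_queues;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
 */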
2786 int
2787 rte_eth_dev_rss_hash_update(uint16_t port_id,
2788                             struct rte_eth_rss_conf *rss_conf)
2789 {
2790         struct rte_eth_dev *dev;
2791
2792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2793         dev = &rte_eth_devices[port_id];
2794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2795         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2796                                                                  rss_conf));
2797 }
2798
2799 int
2800 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2801                               struct rte_eth_rss_conf *rss_conf)
2802 {
2803         struct rte_eth_dev *dev;
2804
2805         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2806         dev = &rte_eth_devices[port_id];
2807         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2808         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2809                                                                    rss_conf));
2810 }
2811
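/*
 * RSS hash sketch (illustrative; a NULL rss_key keeps the current key and
 * the PMD may reject unsupported rss_hf bits):
 *
 *   struct rte_eth_rss_conf rss_conf = {
 *       .rss_key = NULL,
 *       .rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *   };
 *
 *   if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
 *       printf("RSS hash update failed\n");
 */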
2812 int
2813 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2814                                 struct rte_eth_udp_tunnel *udp_tunnel)
2815 {
2816         struct rte_eth_dev *dev;
2817
2818         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2819         if (udp_tunnel == NULL) {
2820                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2821                 return -EINVAL;
2822         }
2823
2824         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2825                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2826                 return -EINVAL;
2827         }
2828
2829         dev = &rte_eth_devices[port_id];
2830         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2831         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2832                                                                 udp_tunnel));
2833 }
2834
2835 int
2836 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2837                                    struct rte_eth_udp_tunnel *udp_tunnel)
2838 {
2839         struct rte_eth_dev *dev;
2840
2841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2842         dev = &rte_eth_devices[port_id];
2843
2844         if (udp_tunnel == NULL) {
2845                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2846                 return -EINVAL;
2847         }
2848
2849         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2850                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2851                 return -EINVAL;
2852         }
2853
2854         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2855         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2856                                                                 udp_tunnel));
2857 }
2858
2859 int
2860 rte_eth_led_on(uint16_t port_id)
2861 {
2862         struct rte_eth_dev *dev;
2863
2864         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2865         dev = &rte_eth_devices[port_id];
2866         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2867         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2868 }
2869
2870 int
2871 rte_eth_led_off(uint16_t port_id)
2872 {
2873         struct rte_eth_dev *dev;
2874
2875         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2876         dev = &rte_eth_devices[port_id];
2877         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2878         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2879 }
2880
2881 /*
2882  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2883  * an empty spot.
2884  */
2885 static int
2886 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2887 {
2888         struct rte_eth_dev_info dev_info;
2889         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2890         unsigned i;
2891
2892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2893         rte_eth_dev_info_get(port_id, &dev_info);
2894
2895         for (i = 0; i < dev_info.max_mac_addrs; i++)
2896                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2897                         return i;
2898
2899         return -1;
2900 }
2901
2902 static const struct ether_addr null_mac_addr;
2903
2904 int
2905 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2906                         uint32_t pool)
2907 {
2908         struct rte_eth_dev *dev;
2909         int index;
2910         uint64_t pool_mask;
2911         int ret;
2912
2913         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2914         dev = &rte_eth_devices[port_id];
2915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2916
2917         if (is_zero_ether_addr(addr)) {
2918                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2919                         port_id);
2920                 return -EINVAL;
2921         }
2922         if (pool >= ETH_64_POOLS) {
2923                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2924                 return -EINVAL;
2925         }
2926
2927         index = get_mac_addr_index(port_id, addr);
2928         if (index < 0) {
2929                 index = get_mac_addr_index(port_id, &null_mac_addr);
2930                 if (index < 0) {
2931                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2932                                 port_id);
2933                         return -ENOSPC;
2934                 }
2935         } else {
2936                 pool_mask = dev->data->mac_pool_sel[index];
2937
2938                 /* If both MAC address and pool are already set, do nothing */
2939                 if (pool_mask & (1ULL << pool))
2940                         return 0;
2941         }
2942
2943         /* Update NIC */
2944         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2945
2946         if (ret == 0) {
2947                 /* Update address in NIC data structure */
2948                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2949
2950                 /* Update pool bitmap in NIC data structure */
2951                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2952         }
2953
2954         return eth_err(port_id, ret);
2955 }
2956
2957 int
2958 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2959 {
2960         struct rte_eth_dev *dev;
2961         int index;
2962
2963         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2964         dev = &rte_eth_devices[port_id];
2965         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2966
2967         index = get_mac_addr_index(port_id, addr);
2968         if (index == 0) {
2969                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2970                 return -EADDRINUSE;
2971         } else if (index < 0)
2972                 return 0;  /* Do nothing if address wasn't found */
2973
2974         /* Update NIC */
2975         (*dev->dev_ops->mac_addr_remove)(dev, index);
2976
2977         /* Update address in NIC data structure */
2978         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2979
2980         /* reset pool bitmap */
2981         dev->data->mac_pool_sel[index] = 0;
2982
2983         return 0;
2984 }
2985
2986 int
2987 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2988 {
2989         struct rte_eth_dev *dev;
2990
2991         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2992
2993         if (!is_valid_assigned_ether_addr(addr))
2994                 return -EINVAL;
2995
2996         dev = &rte_eth_devices[port_id];
2997         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2998
2999         /* Update default address in NIC data structure */
3000         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3001
3002         (*dev->dev_ops->mac_addr_set)(dev, addr);
3003
3004         return 0;
3005 }
3006
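/*
 * MAC filtering sketch (illustrative; the address value is an example and
 * pool 0 is assumed):
 *
 *   struct ether_addr addr = {
 *       .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *   };
 *
 *   if (rte_eth_dev_mac_addr_add(port_id, &addr, 0) != 0)
 *       printf("could not add the extra MAC address\n");
 */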
3007
3008 /*
3009  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3010  * an empty spot.
3011  */
3012 static int
3013 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3014 {
3015         struct rte_eth_dev_info dev_info;
3016         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3017         unsigned i;
3018
3019         rte_eth_dev_info_get(port_id, &dev_info);
3020         if (!dev->data->hash_mac_addrs)
3021                 return -1;
3022
3023         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3024                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3025                         ETHER_ADDR_LEN) == 0)
3026                         return i;
3027
3028         return -1;
3029 }
3030
3031 int
3032 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3033                                 uint8_t on)
3034 {
3035         int index;
3036         int ret;
3037         struct rte_eth_dev *dev;
3038
3039         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3040
3041         dev = &rte_eth_devices[port_id];
3042         if (is_zero_ether_addr(addr)) {
3043                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3044                         port_id);
3045                 return -EINVAL;
3046         }
3047
3048         index = get_hash_mac_addr_index(port_id, addr);
3049         /* Check if it's already there, and do nothing */
3050         if ((index >= 0) && on)
3051                 return 0;
3052
3053         if (index < 0) {
3054                 if (!on) {
3055                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3056                                 "set in UTA\n", port_id);
3057                         return -EINVAL;
3058                 }
3059
3060                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3061                 if (index < 0) {
3062                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3063                                         port_id);
3064                         return -ENOSPC;
3065                 }
3066         }
3067
3068         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3069         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3070         if (ret == 0) {
3071                 /* Update address in NIC data structure */
3072                 if (on)
3073                         ether_addr_copy(addr,
3074                                         &dev->data->hash_mac_addrs[index]);
3075                 else
3076                         ether_addr_copy(&null_mac_addr,
3077                                         &dev->data->hash_mac_addrs[index]);
3078         }
3079
3080         return eth_err(port_id, ret);
3081 }
3082
3083 int
3084 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3085 {
3086         struct rte_eth_dev *dev;
3087
3088         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3089
3090         dev = &rte_eth_devices[port_id];
3091
3092         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3093         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3094                                                                        on));
3095 }
3096
3097 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3098                                         uint16_t tx_rate)
3099 {
3100         struct rte_eth_dev *dev;
3101         struct rte_eth_dev_info dev_info;
3102         struct rte_eth_link link;
3103
3104         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3105
3106         dev = &rte_eth_devices[port_id];
3107         rte_eth_dev_info_get(port_id, &dev_info);
3108         link = dev->data->dev_link;
3109
3110         if (queue_idx > dev_info.max_tx_queues) {
3111                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3112                                 "invalid queue id=%d\n", port_id, queue_idx);
3113                 return -EINVAL;
3114         }
3115
3116         if (tx_rate > link.link_speed) {
3117                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
3118                                 "bigger than link speed=%d\n",
3119                         tx_rate, link.link_speed);
3120                 return -EINVAL;
3121         }
3122
3123         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3124         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3125                                                         queue_idx, tx_rate));
3126 }
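/*
 * Illustrative sketch, not part of the library: capping a TX queue with
 * rte_eth_set_queue_rate_limit() above. The rate is expressed in Mbps and
 * must not exceed the current link speed; queue and rate values here are
 * arbitrary placeholders.
 */
static int
example_cap_tx_queue(uint16_t port_id)
{
	const uint16_t queue_idx = 0;   /* hypothetical queue */
	const uint16_t tx_rate = 1000;  /* 1 Gbps cap */

	return rte_eth_set_queue_rate_limit(port_id, queue_idx, tx_rate);
}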
3127
3128 int
3129 rte_eth_mirror_rule_set(uint16_t port_id,
3130                         struct rte_eth_mirror_conf *mirror_conf,
3131                         uint8_t rule_id, uint8_t on)
3132 {
3133         struct rte_eth_dev *dev;
3134
3135         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3136         if (mirror_conf->rule_type == 0) {
3137                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3138                 return -EINVAL;
3139         }
3140
3141         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3142                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3143                                 ETH_64_POOLS - 1);
3144                 return -EINVAL;
3145         }
3146
3147         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3148              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3149             (mirror_conf->pool_mask == 0)) {
3150                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
3151                 return -EINVAL;
3152         }
3153
3154         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3155             mirror_conf->vlan.vlan_mask == 0) {
3156                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
3157                 return -EINVAL;
3158         }
3159
3160         dev = &rte_eth_devices[port_id];
3161         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3162
3163         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3164                                                 mirror_conf, rule_id, on));
3165 }
3166
3167 int
3168 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3169 {
3170         struct rte_eth_dev *dev;
3171
3172         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3173
3174         dev = &rte_eth_devices[port_id];
3175         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3176
3177         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3178                                                                    rule_id));
3179 }
3180
3181 RTE_INIT(eth_dev_init_cb_lists)
3182 {
3183         int i;
3184
3185         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3186                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3187 }
3188
3189 int
3190 rte_eth_dev_callback_register(uint16_t port_id,
3191                         enum rte_eth_event_type event,
3192                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3193 {
3194         struct rte_eth_dev *dev;
3195         struct rte_eth_dev_callback *user_cb;
3196         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3197         uint16_t last_port;
3198
3199         if (!cb_fn)
3200                 return -EINVAL;
3201
3202         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3203                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3204                 return -EINVAL;
3205         }
3206
3207         if (port_id == RTE_ETH_ALL) {
3208                 next_port = 0;
3209                 last_port = RTE_MAX_ETHPORTS - 1;
3210         } else {
3211                 next_port = last_port = port_id;
3212         }
3213
3214         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3215
3216         do {
3217                 dev = &rte_eth_devices[next_port];
3218
3219                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3220                         if (user_cb->cb_fn == cb_fn &&
3221                                 user_cb->cb_arg == cb_arg &&
3222                                 user_cb->event == event) {
3223                                 break;
3224                         }
3225                 }
3226
3227                 /* create a new callback. */
3228                 if (user_cb == NULL) {
3229                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3230                                 sizeof(struct rte_eth_dev_callback), 0);
3231                         if (user_cb != NULL) {
3232                                 user_cb->cb_fn = cb_fn;
3233                                 user_cb->cb_arg = cb_arg;
3234                                 user_cb->event = event;
3235                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3236                                                   user_cb, next);
3237                         } else {
3238                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3239                                 rte_eth_dev_callback_unregister(port_id, event,
3240                                                                 cb_fn, cb_arg);
3241                                 return -ENOMEM;
3242                         }
3243
3244                 }
3245         } while (++next_port <= last_port);
3246
3247         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3248         return 0;
3249 }
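/*
 * Illustrative sketch, not part of the library: registering a link-state
 * change callback through rte_eth_dev_callback_register() above, either for
 * one port or for every port via RTE_ETH_ALL. Function names are
 * hypothetical.
 */
static int
example_lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		     void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	rte_eth_link_get_nowait(port_id, &link);
	RTE_LOG(INFO, EAL, "Port %u link is %s\n", port_id,
		link.link_status ? "up" : "down");
	return 0;
}

static int
example_register_lsc_cb(void)
{
	/* One registration covers every existing and future port. */
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_event_cb, NULL);
}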
3250
3251 int
3252 rte_eth_dev_callback_unregister(uint16_t port_id,
3253                         enum rte_eth_event_type event,
3254                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3255 {
3256         int ret;
3257         struct rte_eth_dev *dev;
3258         struct rte_eth_dev_callback *cb, *next;
3259         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3260         uint16_t last_port;
3261
3262         if (!cb_fn)
3263                 return -EINVAL;
3264
3265         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3266                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3267                 return -EINVAL;
3268         }
3269
3270         if (port_id == RTE_ETH_ALL) {
3271                 next_port = 0;
3272                 last_port = RTE_MAX_ETHPORTS - 1;
3273         } else {
3274                 next_port = last_port = port_id;
3275         }
3276
3277         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3278
3279         do {
3280                 dev = &rte_eth_devices[next_port];
3281                 ret = 0;
3282                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3283                      cb = next) {
3284
3285                         next = TAILQ_NEXT(cb, next);
3286
3287                         if (cb->cb_fn != cb_fn || cb->event != event ||
3288                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3289                                 continue;
3290
3291                         /*
3292                          * if this callback is not executing right now,
3293                          * then remove it.
3294                          */
3295                         if (cb->active == 0) {
3296                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3297                                 rte_free(cb);
3298                         } else {
3299                                 ret = -EAGAIN;
3300                         }
3301                 }
3302         } while (++next_port <= last_port);
3303
3304         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3305         return ret;
3306 }
3307
3308 int
3309 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3310         enum rte_eth_event_type event, void *ret_param)
3311 {
3312         struct rte_eth_dev_callback *cb_lst;
3313         struct rte_eth_dev_callback dev_cb;
3314         int rc = 0;
3315
3316         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3317         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3318                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3319                         continue;
3320                 dev_cb = *cb_lst;
3321                 cb_lst->active = 1;
3322                 if (ret_param != NULL)
3323                         dev_cb.ret_param = ret_param;
3324
3325                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3326                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3327                                 dev_cb.cb_arg, dev_cb.ret_param);
3328                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3329                 cb_lst->active = 0;
3330         }
3331         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3332         return rc;
3333 }
3334
3335 int
3336 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3337 {
3338         uint32_t vec;
3339         struct rte_eth_dev *dev;
3340         struct rte_intr_handle *intr_handle;
3341         uint16_t qid;
3342         int rc;
3343
3344         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3345
3346         dev = &rte_eth_devices[port_id];
3347
3348         if (!dev->intr_handle) {
3349                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3350                 return -ENOTSUP;
3351         }
3352
3353         intr_handle = dev->intr_handle;
3354         if (!intr_handle->intr_vec) {
3355                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3356                 return -EPERM;
3357         }
3358
3359         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3360                 vec = intr_handle->intr_vec[qid];
3361                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3362                 if (rc && rc != -EEXIST) {
3363                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3364                                         " op %d epfd %d vec %u\n",
3365                                         port_id, qid, op, epfd, vec);
3366                 }
3367         }
3368
3369         return 0;
3370 }
3371
3372 const struct rte_memzone *
3373 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3374                          uint16_t queue_id, size_t size, unsigned align,
3375                          int socket_id)
3376 {
3377         char z_name[RTE_MEMZONE_NAMESIZE];
3378         const struct rte_memzone *mz;
3379
3380         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3381                  dev->device->driver->name, ring_name,
3382                  dev->data->port_id, queue_id);
3383
3384         mz = rte_memzone_lookup(z_name);
3385         if (mz)
3386                 return mz;
3387
3388         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3389 }
3390
3391 int
3392 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3393                           int epfd, int op, void *data)
3394 {
3395         uint32_t vec;
3396         struct rte_eth_dev *dev;
3397         struct rte_intr_handle *intr_handle;
3398         int rc;
3399
3400         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3401
3402         dev = &rte_eth_devices[port_id];
3403         if (queue_id >= dev->data->nb_rx_queues) {
3404                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3405                 return -EINVAL;
3406         }
3407
3408         if (!dev->intr_handle) {
3409                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3410                 return -ENOTSUP;
3411         }
3412
3413         intr_handle = dev->intr_handle;
3414         if (!intr_handle->intr_vec) {
3415                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3416                 return -EPERM;
3417         }
3418
3419         vec = intr_handle->intr_vec[queue_id];
3420         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3421         if (rc && rc != -EEXIST) {
3422                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3423                                 " op %d epfd %d vec %u\n",
3424                                 port_id, queue_id, op, epfd, vec);
3425                 return rc;
3426         }
3427
3428         return 0;
3429 }
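/*
 * Illustrative sketch, not part of the library: arming RX interrupts for one
 * queue with rte_eth_dev_rx_intr_ctl_q() above and then enabling them in the
 * PMD. RTE_EPOLL_PER_THREAD selects the calling lcore's private epoll
 * instance; the function name is a hypothetical placeholder.
 */
static int
example_arm_rx_interrupt(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	/* Add this queue's event fd to the per-thread epoll set. */
	ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
	if (ret)
		return ret;

	/* Ask the PMD to raise an interrupt when packets arrive. */
	return rte_eth_dev_rx_intr_enable(port_id, queue_id);
}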
3430
3431 int
3432 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3433                            uint16_t queue_id)
3434 {
3435         struct rte_eth_dev *dev;
3436
3437         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3438
3439         dev = &rte_eth_devices[port_id];
3440
3441         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3442         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3443                                                                 queue_id));
3444 }
3445
3446 int
3447 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3448                             uint16_t queue_id)
3449 {
3450         struct rte_eth_dev *dev;
3451
3452         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3453
3454         dev = &rte_eth_devices[port_id];
3455
3456         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3457         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3458                                                                 queue_id));
3459 }
3460
3461
3462 int
3463 rte_eth_dev_filter_supported(uint16_t port_id,
3464                              enum rte_filter_type filter_type)
3465 {
3466         struct rte_eth_dev *dev;
3467
3468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3469
3470         dev = &rte_eth_devices[port_id];
3471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3472         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3473                                 RTE_ETH_FILTER_NOP, NULL);
3474 }
3475
3476 int
3477 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3478                         enum rte_filter_op filter_op, void *arg)
3479 {
3480         struct rte_eth_dev *dev;
3481
3482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3483
3484         dev = &rte_eth_devices[port_id];
3485         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3486         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3487                                                              filter_op, arg));
3488 }
3489
3490 void *
3491 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3492                 rte_rx_callback_fn fn, void *user_param)
3493 {
3494 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3495         rte_errno = ENOTSUP;
3496         return NULL;
3497 #endif
3498         /* check input parameters */
3499         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3500                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3501                 rte_errno = EINVAL;
3502                 return NULL;
3503         }
3504         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3505
3506         if (cb == NULL) {
3507                 rte_errno = ENOMEM;
3508                 return NULL;
3509         }
3510
3511         cb->fn.rx = fn;
3512         cb->param = user_param;
3513
3514         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3515         /* Add the callbacks in FIFO order. */
3516         struct rte_eth_rxtx_callback *tail =
3517                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3518
3519         if (!tail) {
3520                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3521
3522         } else {
3523                 while (tail->next)
3524                         tail = tail->next;
3525                 tail->next = cb;
3526         }
3527         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3528
3529         return cb;
3530 }
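/*
 * Illustrative sketch, not part of the library: a per-queue RX callback that
 * counts received packets, attached with rte_eth_add_rx_callback() above.
 * The counter and function names are hypothetical; RTE_ETHDEV_RXTX_CALLBACKS
 * must be enabled for the callback to take effect.
 */
static uint16_t
example_count_rx_cb(uint16_t port_id, uint16_t queue_id,
		    struct rte_mbuf *pkts[], uint16_t nb_pkts,
		    uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);

	*counter += nb_pkts;
	return nb_pkts; /* pass the burst through unchanged */
}

static void *
example_attach_rx_counter(uint16_t port_id, uint16_t queue_id,
			  uint64_t *counter)
{
	/* The returned handle is later passed to rte_eth_remove_rx_callback(). */
	return rte_eth_add_rx_callback(port_id, queue_id,
				       example_count_rx_cb, counter);
}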
3531
3532 void *
3533 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3534                 rte_rx_callback_fn fn, void *user_param)
3535 {
3536 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3537         rte_errno = ENOTSUP;
3538         return NULL;
3539 #endif
3540         /* check input parameters */
3541         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3542                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3543                 rte_errno = EINVAL;
3544                 return NULL;
3545         }
3546
3547         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3548
3549         if (cb == NULL) {
3550                 rte_errno = ENOMEM;
3551                 return NULL;
3552         }
3553
3554         cb->fn.rx = fn;
3555         cb->param = user_param;
3556
3557         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3558         /* Add the callbacks at first position */
3559         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3560         rte_smp_wmb();
3561         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3562         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3563
3564         return cb;
3565 }
3566
3567 void *
3568 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3569                 rte_tx_callback_fn fn, void *user_param)
3570 {
3571 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3572         rte_errno = ENOTSUP;
3573         return NULL;
3574 #endif
3575         /* check input parameters */
3576         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3577                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3578                 rte_errno = EINVAL;
3579                 return NULL;
3580         }
3581
3582         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3583
3584         if (cb == NULL) {
3585                 rte_errno = ENOMEM;
3586                 return NULL;
3587         }
3588
3589         cb->fn.tx = fn;
3590         cb->param = user_param;
3591
3592         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3593         /* Add the callbacks in FIFO order. */
3594         struct rte_eth_rxtx_callback *tail =
3595                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3596
3597         if (!tail) {
3598                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3599
3600         } else {
3601                 while (tail->next)
3602                         tail = tail->next;
3603                 tail->next = cb;
3604         }
3605         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3606
3607         return cb;
3608 }
3609
3610 int
3611 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3612                 struct rte_eth_rxtx_callback *user_cb)
3613 {
3614 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3615         return -ENOTSUP;
3616 #endif
3617         /* Check input parameters. */
3618         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3619         if (user_cb == NULL ||
3620                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3621                 return -EINVAL;
3622
3623         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3624         struct rte_eth_rxtx_callback *cb;
3625         struct rte_eth_rxtx_callback **prev_cb;
3626         int ret = -EINVAL;
3627
3628         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3629         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3630         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3631                 cb = *prev_cb;
3632                 if (cb == user_cb) {
3633                         /* Remove the user cb from the callback list. */
3634                         *prev_cb = cb->next;
3635                         ret = 0;
3636                         break;
3637                 }
3638         }
3639         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3640
3641         return ret;
3642 }
3643
3644 int
3645 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3646                 struct rte_eth_rxtx_callback *user_cb)
3647 {
3648 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3649         return -ENOTSUP;
3650 #endif
3651         /* Check input parameters. */
3652         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3653         if (user_cb == NULL ||
3654                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3655                 return -EINVAL;
3656
3657         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3658         int ret = -EINVAL;
3659         struct rte_eth_rxtx_callback *cb;
3660         struct rte_eth_rxtx_callback **prev_cb;
3661
3662         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3663         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3664         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3665                 cb = *prev_cb;
3666                 if (cb == user_cb) {
3667                         /* Remove the user cb from the callback list. */
3668                         *prev_cb = cb->next;
3669                         ret = 0;
3670                         break;
3671                 }
3672         }
3673         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3674
3675         return ret;
3676 }
3677
3678 int
3679 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3680         struct rte_eth_rxq_info *qinfo)
3681 {
3682         struct rte_eth_dev *dev;
3683
3684         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3685
3686         if (qinfo == NULL)
3687                 return -EINVAL;
3688
3689         dev = &rte_eth_devices[port_id];
3690         if (queue_id >= dev->data->nb_rx_queues) {
3691                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3692                 return -EINVAL;
3693         }
3694
3695         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3696
3697         memset(qinfo, 0, sizeof(*qinfo));
3698         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3699         return 0;
3700 }
3701
3702 int
3703 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3704         struct rte_eth_txq_info *qinfo)
3705 {
3706         struct rte_eth_dev *dev;
3707
3708         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3709
3710         if (qinfo == NULL)
3711                 return -EINVAL;
3712
3713         dev = &rte_eth_devices[port_id];
3714         if (queue_id >= dev->data->nb_tx_queues) {
3715                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3716                 return -EINVAL;
3717         }
3718
3719         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3720
3721         memset(qinfo, 0, sizeof(*qinfo));
3722         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3723         return 0;
3724 }
3725
3726 int
3727 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3728                              struct ether_addr *mc_addr_set,
3729                              uint32_t nb_mc_addr)
3730 {
3731         struct rte_eth_dev *dev;
3732
3733         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3734
3735         dev = &rte_eth_devices[port_id];
3736         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3737         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3738                                                 mc_addr_set, nb_mc_addr));
3739 }
3740
3741 int
3742 rte_eth_timesync_enable(uint16_t port_id)
3743 {
3744         struct rte_eth_dev *dev;
3745
3746         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3747         dev = &rte_eth_devices[port_id];
3748
3749         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3750         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3751 }
3752
3753 int
3754 rte_eth_timesync_disable(uint16_t port_id)
3755 {
3756         struct rte_eth_dev *dev;
3757
3758         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3759         dev = &rte_eth_devices[port_id];
3760
3761         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3762         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3763 }
3764
3765 int
3766 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3767                                    uint32_t flags)
3768 {
3769         struct rte_eth_dev *dev;
3770
3771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3772         dev = &rte_eth_devices[port_id];
3773
3774         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3775         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3776                                 (dev, timestamp, flags));
3777 }
3778
3779 int
3780 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3781                                    struct timespec *timestamp)
3782 {
3783         struct rte_eth_dev *dev;
3784
3785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3786         dev = &rte_eth_devices[port_id];
3787
3788         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3789         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3790                                 (dev, timestamp));
3791 }
3792
3793 int
3794 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3795 {
3796         struct rte_eth_dev *dev;
3797
3798         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3799         dev = &rte_eth_devices[port_id];
3800
3801         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3802         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3803                                                                       delta));
3804 }
3805
3806 int
3807 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3808 {
3809         struct rte_eth_dev *dev;
3810
3811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3812         dev = &rte_eth_devices[port_id];
3813
3814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3815         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3816                                                                 timestamp));
3817 }
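/*
 * Illustrative sketch, not part of the library: enabling PTP timestamping and
 * sampling the NIC clock with rte_eth_timesync_read_time() above. Whether a
 * port supports these ops depends on the PMD; the function name is a
 * hypothetical placeholder.
 */
static int
example_read_nic_clock(uint16_t port_id, struct timespec *ts)
{
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret)
		return ret; /* e.g. -ENOTSUP when the PMD lacks the op */

	return rte_eth_timesync_read_time(port_id, ts);
}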
3818
3819 int
3820 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3821 {
3822         struct rte_eth_dev *dev;
3823
3824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3825         dev = &rte_eth_devices[port_id];
3826
3827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3828         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3829                                                                 timestamp));
3830 }
3831
3832 int
3833 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3834 {
3835         struct rte_eth_dev *dev;
3836
3837         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3838
3839         dev = &rte_eth_devices[port_id];
3840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3841         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3842 }
3843
3844 int
3845 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3846 {
3847         struct rte_eth_dev *dev;
3848
3849         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3850
3851         dev = &rte_eth_devices[port_id];
3852         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3853         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3854 }
3855
3856 int
3857 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3858 {
3859         struct rte_eth_dev *dev;
3860
3861         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3862
3863         dev = &rte_eth_devices[port_id];
3864         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3865         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3866 }
3867
3868 int
3869 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3870 {
3871         struct rte_eth_dev *dev;
3872
3873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3874
3875         dev = &rte_eth_devices[port_id];
3876         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3877         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3878 }
3879
3880 int
3881 rte_eth_dev_get_dcb_info(uint16_t port_id,
3882                              struct rte_eth_dcb_info *dcb_info)
3883 {
3884         struct rte_eth_dev *dev;
3885
3886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3887
3888         dev = &rte_eth_devices[port_id];
3889         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3890
3891         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3892         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
3893 }
3894
3895 int
3896 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3897                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3898 {
3899         struct rte_eth_dev *dev;
3900
3901         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3902         if (l2_tunnel == NULL) {
3903                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3904                 return -EINVAL;
3905         }
3906
3907         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3908                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3909                 return -EINVAL;
3910         }
3911
3912         dev = &rte_eth_devices[port_id];
3913         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3914                                 -ENOTSUP);
3915         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
3916                                                                 l2_tunnel));
3917 }
3918
3919 int
3920 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3921                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3922                                   uint32_t mask,
3923                                   uint8_t en)
3924 {
3925         struct rte_eth_dev *dev;
3926
3927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3928
3929         if (l2_tunnel == NULL) {
3930                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3931                 return -EINVAL;
3932         }
3933
3934         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3935                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3936                 return -EINVAL;
3937         }
3938
3939         if (mask == 0) {
3940                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3941                 return -EINVAL;
3942         }
3943
3944         dev = &rte_eth_devices[port_id];
3945         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3946                                 -ENOTSUP);
3947         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
3948                                                         l2_tunnel, mask, en));
3949 }
3950
3951 static void
3952 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3953                            const struct rte_eth_desc_lim *desc_lim)
3954 {
3955         if (desc_lim->nb_align != 0)
3956                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3957
3958         if (desc_lim->nb_max != 0)
3959                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3960
3961         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3962 }
3963
3964 int
3965 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3966                                  uint16_t *nb_rx_desc,
3967                                  uint16_t *nb_tx_desc)
3968 {
3969         struct rte_eth_dev *dev;
3970         struct rte_eth_dev_info dev_info;
3971
3972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3973
3974         dev = &rte_eth_devices[port_id];
3975         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3976
3977         rte_eth_dev_info_get(port_id, &dev_info);
3978
3979         if (nb_rx_desc != NULL)
3980                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3981
3982         if (nb_tx_desc != NULL)
3983                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3984
3985         return 0;
3986 }
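/*
 * Illustrative sketch, not part of the library: clamping requested descriptor
 * counts with rte_eth_dev_adjust_nb_rx_tx_desc() above before queue setup.
 * The initial values are arbitrary; the adjusted ones respect the PMD's
 * min/max/alignment limits reported in rte_eth_dev_info.
 */
static int
example_adjust_desc_counts(uint16_t port_id,
			   uint16_t *nb_rxd, uint16_t *nb_txd)
{
	*nb_rxd = 1024;
	*nb_txd = 1024;

	/* Values are rewritten in place to something the PMD accepts and
	 * would then be passed to rte_eth_rx/tx_queue_setup().
	 */
	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}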
3987
3988 int
3989 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3990 {
3991         struct rte_eth_dev *dev;
3992
3993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3994
3995         if (pool == NULL)
3996                 return -EINVAL;
3997
3998         dev = &rte_eth_devices[port_id];
3999
4000         if (*dev->dev_ops->pool_ops_supported == NULL)
4001                 return 1; /* all pools are supported */
4002
4003         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4004 }