[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37
38 #include "rte_ether.h"
39 #include "rte_ethdev.h"
40 #include "rte_ethdev_driver.h"
41 #include "ethdev_profile.h"
42
43 static int ethdev_logtype;
44
45 #define ethdev_log(level, fmt, ...) \
46         rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
47
48 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
49 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
50 static uint8_t eth_dev_last_created_port;
51
52 /* spinlock for eth device callbacks */
53 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
54
55 /* spinlock for add/remove rx callbacks */
56 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
57
58 /* spinlock for add/remove tx callbacks */
59 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
60
61 /* spinlock for shared data allocation */
62 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
63
64 /* store statistics names and their offsets in the stats structure */
65 struct rte_eth_xstats_name_off {
66         char name[RTE_ETH_XSTATS_NAME_SIZE];
67         unsigned offset;
68 };
69
70 /* Shared memory between primary and secondary processes. */
71 static struct {
72         uint64_t next_owner_id;
73         rte_spinlock_t ownership_lock;
74         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
75 } *rte_eth_dev_shared_data;
76
77 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
78         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
79         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
80         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
81         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
82         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
83         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
84         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
85         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
86                 rx_nombuf)},
87 };
88
89 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
90
91 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
92         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
93         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
94         {"errors", offsetof(struct rte_eth_stats, q_errors)},
95 };
96
97 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
98                 sizeof(rte_rxq_stats_strings[0]))
99
100 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
101         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
102         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
103 };
104 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
105                 sizeof(rte_txq_stats_strings[0]))
106
107 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
108         { DEV_RX_OFFLOAD_##_name, #_name }
109
110 static const struct {
111         uint64_t offload;
112         const char *name;
113 } rte_rx_offload_names[] = {
114         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
127         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
128         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
129         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
130 };
131
132 #undef RTE_RX_OFFLOAD_BIT2STR
133
134 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
135         { DEV_TX_OFFLOAD_##_name, #_name }
136
137 static const struct {
138         uint64_t offload;
139         const char *name;
140 } rte_tx_offload_names[] = {
141         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
142         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
143         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
147         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
149         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
150         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
151         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
155         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
156         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
157         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
158         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
159 };
160
161 #undef RTE_TX_OFFLOAD_BIT2STR
162
163 /**
164  * The user application callback description.
165  *
166  * It contains the callback address to be registered by the user
167  * application, a pointer to the callback parameters, and the event type.
168  */
169 struct rte_eth_dev_callback {
170         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
171         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
172         void *cb_arg;                           /**< Parameter for callback */
173         void *ret_param;                        /**< Return parameter */
174         enum rte_eth_event_type event;          /**< Interrupt event type */
175         uint32_t active;                        /**< Callback is executing */
176 };
177
178 enum {
179         STAT_QMAP_TX = 0,
180         STAT_QMAP_RX
181 };
182
183 uint16_t
184 rte_eth_find_next(uint16_t port_id)
185 {
186         while (port_id < RTE_MAX_ETHPORTS &&
187                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
188                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
189                 port_id++;
190
191         if (port_id >= RTE_MAX_ETHPORTS)
192                 return RTE_MAX_ETHPORTS;
193
194         return port_id;
195 }
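
/*
 * Usage sketch (illustrative, not part of this file): walking every
 * attached port with rte_eth_find_next(); the RTE_ETH_FOREACH_DEV()
 * macro in rte_ethdev.h wraps this same kind of loop.
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is attached\n", pid);
 */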
196
197 static void
198 rte_eth_dev_shared_data_prepare(void)
199 {
200         const unsigned flags = 0;
201         const struct rte_memzone *mz;
202
203         rte_spinlock_lock(&rte_eth_shared_data_lock);
204
205         if (rte_eth_dev_shared_data == NULL) {
206                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
207                         /* Allocate port data and ownership shared memory. */
208                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
209                                         sizeof(*rte_eth_dev_shared_data),
210                                         rte_socket_id(), flags);
211                 } else
212                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
213                 if (mz == NULL)
214                         rte_panic("Cannot allocate ethdev shared data\n");
215
216                 rte_eth_dev_shared_data = mz->addr;
217                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
218                         rte_eth_dev_shared_data->next_owner_id =
219                                         RTE_ETH_DEV_NO_OWNER + 1;
220                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
221                         memset(rte_eth_dev_shared_data->data, 0,
222                                sizeof(rte_eth_dev_shared_data->data));
223                 }
224         }
225
226         rte_spinlock_unlock(&rte_eth_shared_data_lock);
227 }
228
229 struct rte_eth_dev *
230 rte_eth_dev_allocated(const char *name)
231 {
232         unsigned i;
233
234         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
235                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
236                     strcmp(rte_eth_devices[i].data->name, name) == 0)
237                         return &rte_eth_devices[i];
238         }
239         return NULL;
240 }
241
242 static uint16_t
243 rte_eth_dev_find_free_port(void)
244 {
245         unsigned i;
246
247         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
248                 /* Using shared name field to find a free port. */
249                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
250                         RTE_ASSERT(rte_eth_devices[i].state ==
251                                    RTE_ETH_DEV_UNUSED);
252                         return i;
253                 }
254         }
255         return RTE_MAX_ETHPORTS;
256 }
257
258 static struct rte_eth_dev *
259 eth_dev_get(uint16_t port_id)
260 {
261         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
262
263         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
264         eth_dev->state = RTE_ETH_DEV_ATTACHED;
265
266         eth_dev_last_created_port = port_id;
267
268         return eth_dev;
269 }
270
271 struct rte_eth_dev *
272 rte_eth_dev_allocate(const char *name)
273 {
274         uint16_t port_id;
275         struct rte_eth_dev *eth_dev = NULL;
276
277         rte_eth_dev_shared_data_prepare();
278
279         /* Synchronize port creation between primary and secondary threads. */
280         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
281
282         port_id = rte_eth_dev_find_free_port();
283         if (port_id == RTE_MAX_ETHPORTS) {
284                 ethdev_log(ERR, "Reached maximum number of Ethernet ports");
285                 goto unlock;
286         }
287
288         if (rte_eth_dev_allocated(name) != NULL) {
289                 ethdev_log(ERR,
290                         "Ethernet Device with name %s already allocated!",
291                         name);
292                 goto unlock;
293         }
294
295         eth_dev = eth_dev_get(port_id);
296         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
297         eth_dev->data->port_id = port_id;
298         eth_dev->data->mtu = ETHER_MTU;
299
300 unlock:
301         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
302
303         if (eth_dev != NULL)
304                 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
305
306         return eth_dev;
307 }
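
/*
 * Usage sketch (illustrative, not part of this file): a PMD probe
 * routine typically allocates its port as below; "net_example0" and
 * example_dev_ops are hypothetical names.
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->dev_ops = &example_dev_ops;
 */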
308
309 /*
310  * Attach to a port already registered by the primary process, which
311  * makes sure that the same device gets the same port id in both
312  * the primary and secondary processes.
313  */
314 struct rte_eth_dev *
315 rte_eth_dev_attach_secondary(const char *name)
316 {
317         uint16_t i;
318         struct rte_eth_dev *eth_dev = NULL;
319
320         rte_eth_dev_shared_data_prepare();
321
322         /* Synchronize port attachment to primary port creation and release. */
323         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
324
325         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
326                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
327                         break;
328         }
329         if (i == RTE_MAX_ETHPORTS) {
330                 RTE_PMD_DEBUG_TRACE(
331                         "device %s is not driven by the primary process\n",
332                         name);
333         } else {
334                 eth_dev = eth_dev_get(i);
335                 RTE_ASSERT(eth_dev->data->port_id == i);
336         }
337
338         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
339         return eth_dev;
340 }
341
342 int
343 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
344 {
345         if (eth_dev == NULL)
346                 return -EINVAL;
347
348         rte_eth_dev_shared_data_prepare();
349
350         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
351
352         eth_dev->state = RTE_ETH_DEV_UNUSED;
353
354         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
355
356         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
357
358         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
359
360         return 0;
361 }
362
363 int
364 rte_eth_dev_is_valid_port(uint16_t port_id)
365 {
366         if (port_id >= RTE_MAX_ETHPORTS ||
367             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
368                 return 0;
369         else
370                 return 1;
371 }
372
373 static int
374 rte_eth_is_valid_owner_id(uint64_t owner_id)
375 {
376         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
377             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
378                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016"PRIX64".\n", owner_id);
379                 return 0;
380         }
381         return 1;
382 }
383
384 uint64_t __rte_experimental
385 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
386 {
387         while (port_id < RTE_MAX_ETHPORTS &&
388                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
389                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
390                rte_eth_devices[port_id].data->owner.id != owner_id))
391                 port_id++;
392
393         if (port_id >= RTE_MAX_ETHPORTS)
394                 return RTE_MAX_ETHPORTS;
395
396         return port_id;
397 }
398
399 int __rte_experimental
400 rte_eth_dev_owner_new(uint64_t *owner_id)
401 {
402         rte_eth_dev_shared_data_prepare();
403
404         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
405
406         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
407
408         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
409         return 0;
410 }
411
412 static int
413 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
414                        const struct rte_eth_dev_owner *new_owner)
415 {
416         struct rte_eth_dev_owner *port_owner;
417         int sret;
418
419         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
420
421         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
422             !rte_eth_is_valid_owner_id(old_owner_id))
423                 return -EINVAL;
424
425         port_owner = &rte_eth_devices[port_id].data->owner;
426         if (port_owner->id != old_owner_id) {
427                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
428                                     " by %s_%016"PRIX64".\n", port_id,
429                                     port_owner->name, port_owner->id);
430                 return -EPERM;
431         }
432
433         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
434                         new_owner->name);
435         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
436                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
437                                     port_id);
438
439         port_owner->id = new_owner->id;
440
441         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016"PRIX64".\n", port_id,
442                             new_owner->name, new_owner->id);
443
444         return 0;
445 }
446
447 int __rte_experimental
448 rte_eth_dev_owner_set(const uint16_t port_id,
449                       const struct rte_eth_dev_owner *owner)
450 {
451         int ret;
452
453         rte_eth_dev_shared_data_prepare();
454
455         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
456
457         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
458
459         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
460         return ret;
461 }
462
463 int __rte_experimental
464 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
465 {
466         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
467                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
468         int ret;
469
470         rte_eth_dev_shared_data_prepare();
471
472         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
473
474         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
475
476         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
477         return ret;
478 }
479
480 void __rte_experimental
481 rte_eth_dev_owner_delete(const uint64_t owner_id)
482 {
483         uint16_t port_id;
484
485         rte_eth_dev_shared_data_prepare();
486
487         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
488
489         if (rte_eth_is_valid_owner_id(owner_id)) {
490                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
491                         memset(&rte_eth_devices[port_id].data->owner, 0,
492                                sizeof(struct rte_eth_dev_owner));
493                 RTE_PMD_DEBUG_TRACE("All ports owned by owner %016"PRIX64
494                                     " have been released.\n", owner_id);
495         }
496
497         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
498 }
499
500 int __rte_experimental
501 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
502 {
503         int ret = 0;
504
505         rte_eth_dev_shared_data_prepare();
506
507         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
508
509         if (!rte_eth_dev_is_valid_port(port_id)) {
510                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
511                 ret = -ENODEV;
512         } else {
513                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
514                            sizeof(*owner));
515         }
516
517         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
518         return ret;
519 }
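
/*
 * Usage sketch (illustrative, not part of this file): the typical
 * ownership flow for an application or a failsafe-style PMD; "my_app"
 * is a hypothetical owner name.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... the port now refuses changes from other owners ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */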
520
521 int
522 rte_eth_dev_socket_id(uint16_t port_id)
523 {
524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
525         return rte_eth_devices[port_id].data->numa_node;
526 }
527
528 void *
529 rte_eth_dev_get_sec_ctx(uint16_t port_id)
530 {
531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
532         return rte_eth_devices[port_id].security_ctx;
533 }
534
535 uint16_t
536 rte_eth_dev_count(void)
537 {
538         return rte_eth_dev_count_avail();
539 }
540
541 uint16_t
542 rte_eth_dev_count_avail(void)
543 {
544         uint16_t p;
545         uint16_t count;
546
547         count = 0;
548
549         RTE_ETH_FOREACH_DEV(p)
550                 count++;
551
552         return count;
553 }
554
555 uint16_t
556 rte_eth_dev_count_total(void)
557 {
558         uint16_t port, count = 0;
559
560         for (port = 0; port < RTE_MAX_ETHPORTS; port++)
561                 if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
562                         count++;
563
564         return count;
565 }
566
567 int
568 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
569 {
570         char *tmp;
571
572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
573
574         if (name == NULL) {
575                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
576                 return -EINVAL;
577         }
578
579         /* shouldn't check 'rte_eth_devices[i].data',
580          * because it might be overwritten by VDEV PMD */
581         tmp = rte_eth_dev_shared_data->data[port_id].name;
582         strcpy(name, tmp);
583         return 0;
584 }
585
586 int
587 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
588 {
589         uint32_t pid;
590
591         if (name == NULL) {
592                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
593                 return -EINVAL;
594         }
595
596         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
597                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
598                     !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
599                         *port_id = pid;
600                         return 0;
601                 }
602         }
603
604         return -ENODEV;
605 }
606
607 static int
608 eth_err(uint16_t port_id, int ret)
609 {
610         if (ret == 0)
611                 return 0;
612         if (rte_eth_dev_is_removed(port_id))
613                 return -EIO;
614         return ret;
615 }
616
617 /* attach the new device, then store the port_id of the device */
618 int
619 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
620 {
621         int current = rte_eth_dev_count_total();
622         struct rte_devargs da;
623         int ret = -1;
624
625         memset(&da, 0, sizeof(da));
626
627         if ((devargs == NULL) || (port_id == NULL)) {
628                 ret = -EINVAL;
629                 goto err;
630         }
631
632         /* parse devargs */
633         if (rte_devargs_parse(&da, "%s", devargs))
634                 goto err;
635
636         ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
637         if (ret < 0)
638                 goto err;
639
640         /* no point looking at the port count if no port exists */
641         if (!rte_eth_dev_count_total()) {
642                 ethdev_log(ERR, "No port found for device (%s)", da.name);
643                 ret = -1;
644                 goto err;
645         }
646
647         /* if nothing happened, there is a bug here, since some driver told us
648          * it did attach a device, but did not create a port.
649          * FIXME: race condition in case of plug-out of another device
650          */
651         if (current == rte_eth_dev_count_total()) {
652                 ret = -1;
653                 goto err;
654         }
655
656         *port_id = eth_dev_last_created_port;
657         ret = 0;
658
659 err:
660         free(da.args);
661         return ret;
662 }
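
/*
 * Usage sketch (illustrative, not part of this file): hot-plugging a
 * virtual device from its devargs string and retrieving the new port
 * id; "net_null0" is only an example devargs value.
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_attach("net_null0", &port_id) == 0)
 *		... port_id now refers to the newly created port ...
 */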
663
664 /* detach the device, then store the name of the device */
665 int
666 rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
667 {
668         struct rte_device *dev;
669         struct rte_bus *bus;
670         uint32_t dev_flags;
671         int ret = -1;
672
673         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
674
675         dev_flags = rte_eth_devices[port_id].data->dev_flags;
676         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
677                 ethdev_log(ERR,
678                         "Port %" PRIu16 " is bonded, cannot detach", port_id);
679                 return -ENOTSUP;
680         }
681
682         dev = rte_eth_devices[port_id].device;
683         if (dev == NULL)
684                 return -EINVAL;
685
686         bus = rte_bus_find_by_device(dev);
687         if (bus == NULL)
688                 return -ENOENT;
689
690         ret = rte_eal_hotplug_remove(bus->name, dev->name);
691         if (ret < 0)
692                 return ret;
693
694         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
695         return 0;
696 }
697
698 static int
699 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
700 {
701         uint16_t old_nb_queues = dev->data->nb_rx_queues;
702         void **rxq;
703         unsigned i;
704
705         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
706                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
707                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
708                                 RTE_CACHE_LINE_SIZE);
709                 if (dev->data->rx_queues == NULL) {
710                         dev->data->nb_rx_queues = 0;
711                         return -(ENOMEM);
712                 }
713         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
714                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
715
716                 rxq = dev->data->rx_queues;
717
718                 for (i = nb_queues; i < old_nb_queues; i++)
719                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
720                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
721                                 RTE_CACHE_LINE_SIZE);
722                 if (rxq == NULL)
723                         return -(ENOMEM);
724                 if (nb_queues > old_nb_queues) {
725                         uint16_t new_qs = nb_queues - old_nb_queues;
726
727                         memset(rxq + old_nb_queues, 0,
728                                 sizeof(rxq[0]) * new_qs);
729                 }
730
731                 dev->data->rx_queues = rxq;
732
733         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
734                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
735
736                 rxq = dev->data->rx_queues;
737
738                 for (i = nb_queues; i < old_nb_queues; i++)
739                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
740
741                 rte_free(dev->data->rx_queues);
742                 dev->data->rx_queues = NULL;
743         }
744         dev->data->nb_rx_queues = nb_queues;
745         return 0;
746 }
747
748 int
749 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
750 {
751         struct rte_eth_dev *dev;
752
753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
754
755         dev = &rte_eth_devices[port_id];
756         if (!dev->data->dev_started) {
757                 RTE_PMD_DEBUG_TRACE(
758                     "port %d must be started before starting any queue\n", port_id);
759                 return -EINVAL;
760         }
761
762         if (rx_queue_id >= dev->data->nb_rx_queues) {
763                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
764                 return -EINVAL;
765         }
766
767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
768
769         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
770                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
771                         " already started\n",
772                         rx_queue_id, port_id);
773                 return 0;
774         }
775
776         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
777                                                              rx_queue_id));
778
779 }
780
781 int
782 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
783 {
784         struct rte_eth_dev *dev;
785
786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
787
788         dev = &rte_eth_devices[port_id];
789         if (rx_queue_id >= dev->data->nb_rx_queues) {
790                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
791                 return -EINVAL;
792         }
793
794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
795
796         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
797                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
798                         " already stopped\n",
799                         rx_queue_id, port_id);
800                 return 0;
801         }
802
803         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
804
805 }
806
807 int
808 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
809 {
810         struct rte_eth_dev *dev;
811
812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
813
814         dev = &rte_eth_devices[port_id];
815         if (!dev->data->dev_started) {
816                 RTE_PMD_DEBUG_TRACE(
817                     "port %d must be started before starting any queue\n", port_id);
818                 return -EINVAL;
819         }
820
821         if (tx_queue_id >= dev->data->nb_tx_queues) {
822                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
823                 return -EINVAL;
824         }
825
826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
827
828         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
829                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
830                         " already started\n",
831                         tx_queue_id, port_id);
832                 return 0;
833         }
834
835         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
836                                                              tx_queue_id));
837
838 }
839
840 int
841 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
842 {
843         struct rte_eth_dev *dev;
844
845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
846
847         dev = &rte_eth_devices[port_id];
848         if (tx_queue_id >= dev->data->nb_tx_queues) {
849                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
850                 return -EINVAL;
851         }
852
853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
854
855         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
856                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
857                         " already stopped\n",
858                         tx_queue_id, port_id);
859                 return 0;
860         }
861
862         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
863
864 }
865
866 static int
867 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
868 {
869         uint16_t old_nb_queues = dev->data->nb_tx_queues;
870         void **txq;
871         unsigned i;
872
873         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
874                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
875                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
876                                                    RTE_CACHE_LINE_SIZE);
877                 if (dev->data->tx_queues == NULL) {
878                         dev->data->nb_tx_queues = 0;
879                         return -(ENOMEM);
880                 }
881         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
882                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
883
884                 txq = dev->data->tx_queues;
885
886                 for (i = nb_queues; i < old_nb_queues; i++)
887                         (*dev->dev_ops->tx_queue_release)(txq[i]);
888                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
889                                   RTE_CACHE_LINE_SIZE);
890                 if (txq == NULL)
891                         return -ENOMEM;
892                 if (nb_queues > old_nb_queues) {
893                         uint16_t new_qs = nb_queues - old_nb_queues;
894
895                         memset(txq + old_nb_queues, 0,
896                                sizeof(txq[0]) * new_qs);
897                 }
898
899                 dev->data->tx_queues = txq;
900
901         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
902                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
903
904                 txq = dev->data->tx_queues;
905
906                 for (i = nb_queues; i < old_nb_queues; i++)
907                         (*dev->dev_ops->tx_queue_release)(txq[i]);
908
909                 rte_free(dev->data->tx_queues);
910                 dev->data->tx_queues = NULL;
911         }
912         dev->data->nb_tx_queues = nb_queues;
913         return 0;
914 }
915
916 uint32_t
917 rte_eth_speed_bitflag(uint32_t speed, int duplex)
918 {
919         switch (speed) {
920         case ETH_SPEED_NUM_10M:
921                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
922         case ETH_SPEED_NUM_100M:
923                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
924         case ETH_SPEED_NUM_1G:
925                 return ETH_LINK_SPEED_1G;
926         case ETH_SPEED_NUM_2_5G:
927                 return ETH_LINK_SPEED_2_5G;
928         case ETH_SPEED_NUM_5G:
929                 return ETH_LINK_SPEED_5G;
930         case ETH_SPEED_NUM_10G:
931                 return ETH_LINK_SPEED_10G;
932         case ETH_SPEED_NUM_20G:
933                 return ETH_LINK_SPEED_20G;
934         case ETH_SPEED_NUM_25G:
935                 return ETH_LINK_SPEED_25G;
936         case ETH_SPEED_NUM_40G:
937                 return ETH_LINK_SPEED_40G;
938         case ETH_SPEED_NUM_50G:
939                 return ETH_LINK_SPEED_50G;
940         case ETH_SPEED_NUM_56G:
941                 return ETH_LINK_SPEED_56G;
942         case ETH_SPEED_NUM_100G:
943                 return ETH_LINK_SPEED_100G;
944         default:
945                 return 0;
946         }
947 }
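
/*
 * Usage sketch (illustrative, not part of this file): building the
 * link_speeds advertisement mask for a fixed 10G full-duplex link.
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */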
948
949 /**
950  * Convert from the legacy rxmode bit-field API to the Rx offloads API.
951  */
952 static void
953 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
954                                     uint64_t *rx_offloads)
955 {
956         uint64_t offloads = 0;
957
958         if (rxmode->header_split == 1)
959                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
960         if (rxmode->hw_ip_checksum == 1)
961                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
962         if (rxmode->hw_vlan_filter == 1)
963                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
964         if (rxmode->hw_vlan_strip == 1)
965                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
966         if (rxmode->hw_vlan_extend == 1)
967                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
968         if (rxmode->jumbo_frame == 1)
969                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
970         if (rxmode->hw_strip_crc == 1)
971                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
972         if (rxmode->enable_scatter == 1)
973                 offloads |= DEV_RX_OFFLOAD_SCATTER;
974         if (rxmode->enable_lro == 1)
975                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
976         if (rxmode->hw_timestamp == 1)
977                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
978         if (rxmode->security == 1)
979                 offloads |= DEV_RX_OFFLOAD_SECURITY;
980
981         *rx_offloads = offloads;
982 }
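
/*
 * Behaviour sketch for this internal helper (illustrative): a legacy
 * bit-field configuration such as
 *
 *	struct rte_eth_rxmode mode = { .hw_vlan_strip = 1, .jumbo_frame = 1 };
 *	uint64_t offloads;
 *
 *	rte_eth_convert_rx_offload_bitfield(&mode, &offloads);
 *
 * leaves offloads == (DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_JUMBO_FRAME).
 */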
983
984 /**
985  * Convert from the Rx offloads API to the legacy rxmode bit-field API.
986  */
987 static void
988 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
989                             struct rte_eth_rxmode *rxmode)
990 {
991
992         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
993                 rxmode->header_split = 1;
994         else
995                 rxmode->header_split = 0;
996         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
997                 rxmode->hw_ip_checksum = 1;
998         else
999                 rxmode->hw_ip_checksum = 0;
1000         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1001                 rxmode->hw_vlan_filter = 1;
1002         else
1003                 rxmode->hw_vlan_filter = 0;
1004         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1005                 rxmode->hw_vlan_strip = 1;
1006         else
1007                 rxmode->hw_vlan_strip = 0;
1008         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
1009                 rxmode->hw_vlan_extend = 1;
1010         else
1011                 rxmode->hw_vlan_extend = 0;
1012         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
1013                 rxmode->jumbo_frame = 1;
1014         else
1015                 rxmode->jumbo_frame = 0;
1016         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
1017                 rxmode->hw_strip_crc = 1;
1018         else
1019                 rxmode->hw_strip_crc = 0;
1020         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
1021                 rxmode->enable_scatter = 1;
1022         else
1023                 rxmode->enable_scatter = 0;
1024         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
1025                 rxmode->enable_lro = 1;
1026         else
1027                 rxmode->enable_lro = 0;
1028         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
1029                 rxmode->hw_timestamp = 1;
1030         else
1031                 rxmode->hw_timestamp = 0;
1032         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
1033                 rxmode->security = 1;
1034         else
1035                 rxmode->security = 0;
1036 }
1037
1038 const char * __rte_experimental
1039 rte_eth_dev_rx_offload_name(uint64_t offload)
1040 {
1041         const char *name = "UNKNOWN";
1042         unsigned int i;
1043
1044         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1045                 if (offload == rte_rx_offload_names[i].offload) {
1046                         name = rte_rx_offload_names[i].name;
1047                         break;
1048                 }
1049         }
1050
1051         return name;
1052 }
1053
1054 const char * __rte_experimental
1055 rte_eth_dev_tx_offload_name(uint64_t offload)
1056 {
1057         const char *name = "UNKNOWN";
1058         unsigned int i;
1059
1060         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1061                 if (offload == rte_tx_offload_names[i].offload) {
1062                         name = rte_tx_offload_names[i].name;
1063                         break;
1064                 }
1065         }
1066
1067         return name;
1068 }
1069
1070 int
1071 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1072                       const struct rte_eth_conf *dev_conf)
1073 {
1074         struct rte_eth_dev *dev;
1075         struct rte_eth_dev_info dev_info;
1076         struct rte_eth_conf local_conf = *dev_conf;
1077         int diag;
1078
1079         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1080
1081         dev = &rte_eth_devices[port_id];
1082
1083         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1084         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1085
1086         /* If number of queues specified by application for both Rx and Tx is
1087          * zero, use driver preferred values. This cannot be done individually
1088          * as it is valid for either Tx or Rx (but not both) to be zero.
1089          * If the driver does not provide any preferred values, fall back on
1090          * EAL defaults.
1091          */
1092         if (nb_rx_q == 0 && nb_tx_q == 0) {
1093                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1094                 if (nb_rx_q == 0)
1095                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1096                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1097                 if (nb_tx_q == 0)
1098                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1099         }
1100
1101         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1102                 RTE_PMD_DEBUG_TRACE(
1103                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
1104                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1105                 return -EINVAL;
1106         }
1107
1108         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1109                 RTE_PMD_DEBUG_TRACE(
1110                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
1111                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1112                 return -EINVAL;
1113         }
1114
1115         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1116         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1117
1118         if (dev->data->dev_started) {
1119                 RTE_PMD_DEBUG_TRACE(
1120                     "port %d must be stopped to allow configuration\n", port_id);
1121                 return -EBUSY;
1122         }
1123
1124         /*
1125          * Convert between the two offloads APIs so that a PMD needs to
1126          * support only one of them.
1127          */
1128         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
1129                 rte_eth_convert_rx_offload_bitfield(
1130                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1131         } else {
1132                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
1133                                             &local_conf.rxmode);
1134         }
1135
1136         /* Copy the dev_conf parameter into the dev structure */
1137         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1138
1139         /*
1140          * Check that the numbers of RX and TX queues are not greater
1141          * than the maximum number of RX and TX queues supported by the
1142          * configured device.
1143          */
1144         if (nb_rx_q > dev_info.max_rx_queues) {
1145                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1146                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1147                 return -EINVAL;
1148         }
1149
1150         if (nb_tx_q > dev_info.max_tx_queues) {
1151                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1152                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1153                 return -EINVAL;
1154         }
1155
1156         /* Check that the device supports requested interrupts */
1157         if ((dev_conf->intr_conf.lsc == 1) &&
1158                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1159                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1160                                         dev->device->driver->name);
1161                         return -EINVAL;
1162         }
1163         if ((dev_conf->intr_conf.rmv == 1) &&
1164             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1165                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1166                                     dev->device->driver->name);
1167                 return -EINVAL;
1168         }
1169
1170         /*
1171          * If jumbo frames are enabled, check that the maximum RX packet
1172          * length is supported by the configured device.
1173          */
1174         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1175                 if (dev_conf->rxmode.max_rx_pkt_len >
1176                     dev_info.max_rx_pktlen) {
1177                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1178                                 " > max valid value %u\n",
1179                                 port_id,
1180                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1181                                 (unsigned)dev_info.max_rx_pktlen);
1182                         return -EINVAL;
1183                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1184                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1185                                 " < min valid value %u\n",
1186                                 port_id,
1187                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1188                                 (unsigned)ETHER_MIN_LEN);
1189                         return -EINVAL;
1190                 }
1191         } else {
1192                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1193                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1194                         /* Use default value */
1195                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1196                                                         ETHER_MAX_LEN;
1197         }
1198
1199         /*
1200          * Setup new number of RX/TX queues and reconfigure device.
1201          */
1202         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1203         if (diag != 0) {
1204                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1205                                 port_id, diag);
1206                 return diag;
1207         }
1208
1209         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1210         if (diag != 0) {
1211                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1212                                 port_id, diag);
1213                 rte_eth_dev_rx_queue_config(dev, 0);
1214                 return diag;
1215         }
1216
1217         diag = (*dev->dev_ops->dev_configure)(dev);
1218         if (diag != 0) {
1219                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1220                                 port_id, diag);
1221                 rte_eth_dev_rx_queue_config(dev, 0);
1222                 rte_eth_dev_tx_queue_config(dev, 0);
1223                 return eth_err(port_id, diag);
1224         }
1225
1226         /* Initialize Rx profiling if enabled at compilation time. */
1227         diag = __rte_eth_profile_rx_init(port_id, dev);
1228         if (diag != 0) {
1229                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1230                                 port_id, diag);
1231                 rte_eth_dev_rx_queue_config(dev, 0);
1232                 rte_eth_dev_tx_queue_config(dev, 0);
1233                 return eth_err(port_id, diag);
1234         }
1235
1236         return 0;
1237 }
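
/*
 * Usage sketch (illustrative, not part of this file): a minimal
 * single-queue configuration using the offloads API.
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.rxmode.ignore_offload_bitfield = 1;
 *	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		... handle the error ...
 */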
1238
1239 void
1240 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1241 {
1242         if (dev->data->dev_started) {
1243                 RTE_PMD_DEBUG_TRACE(
1244                         "port %d must be stopped to allow reset\n",
1245                         dev->data->port_id);
1246                 return;
1247         }
1248
1249         rte_eth_dev_rx_queue_config(dev, 0);
1250         rte_eth_dev_tx_queue_config(dev, 0);
1251
1252         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1253 }
1254
1255 static void
1256 rte_eth_dev_config_restore(uint16_t port_id)
1257 {
1258         struct rte_eth_dev *dev;
1259         struct rte_eth_dev_info dev_info;
1260         struct ether_addr *addr;
1261         uint16_t i;
1262         uint32_t pool = 0;
1263         uint64_t pool_mask;
1264
1265         dev = &rte_eth_devices[port_id];
1266
1267         rte_eth_dev_info_get(port_id, &dev_info);
1268
1269         /* replay MAC address configuration including default MAC */
1270         addr = &dev->data->mac_addrs[0];
1271         if (*dev->dev_ops->mac_addr_set != NULL)
1272                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1273         else if (*dev->dev_ops->mac_addr_add != NULL)
1274                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1275
1276         if (*dev->dev_ops->mac_addr_add != NULL) {
1277                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1278                         addr = &dev->data->mac_addrs[i];
1279
1280                         /* skip zero address */
1281                         if (is_zero_ether_addr(addr))
1282                                 continue;
1283
1284                         pool = 0;
1285                         pool_mask = dev->data->mac_pool_sel[i];
1286
1287                         do {
1288                                 if (pool_mask & 1ULL)
1289                                         (*dev->dev_ops->mac_addr_add)(dev,
1290                                                 addr, i, pool);
1291                                 pool_mask >>= 1;
1292                                 pool++;
1293                         } while (pool_mask);
1294                 }
1295         }
1296
1297         /* replay promiscuous configuration */
1298         if (rte_eth_promiscuous_get(port_id) == 1)
1299                 rte_eth_promiscuous_enable(port_id);
1300         else if (rte_eth_promiscuous_get(port_id) == 0)
1301                 rte_eth_promiscuous_disable(port_id);
1302
1303         /* replay all multicast configuration */
1304         if (rte_eth_allmulticast_get(port_id) == 1)
1305                 rte_eth_allmulticast_enable(port_id);
1306         else if (rte_eth_allmulticast_get(port_id) == 0)
1307                 rte_eth_allmulticast_disable(port_id);
1308 }
1309
1310 int
1311 rte_eth_dev_start(uint16_t port_id)
1312 {
1313         struct rte_eth_dev *dev;
1314         int diag;
1315
1316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1317
1318         dev = &rte_eth_devices[port_id];
1319
1320         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1321
1322         if (dev->data->dev_started != 0) {
1323                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1324                         " already started\n",
1325                         port_id);
1326                 return 0;
1327         }
1328
1329         diag = (*dev->dev_ops->dev_start)(dev);
1330         if (diag == 0)
1331                 dev->data->dev_started = 1;
1332         else
1333                 return eth_err(port_id, diag);
1334
1335         rte_eth_dev_config_restore(port_id);
1336
1337         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1338                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1339                 (*dev->dev_ops->link_update)(dev, 0);
1340         }
1341         return 0;
1342 }
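
/*
 * Usage sketch (illustrative, not part of this file): the bring-up
 * order this API expects from applications.
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	... rte_eth_rx_queue_setup() / rte_eth_tx_queue_setup() per queue ...
 *	rte_eth_dev_start(port_id);
 */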
1343
1344 void
1345 rte_eth_dev_stop(uint16_t port_id)
1346 {
1347         struct rte_eth_dev *dev;
1348
1349         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1350         dev = &rte_eth_devices[port_id];
1351
1352         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1353
1354         if (dev->data->dev_started == 0) {
1355                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1356                         " already stopped\n",
1357                         port_id);
1358                 return;
1359         }
1360
1361         dev->data->dev_started = 0;
1362         (*dev->dev_ops->dev_stop)(dev);
1363 }
1364
1365 int
1366 rte_eth_dev_set_link_up(uint16_t port_id)
1367 {
1368         struct rte_eth_dev *dev;
1369
1370         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1371
1372         dev = &rte_eth_devices[port_id];
1373
1374         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1375         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1376 }
1377
1378 int
1379 rte_eth_dev_set_link_down(uint16_t port_id)
1380 {
1381         struct rte_eth_dev *dev;
1382
1383         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1384
1385         dev = &rte_eth_devices[port_id];
1386
1387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1388         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1389 }
1390
1391 void
1392 rte_eth_dev_close(uint16_t port_id)
1393 {
1394         struct rte_eth_dev *dev;
1395
1396         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1397         dev = &rte_eth_devices[port_id];
1398
1399         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1400         dev->data->dev_started = 0;
1401         (*dev->dev_ops->dev_close)(dev);
1402
1403         dev->data->nb_rx_queues = 0;
1404         rte_free(dev->data->rx_queues);
1405         dev->data->rx_queues = NULL;
1406         dev->data->nb_tx_queues = 0;
1407         rte_free(dev->data->tx_queues);
1408         dev->data->tx_queues = NULL;
1409 }
1410
1411 int
1412 rte_eth_dev_reset(uint16_t port_id)
1413 {
1414         struct rte_eth_dev *dev;
1415         int ret;
1416
1417         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1418         dev = &rte_eth_devices[port_id];
1419
1420         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1421
1422         rte_eth_dev_stop(port_id);
1423         ret = dev->dev_ops->dev_reset(dev);
1424
1425         return eth_err(port_id, ret);
1426 }
1427
1428 int __rte_experimental
1429 rte_eth_dev_is_removed(uint16_t port_id)
1430 {
1431         struct rte_eth_dev *dev;
1432         int ret;
1433
1434         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1435
1436         dev = &rte_eth_devices[port_id];
1437
1438         if (dev->state == RTE_ETH_DEV_REMOVED)
1439                 return 1;
1440
1441         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1442
1443         ret = dev->dev_ops->is_removed(dev);
1444         if (ret != 0)
1445                 /* Device is physically removed. */
1446                 dev->state = RTE_ETH_DEV_REMOVED;
1447
1448         return ret;
1449 }
1450
1451 int
1452 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1453                        uint16_t nb_rx_desc, unsigned int socket_id,
1454                        const struct rte_eth_rxconf *rx_conf,
1455                        struct rte_mempool *mp)
1456 {
1457         int ret;
1458         uint32_t mbp_buf_size;
1459         struct rte_eth_dev *dev;
1460         struct rte_eth_dev_info dev_info;
1461         struct rte_eth_rxconf local_conf;
1462         void **rxq;
1463
1464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1465
1466         dev = &rte_eth_devices[port_id];
1467         if (rx_queue_id >= dev->data->nb_rx_queues) {
1468                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1469                 return -EINVAL;
1470         }
1471
1472         if (dev->data->dev_started) {
1473                 RTE_PMD_DEBUG_TRACE(
1474                     "port %d must be stopped to allow configuration\n", port_id);
1475                 return -EBUSY;
1476         }
1477
1478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1479         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1480
1481         /*
1482          * Check the size of the mbuf data buffer.
1483          * This value must be provided in the private data of the memory pool.
1484          * First check that the memory pool has valid private data.
1485          */
1486         rte_eth_dev_info_get(port_id, &dev_info);
1487         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1488                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1489                                 mp->name, (int) mp->private_data_size,
1490                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1491                 return -ENOSPC;
1492         }
1493         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1494
1495         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1496                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1497                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1498                                 "=%d)\n",
1499                                 mp->name,
1500                                 (int)mbp_buf_size,
1501                                 (int)(RTE_PKTMBUF_HEADROOM +
1502                                       dev_info.min_rx_bufsize),
1503                                 (int)RTE_PKTMBUF_HEADROOM,
1504                                 (int)dev_info.min_rx_bufsize);
1505                 return -EINVAL;
1506         }
1507
1508         /* Use default specified by driver, if nb_rx_desc is zero */
1509         if (nb_rx_desc == 0) {
1510                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1511                 /* If driver default is also zero, fall back on EAL default */
1512                 if (nb_rx_desc == 0)
1513                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1514         }
1515
1516         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1517                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1518                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1519
1520                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1521                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1522                         nb_rx_desc,
1523                         dev_info.rx_desc_lim.nb_max,
1524                         dev_info.rx_desc_lim.nb_min,
1525                         dev_info.rx_desc_lim.nb_align);
1526                 return -EINVAL;
1527         }
1528
1529         rxq = dev->data->rx_queues;
1530         if (rxq[rx_queue_id]) {
1531                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1532                                         -ENOTSUP);
1533                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1534                 rxq[rx_queue_id] = NULL;
1535         }
1536
1537         if (rx_conf == NULL)
1538                 rx_conf = &dev_info.default_rxconf;
1539
1540         local_conf = *rx_conf;
1541         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1542                 /*
1543                  * Reflect port offloads to queue offloads so that the
1544                  * port-level offloads are not discarded.
1545                  */
1546                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1547                                                     &local_conf.offloads);
1548         }
1549
1550         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1551                                               socket_id, &local_conf, mp);
1552         if (!ret) {
1553                 if (!dev->data->min_rx_buf_size ||
1554                     dev->data->min_rx_buf_size > mbp_buf_size)
1555                         dev->data->min_rx_buf_size = mbp_buf_size;
1556         }
1557
1558         return eth_err(port_id, ret);
1559 }
1560
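/*
 * Illustrative caller-side sketch for the RX setup path above: a minimal
 * example, assuming the port was already configured with
 * rte_eth_dev_configure() and is not yet started.  Passing nb_rx_desc == 0
 * and rx_conf == NULL selects the defaults resolved above; the pool name,
 * sizes and queue index are placeholder values, not requirements.
 *
 *      struct rte_mempool *mp;
 *      int ret;
 *
 *      mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                   RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                   rte_eth_dev_socket_id(port_id));
 *      if (mp == NULL)
 *              rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *
 *      ret = rte_eth_rx_queue_setup(port_id, 0, 0,
 *                                   rte_eth_dev_socket_id(port_id),
 *                                   NULL, mp);
 *      if (ret < 0)
 *              rte_exit(EXIT_FAILURE, "RX queue setup failed: %d\n", ret);
 */
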
1561 /**
1562  * A conversion function from txq_flags API.
1563  */
1564 static void
1565 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1566 {
1567         uint64_t offloads = 0;
1568
1569         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1570                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1571         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1572                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1573         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1574                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1575         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1576                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1577         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1578                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1579         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1580             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1581                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1582
1583         *tx_offloads = offloads;
1584 }
1585
1586 /**
1587  * A conversion function from offloads API.
1588  */
1589 static void
1590 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1591 {
1592         uint32_t flags = 0;
1593
1594         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1595                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1596         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1597                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1598         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1599                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1600         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1601                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1602         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1603                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1604         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1605                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1606
1607         *txq_flags = flags;
1608 }
1609
1610 int
1611 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1612                        uint16_t nb_tx_desc, unsigned int socket_id,
1613                        const struct rte_eth_txconf *tx_conf)
1614 {
1615         struct rte_eth_dev *dev;
1616         struct rte_eth_dev_info dev_info;
1617         struct rte_eth_txconf local_conf;
1618         void **txq;
1619
1620         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1621
1622         dev = &rte_eth_devices[port_id];
1623         if (tx_queue_id >= dev->data->nb_tx_queues) {
1624                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1625                 return -EINVAL;
1626         }
1627
1628         if (dev->data->dev_started) {
1629                 RTE_PMD_DEBUG_TRACE(
1630                     "port %d must be stopped to allow configuration\n", port_id);
1631                 return -EBUSY;
1632         }
1633
1634         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1635         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1636
1637         rte_eth_dev_info_get(port_id, &dev_info);
1638
1639         /* Use default specified by driver, if nb_tx_desc is zero */
1640         if (nb_tx_desc == 0) {
1641                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1642                 /* If driver default is zero, fall back on EAL default */
1643                 if (nb_tx_desc == 0)
1644                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1645         }
1646         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1647             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1648             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1649                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1650                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1651                                 nb_tx_desc,
1652                                 dev_info.tx_desc_lim.nb_max,
1653                                 dev_info.tx_desc_lim.nb_min,
1654                                 dev_info.tx_desc_lim.nb_align);
1655                 return -EINVAL;
1656         }
1657
1658         txq = dev->data->tx_queues;
1659         if (txq[tx_queue_id]) {
1660                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1661                                         -ENOTSUP);
1662                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1663                 txq[tx_queue_id] = NULL;
1664         }
1665
1666         if (tx_conf == NULL)
1667                 tx_conf = &dev_info.default_txconf;
1668
1669         /*
1670          * Convert between the txq_flags and offloads APIs so that PMDs
1671          * supporting only one of them keep working.
1672          */
1673         local_conf = *tx_conf;
1674         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1675                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1676                                              &local_conf.txq_flags);
1677                 /* Keep the ignore flag. */
1678                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1679         } else {
1680                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1681                                           &local_conf.offloads);
1682         }
1683
1684         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1685                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1686 }
1687
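/*
 * Illustrative caller-side sketch for the TX setup path above.  Setting
 * ETH_TXQ_FLAGS_IGNORE marks tx_conf->offloads as authoritative, and the
 * conversion above still produces txq_flags for PMDs that only understand
 * the old API.  The offload flags chosen below are examples and are assumed
 * to be present in the port's dev_info.tx_offload_capa.
 *
 *      struct rte_eth_dev_info dev_info;
 *      struct rte_eth_txconf txconf;
 *      int ret;
 *
 *      rte_eth_dev_info_get(port_id, &dev_info);
 *      txconf = dev_info.default_txconf;
 *      txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *      txconf.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
 *                        DEV_TX_OFFLOAD_TCP_CKSUM;
 *
 *      ret = rte_eth_tx_queue_setup(port_id, 0, 0,
 *                                   rte_eth_dev_socket_id(port_id),
 *                                   &txconf);
 *      if (ret < 0)
 *              rte_exit(EXIT_FAILURE, "TX queue setup failed: %d\n", ret);
 */
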
1688 void
1689 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1690                 void *userdata __rte_unused)
1691 {
1692         unsigned i;
1693
1694         for (i = 0; i < unsent; i++)
1695                 rte_pktmbuf_free(pkts[i]);
1696 }
1697
1698 void
1699 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1700                 void *userdata)
1701 {
1702         uint64_t *count = userdata;
1703         unsigned i;
1704
1705         for (i = 0; i < unsent; i++)
1706                 rte_pktmbuf_free(pkts[i]);
1707
1708         *count += unsent;
1709 }
1710
1711 int
1712 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1713                 buffer_tx_error_fn cbfn, void *userdata)
1714 {
1715         buffer->error_callback = cbfn;
1716         buffer->error_userdata = userdata;
1717         return 0;
1718 }
1719
1720 int
1721 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1722 {
1723         int ret = 0;
1724
1725         if (buffer == NULL)
1726                 return -EINVAL;
1727
1728         buffer->size = size;
1729         if (buffer->error_callback == NULL) {
1730                 ret = rte_eth_tx_buffer_set_err_callback(
1731                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1732         }
1733
1734         return ret;
1735 }
1736
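/*
 * Sketch of how the TX buffer helpers above are usually combined.  The
 * buffer size of 32 and the "port"/"queue"/"mbuf" variables are
 * placeholders; the counting callback makes dropped packets visible in
 * "silently_dropped".
 *
 *      struct rte_eth_dev_tx_buffer *buf;
 *      uint64_t silently_dropped = 0;
 *
 *      buf = rte_zmalloc_socket("tx_buffer",
 *                               RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *                               rte_eth_dev_socket_id(port));
 *      rte_eth_tx_buffer_init(buf, 32);
 *      rte_eth_tx_buffer_set_err_callback(buf,
 *                      rte_eth_tx_buffer_count_callback, &silently_dropped);
 *
 *      In the datapath the buffer is then filled and drained with:
 *
 *      rte_eth_tx_buffer(port, queue, buf, mbuf);
 *      ...
 *      rte_eth_tx_buffer_flush(port, queue, buf);
 */
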
1737 int
1738 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1739 {
1740         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1741         int ret;
1742
1743         /* Validate Input Data. Bail if not valid or not supported. */
1744         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1745         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1746
1747         /* Call driver to free pending mbufs. */
1748         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1749                                                free_cnt);
1750         return eth_err(port_id, ret);
1751 }
1752
1753 void
1754 rte_eth_promiscuous_enable(uint16_t port_id)
1755 {
1756         struct rte_eth_dev *dev;
1757
1758         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1759         dev = &rte_eth_devices[port_id];
1760
1761         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1762         (*dev->dev_ops->promiscuous_enable)(dev);
1763         dev->data->promiscuous = 1;
1764 }
1765
1766 void
1767 rte_eth_promiscuous_disable(uint16_t port_id)
1768 {
1769         struct rte_eth_dev *dev;
1770
1771         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1772         dev = &rte_eth_devices[port_id];
1773
1774         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1775         dev->data->promiscuous = 0;
1776         (*dev->dev_ops->promiscuous_disable)(dev);
1777 }
1778
1779 int
1780 rte_eth_promiscuous_get(uint16_t port_id)
1781 {
1782         struct rte_eth_dev *dev;
1783
1784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1785
1786         dev = &rte_eth_devices[port_id];
1787         return dev->data->promiscuous;
1788 }
1789
1790 void
1791 rte_eth_allmulticast_enable(uint16_t port_id)
1792 {
1793         struct rte_eth_dev *dev;
1794
1795         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1796         dev = &rte_eth_devices[port_id];
1797
1798         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1799         (*dev->dev_ops->allmulticast_enable)(dev);
1800         dev->data->all_multicast = 1;
1801 }
1802
1803 void
1804 rte_eth_allmulticast_disable(uint16_t port_id)
1805 {
1806         struct rte_eth_dev *dev;
1807
1808         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1809         dev = &rte_eth_devices[port_id];
1810
1811         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1812         dev->data->all_multicast = 0;
1813         (*dev->dev_ops->allmulticast_disable)(dev);
1814 }
1815
1816 int
1817 rte_eth_allmulticast_get(uint16_t port_id)
1818 {
1819         struct rte_eth_dev *dev;
1820
1821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1822
1823         dev = &rte_eth_devices[port_id];
1824         return dev->data->all_multicast;
1825 }
1826
1827 void
1828 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1829 {
1830         struct rte_eth_dev *dev;
1831
1832         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1833         dev = &rte_eth_devices[port_id];
1834
1835         if (dev->data->dev_conf.intr_conf.lsc &&
1836             dev->data->dev_started)
1837                 rte_eth_linkstatus_get(dev, eth_link);
1838         else {
1839                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1840                 (*dev->dev_ops->link_update)(dev, 1);
1841                 *eth_link = dev->data->dev_link;
1842         }
1843 }
1844
1845 void
1846 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1847 {
1848         struct rte_eth_dev *dev;
1849
1850         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1851         dev = &rte_eth_devices[port_id];
1852
1853         if (dev->data->dev_conf.intr_conf.lsc &&
1854             dev->data->dev_started)
1855                 rte_eth_linkstatus_get(dev, eth_link);
1856         else {
1857                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1858                 (*dev->dev_ops->link_update)(dev, 0);
1859                 *eth_link = dev->data->dev_link;
1860         }
1861 }
1862
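/*
 * Minimal sketch of polling link state with the no-wait variant above; the
 * retry count and delay are arbitrary example values.
 *
 *      struct rte_eth_link link;
 *      int i;
 *
 *      for (i = 0; i < 90; i++) {
 *              rte_eth_link_get_nowait(port_id, &link);
 *              if (link.link_status == ETH_LINK_UP)
 *                      break;
 *              rte_delay_ms(100);
 *      }
 */
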
1863 int
1864 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1865 {
1866         struct rte_eth_dev *dev;
1867
1868         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1869
1870         dev = &rte_eth_devices[port_id];
1871         memset(stats, 0, sizeof(*stats));
1872
1873         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1874         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1875         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1876 }
1877
1878 int
1879 rte_eth_stats_reset(uint16_t port_id)
1880 {
1881         struct rte_eth_dev *dev;
1882
1883         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1884         dev = &rte_eth_devices[port_id];
1885
1886         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1887         (*dev->dev_ops->stats_reset)(dev);
1888         dev->data->rx_mbuf_alloc_failed = 0;
1889
1890         return 0;
1891 }
1892
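/*
 * Sketch of reading and clearing the basic counters with the two functions
 * above; error handling is trimmed for brevity and port_id is assumed valid.
 *
 *      struct rte_eth_stats stats;
 *
 *      if (rte_eth_stats_get(port_id, &stats) == 0)
 *              printf("rx %"PRIu64" tx %"PRIu64" missed %"PRIu64"\n",
 *                     stats.ipackets, stats.opackets, stats.imissed);
 *      rte_eth_stats_reset(port_id);
 */
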
1893 static inline int
1894 get_xstats_basic_count(struct rte_eth_dev *dev)
1895 {
1896         uint16_t nb_rxqs, nb_txqs;
1897         int count;
1898
1899         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1900         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1901
1902         count = RTE_NB_STATS;
1903         count += nb_rxqs * RTE_NB_RXQ_STATS;
1904         count += nb_txqs * RTE_NB_TXQ_STATS;
1905
1906         return count;
1907 }
1908
1909 static int
1910 get_xstats_count(uint16_t port_id)
1911 {
1912         struct rte_eth_dev *dev;
1913         int count;
1914
1915         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1916         dev = &rte_eth_devices[port_id];
1917         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1918                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1919                                 NULL, 0);
1920                 if (count < 0)
1921                         return eth_err(port_id, count);
1922         }
1923         if (dev->dev_ops->xstats_get_names != NULL) {
1924                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1925                 if (count < 0)
1926                         return eth_err(port_id, count);
1927         } else
1928                 count = 0;
1929
1930
1931         count += get_xstats_basic_count(dev);
1932
1933         return count;
1934 }
1935
1936 int
1937 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1938                 uint64_t *id)
1939 {
1940         int cnt_xstats, idx_xstat;
1941
1942         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1943
1944         if (!id) {
1945                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1946                 return -ENOMEM;
1947         }
1948
1949         if (!xstat_name) {
1950                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1951                 return -ENOMEM;
1952         }
1953
1954         /* Get count */
1955         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1956         if (cnt_xstats  < 0) {
1957                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1958                 return -ENODEV;
1959         }
1960
1961         /* Get id-name lookup table */
1962         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1963
1964         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1965                         port_id, xstats_names, cnt_xstats, NULL)) {
1966                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1967                 return -1;
1968         }
1969
1970         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1971                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1972                         *id = idx_xstat;
1973                         return 0;
1974                 }
1975         }
1976
1977         return -EINVAL;
1978 }
1979
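/*
 * Sketch of looking an xstat up by name and reading it through the by-id
 * path; "rx_good_packets" is assumed here to be one of the basic counter
 * names registered in rte_stats_strings[].
 *
 *      uint64_t id, value;
 *
 *      if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *                                        &id) == 0 &&
 *          rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *              printf("rx_good_packets = %"PRIu64"\n", value);
 */
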
1980 /* retrieve basic stats names */
1981 static int
1982 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1983         struct rte_eth_xstat_name *xstats_names)
1984 {
1985         int cnt_used_entries = 0;
1986         uint32_t idx, id_queue;
1987         uint16_t num_q;
1988
1989         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1990                 snprintf(xstats_names[cnt_used_entries].name,
1991                         sizeof(xstats_names[0].name),
1992                         "%s", rte_stats_strings[idx].name);
1993                 cnt_used_entries++;
1994         }
1995         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1996         for (id_queue = 0; id_queue < num_q; id_queue++) {
1997                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1998                         snprintf(xstats_names[cnt_used_entries].name,
1999                                 sizeof(xstats_names[0].name),
2000                                 "rx_q%u%s",
2001                                 id_queue, rte_rxq_stats_strings[idx].name);
2002                         cnt_used_entries++;
2003                 }
2004
2005         }
2006         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2007         for (id_queue = 0; id_queue < num_q; id_queue++) {
2008                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2009                         snprintf(xstats_names[cnt_used_entries].name,
2010                                 sizeof(xstats_names[0].name),
2011                                 "tx_q%u%s",
2012                                 id_queue, rte_txq_stats_strings[idx].name);
2013                         cnt_used_entries++;
2014                 }
2015         }
2016         return cnt_used_entries;
2017 }
2018
2019 /* retrieve ethdev extended statistics names */
2020 int
2021 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2022         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2023         uint64_t *ids)
2024 {
2025         struct rte_eth_xstat_name *xstats_names_copy;
2026         unsigned int no_basic_stat_requested = 1;
2027         unsigned int no_ext_stat_requested = 1;
2028         unsigned int expected_entries;
2029         unsigned int basic_count;
2030         struct rte_eth_dev *dev;
2031         unsigned int i;
2032         int ret;
2033
2034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2035         dev = &rte_eth_devices[port_id];
2036
2037         basic_count = get_xstats_basic_count(dev);
2038         ret = get_xstats_count(port_id);
2039         if (ret < 0)
2040                 return ret;
2041         expected_entries = (unsigned int)ret;
2042
2043         /* Return max number of stats if no ids given */
2044         if (!ids) {
2045                 if (!xstats_names)
2046                         return expected_entries;
2047                 else if (xstats_names && size < expected_entries)
2048                         return expected_entries;
2049         }
2050
2051         if (ids && !xstats_names)
2052                 return -EINVAL;
2053
2054         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2055                 uint64_t ids_copy[size];
2056
2057                 for (i = 0; i < size; i++) {
2058                         if (ids[i] < basic_count) {
2059                                 no_basic_stat_requested = 0;
2060                                 break;
2061                         }
2062
2063                         /*
2064                          * Convert ids to xstats ids that PMD knows.
2065                          * ids known by user are basic + extended stats.
2066                          */
2067                         ids_copy[i] = ids[i] - basic_count;
2068                 }
2069
2070                 if (no_basic_stat_requested)
2071                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2072                                         xstats_names, ids_copy, size);
2073         }
2074
2075         /* Retrieve all stats */
2076         if (!ids) {
2077                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2078                                 expected_entries);
2079                 if (num_stats < 0 || num_stats > (int)expected_entries)
2080                         return num_stats;
2081                 else
2082                         return expected_entries;
2083         }
2084
2085         xstats_names_copy = calloc(expected_entries,
2086                 sizeof(struct rte_eth_xstat_name));
2087
2088         if (!xstats_names_copy) {
2089                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2090                 return -ENOMEM;
2091         }
2092
2093         if (ids) {
2094                 for (i = 0; i < size; i++) {
2095                         if (ids[i] >= basic_count) {
2096                                 no_ext_stat_requested = 0;
2097                                 break;
2098                         }
2099                 }
2100         }
2101
2102         /* Fill xstats_names_copy structure */
2103         if (ids && no_ext_stat_requested) {
2104                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2105         } else {
2106                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2107                         expected_entries);
2108                 if (ret < 0) {
2109                         free(xstats_names_copy);
2110                         return ret;
2111                 }
2112         }
2113
2114         /* Filter stats */
2115         for (i = 0; i < size; i++) {
2116                 if (ids[i] >= expected_entries) {
2117                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2118                         free(xstats_names_copy);
2119                         return -1;
2120                 }
2121                 xstats_names[i] = xstats_names_copy[ids[i]];
2122         }
2123
2124         free(xstats_names_copy);
2125         return size;
2126 }
2127
2128 int
2129 rte_eth_xstats_get_names(uint16_t port_id,
2130         struct rte_eth_xstat_name *xstats_names,
2131         unsigned int size)
2132 {
2133         struct rte_eth_dev *dev;
2134         int cnt_used_entries;
2135         int cnt_expected_entries;
2136         int cnt_driver_entries;
2137
2138         cnt_expected_entries = get_xstats_count(port_id);
2139         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2140                         (int)size < cnt_expected_entries)
2141                 return cnt_expected_entries;
2142
2143         /* port_id checked in get_xstats_count() */
2144         dev = &rte_eth_devices[port_id];
2145
2146         cnt_used_entries = rte_eth_basic_stats_get_names(
2147                 dev, xstats_names);
2148
2149         if (dev->dev_ops->xstats_get_names != NULL) {
2150                 /* If there are any driver-specific xstats, append them
2151                  * to the end of the list.
2152                  */
2153                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2154                         dev,
2155                         xstats_names + cnt_used_entries,
2156                         size - cnt_used_entries);
2157                 if (cnt_driver_entries < 0)
2158                         return eth_err(port_id, cnt_driver_entries);
2159                 cnt_used_entries += cnt_driver_entries;
2160         }
2161
2162         return cnt_used_entries;
2163 }
2164
2165
2166 static int
2167 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2168 {
2169         struct rte_eth_dev *dev;
2170         struct rte_eth_stats eth_stats;
2171         unsigned int count = 0, i, q;
2172         uint64_t val, *stats_ptr;
2173         uint16_t nb_rxqs, nb_txqs;
2174         int ret;
2175
2176         ret = rte_eth_stats_get(port_id, &eth_stats);
2177         if (ret < 0)
2178                 return ret;
2179
2180         dev = &rte_eth_devices[port_id];
2181
2182         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2183         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2184
2185         /* global stats */
2186         for (i = 0; i < RTE_NB_STATS; i++) {
2187                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2188                                         rte_stats_strings[i].offset);
2189                 val = *stats_ptr;
2190                 xstats[count++].value = val;
2191         }
2192
2193         /* per-rxq stats */
2194         for (q = 0; q < nb_rxqs; q++) {
2195                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2196                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2197                                         rte_rxq_stats_strings[i].offset +
2198                                         q * sizeof(uint64_t));
2199                         val = *stats_ptr;
2200                         xstats[count++].value = val;
2201                 }
2202         }
2203
2204         /* per-txq stats */
2205         for (q = 0; q < nb_txqs; q++) {
2206                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2207                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2208                                         rte_txq_stats_strings[i].offset +
2209                                         q * sizeof(uint64_t));
2210                         val = *stats_ptr;
2211                         xstats[count++].value = val;
2212                 }
2213         }
2214         return count;
2215 }
2216
2217 /* retrieve ethdev extended statistics */
2218 int
2219 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2220                          uint64_t *values, unsigned int size)
2221 {
2222         unsigned int no_basic_stat_requested = 1;
2223         unsigned int no_ext_stat_requested = 1;
2224         unsigned int num_xstats_filled;
2225         unsigned int basic_count;
2226         uint16_t expected_entries;
2227         struct rte_eth_dev *dev;
2228         unsigned int i;
2229         int ret;
2230
2231         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2232         ret = get_xstats_count(port_id);
2233         if (ret < 0)
2234                 return ret;
2235         expected_entries = (uint16_t)ret;
2236         struct rte_eth_xstat xstats[expected_entries];
2237         dev = &rte_eth_devices[port_id];
2238         basic_count = get_xstats_basic_count(dev);
2239
2240         /* Return max number of stats if no ids given */
2241         if (!ids) {
2242                 if (!values)
2243                         return expected_entries;
2244                 else if (values && size < expected_entries)
2245                         return expected_entries;
2246         }
2247
2248         if (ids && !values)
2249                 return -EINVAL;
2250
2251         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2253                 uint64_t ids_copy[size];
2254
2255                 for (i = 0; i < size; i++) {
2256                         if (ids[i] < basic_count) {
2257                                 no_basic_stat_requested = 0;
2258                                 break;
2259                         }
2260
2261                         /*
2262                          * Convert ids to xstats ids that PMD knows.
2263                          * ids known by user are basic + extended stats.
2264                          */
2265                         ids_copy[i] = ids[i] - basic_count;
2266                 }
2267
2268                 if (no_basic_stat_requested)
2269                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2270                                         values, size);
2271         }
2272
2273         if (ids) {
2274                 for (i = 0; i < size; i++) {
2275                         if (ids[i] >= basic_count) {
2276                                 no_ext_stat_requested = 0;
2277                                 break;
2278                         }
2279                 }
2280         }
2281
2282         /* Fill the xstats structure */
2283         if (ids && no_ext_stat_requested)
2284                 ret = rte_eth_basic_stats_get(port_id, xstats);
2285         else
2286                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2287
2288         if (ret < 0)
2289                 return ret;
2290         num_xstats_filled = (unsigned int)ret;
2291
2292         /* Return all stats */
2293         if (!ids) {
2294                 for (i = 0; i < num_xstats_filled; i++)
2295                         values[i] = xstats[i].value;
2296                 return expected_entries;
2297         }
2298
2299         /* Filter stats */
2300         for (i = 0; i < size; i++) {
2301                 if (ids[i] >= expected_entries) {
2302                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2303                         return -1;
2304                 }
2305                 values[i] = xstats[ids[i]].value;
2306         }
2307         return size;
2308 }
2309
2310 int
2311 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2312         unsigned int n)
2313 {
2314         struct rte_eth_dev *dev;
2315         unsigned int count = 0, i;
2316         signed int xcount = 0;
2317         uint16_t nb_rxqs, nb_txqs;
2318         int ret;
2319
2320         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2321
2322         dev = &rte_eth_devices[port_id];
2323
2324         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2325         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2326
2327         /* Return generic statistics */
2328         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2329                 (nb_txqs * RTE_NB_TXQ_STATS);
2330
2331         /* implemented by the driver */
2332         if (dev->dev_ops->xstats_get != NULL) {
2333                 /* Retrieve the xstats from the driver at the end of the
2334                  * xstats struct.
2335                  */
2336                 xcount = (*dev->dev_ops->xstats_get)(dev,
2337                                      xstats ? xstats + count : NULL,
2338                                      (n > count) ? n - count : 0);
2339
2340                 if (xcount < 0)
2341                         return eth_err(port_id, xcount);
2342         }
2343
2344         if (n < count + xcount || xstats == NULL)
2345                 return count + xcount;
2346
2347         /* now fill the xstats structure */
2348         ret = rte_eth_basic_stats_get(port_id, xstats);
2349         if (ret < 0)
2350                 return ret;
2351         count = ret;
2352
2353         for (i = 0; i < count; i++)
2354                 xstats[i].id = i;
2355         /* add an offset to driver-specific stats */
2356         for ( ; i < count + xcount; i++)
2357                 xstats[i].id += count;
2358
2359         return count + xcount;
2360 }
2361
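/*
 * Sketch of the usual two-call pattern for the full xstats API above: query
 * the count with NULL buffers, allocate, then fetch names and values.  It
 * assumes port_id is valid (so n is non-negative) and that the set of xstats
 * does not change between the calls; allocation failures are handled only
 * minimally.
 *
 *      int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *      struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *      struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *      if (xs != NULL && names != NULL &&
 *          rte_eth_xstats_get_names(port_id, names, n) == n &&
 *          rte_eth_xstats_get(port_id, xs, n) == n) {
 *              for (i = 0; i < n; i++)
 *                      printf("%s: %"PRIu64"\n",
 *                             names[xs[i].id].name, xs[i].value);
 *      }
 *      free(xs);
 *      free(names);
 */
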
2362 /* reset ethdev extended statistics */
2363 void
2364 rte_eth_xstats_reset(uint16_t port_id)
2365 {
2366         struct rte_eth_dev *dev;
2367
2368         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2369         dev = &rte_eth_devices[port_id];
2370
2371         /* implemented by the driver */
2372         if (dev->dev_ops->xstats_reset != NULL) {
2373                 (*dev->dev_ops->xstats_reset)(dev);
2374                 return;
2375         }
2376
2377         /* fallback to default */
2378         rte_eth_stats_reset(port_id);
2379 }
2380
2381 static int
2382 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2383                 uint8_t is_rx)
2384 {
2385         struct rte_eth_dev *dev;
2386
2387         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2388
2389         dev = &rte_eth_devices[port_id];
2390
2391         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2392         return (*dev->dev_ops->queue_stats_mapping_set)
2393                         (dev, queue_id, stat_idx, is_rx);
2394 }
2395
2396
2397 int
2398 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2399                 uint8_t stat_idx)
2400 {
2401         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2402                                                 stat_idx, STAT_QMAP_TX));
2403 }
2404
2405
2406 int
2407 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2408                 uint8_t stat_idx)
2409 {
2410         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2411                                                 stat_idx, STAT_QMAP_RX));
2412 }
2413
2414 int
2415 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2416 {
2417         struct rte_eth_dev *dev;
2418
2419         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2420         dev = &rte_eth_devices[port_id];
2421
2422         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2423         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2424                                                         fw_version, fw_size));
2425 }
2426
2427 void
2428 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2429 {
2430         struct rte_eth_dev *dev;
2431         const struct rte_eth_desc_lim lim = {
2432                 .nb_max = UINT16_MAX,
2433                 .nb_min = 0,
2434                 .nb_align = 1,
2435         };
2436
2437         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2438         dev = &rte_eth_devices[port_id];
2439
2440         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2441         dev_info->rx_desc_lim = lim;
2442         dev_info->tx_desc_lim = lim;
2443         dev_info->device = dev->device;
2444
2445         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2446         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2447         dev_info->driver_name = dev->device->driver->name;
2448         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2449         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2450 }
2451
2452 int
2453 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2454                                  uint32_t *ptypes, int num)
2455 {
2456         int i, j;
2457         struct rte_eth_dev *dev;
2458         const uint32_t *all_ptypes;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2461         dev = &rte_eth_devices[port_id];
2462         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2463         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2464
2465         if (!all_ptypes)
2466                 return 0;
2467
2468         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2469                 if (all_ptypes[i] & ptype_mask) {
2470                         if (j < num)
2471                                 ptypes[j] = all_ptypes[i];
2472                         j++;
2473                 }
2474
2475         return j;
2476 }
2477
2478 void
2479 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2480 {
2481         struct rte_eth_dev *dev;
2482
2483         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2484         dev = &rte_eth_devices[port_id];
2485         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2486 }
2487
2488
2489 int
2490 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2491 {
2492         struct rte_eth_dev *dev;
2493
2494         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2495
2496         dev = &rte_eth_devices[port_id];
2497         *mtu = dev->data->mtu;
2498         return 0;
2499 }
2500
2501 int
2502 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2503 {
2504         int ret;
2505         struct rte_eth_dev *dev;
2506
2507         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2508         dev = &rte_eth_devices[port_id];
2509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2510
2511         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2512         if (!ret)
2513                 dev->data->mtu = mtu;
2514
2515         return eth_err(port_id, ret);
2516 }
2517
2518 int
2519 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2520 {
2521         struct rte_eth_dev *dev;
2522         int ret;
2523
2524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2525         dev = &rte_eth_devices[port_id];
2526         if (!(dev->data->dev_conf.rxmode.offloads &
2527               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2528                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2529                 return -ENOSYS;
2530         }
2531
2532         if (vlan_id > 4095) {
2533                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2534                                 port_id, (unsigned) vlan_id);
2535                 return -EINVAL;
2536         }
2537         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2538
2539         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2540         if (ret == 0) {
2541                 struct rte_vlan_filter_conf *vfc;
2542                 int vidx;
2543                 int vbit;
2544
2545                 vfc = &dev->data->vlan_filter_conf;
2546                 vidx = vlan_id / 64;
2547                 vbit = vlan_id % 64;
2548
2549                 if (on)
2550                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2551                 else
2552                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2553         }
2554
2555         return eth_err(port_id, ret);
2556 }
2557
2558 int
2559 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2560                                     int on)
2561 {
2562         struct rte_eth_dev *dev;
2563
2564         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2565         dev = &rte_eth_devices[port_id];
2566         if (rx_queue_id >= dev->data->nb_rx_queues) {
2567                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2568                 return -EINVAL;
2569         }
2570
2571         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2572         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2573
2574         return 0;
2575 }
2576
2577 int
2578 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2579                                 enum rte_vlan_type vlan_type,
2580                                 uint16_t tpid)
2581 {
2582         struct rte_eth_dev *dev;
2583
2584         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2585         dev = &rte_eth_devices[port_id];
2586         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2587
2588         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2589                                                                tpid));
2590 }
2591
2592 int
2593 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2594 {
2595         struct rte_eth_dev *dev;
2596         int ret = 0;
2597         int mask = 0;
2598         int cur, org = 0;
2599         uint64_t orig_offloads;
2600
2601         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2602         dev = &rte_eth_devices[port_id];
2603
2604         /* save original values in case of failure */
2605         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2606
2607         /* check which options were changed by the application */
2608         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2609         org = !!(dev->data->dev_conf.rxmode.offloads &
2610                  DEV_RX_OFFLOAD_VLAN_STRIP);
2611         if (cur != org) {
2612                 if (cur)
2613                         dev->data->dev_conf.rxmode.offloads |=
2614                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2615                 else
2616                         dev->data->dev_conf.rxmode.offloads &=
2617                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2618                 mask |= ETH_VLAN_STRIP_MASK;
2619         }
2620
2621         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2622         org = !!(dev->data->dev_conf.rxmode.offloads &
2623                  DEV_RX_OFFLOAD_VLAN_FILTER);
2624         if (cur != org) {
2625                 if (cur)
2626                         dev->data->dev_conf.rxmode.offloads |=
2627                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2628                 else
2629                         dev->data->dev_conf.rxmode.offloads &=
2630                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2631                 mask |= ETH_VLAN_FILTER_MASK;
2632         }
2633
2634         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2635         org = !!(dev->data->dev_conf.rxmode.offloads &
2636                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2637         if (cur != org) {
2638                 if (cur)
2639                         dev->data->dev_conf.rxmode.offloads |=
2640                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2641                 else
2642                         dev->data->dev_conf.rxmode.offloads &=
2643                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2644                 mask |= ETH_VLAN_EXTEND_MASK;
2645         }
2646
2647         /* no change */
2648         if (mask == 0)
2649                 return ret;
2650
2651         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2652
2653         /*
2654          * Convert to the offload bitfield API in case the underlying PMD
2655          * still relies on it.
2656          */
2657         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2658                                     &dev->data->dev_conf.rxmode);
2659         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2660         if (ret) {
2661                 /* hit an error, restore the original values */
2662                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2663                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2664                                             &dev->data->dev_conf.rxmode);
2665         }
2666
2667         return eth_err(port_id, ret);
2668 }
2669
2670 int
2671 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2672 {
2673         struct rte_eth_dev *dev;
2674         int ret = 0;
2675
2676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2677         dev = &rte_eth_devices[port_id];
2678
2679         if (dev->data->dev_conf.rxmode.offloads &
2680             DEV_RX_OFFLOAD_VLAN_STRIP)
2681                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2682
2683         if (dev->data->dev_conf.rxmode.offloads &
2684             DEV_RX_OFFLOAD_VLAN_FILTER)
2685                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2686
2687         if (dev->data->dev_conf.rxmode.offloads &
2688             DEV_RX_OFFLOAD_VLAN_EXTEND)
2689                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2690
2691         return ret;
2692 }
2693
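/*
 * Sketch of toggling VLAN stripping at runtime through the two functions
 * above while leaving the other VLAN offload bits untouched; port_id is
 * assumed valid and already configured.
 *
 *      int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *      mask |= ETH_VLAN_STRIP_OFFLOAD;
 *      if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *              printf("cannot enable VLAN stripping on port %u\n", port_id);
 */
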
2694 int
2695 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2696 {
2697         struct rte_eth_dev *dev;
2698
2699         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2700         dev = &rte_eth_devices[port_id];
2701         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2702
2703         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2704 }
2705
2706 int
2707 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2708 {
2709         struct rte_eth_dev *dev;
2710
2711         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2712         dev = &rte_eth_devices[port_id];
2713         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2714         memset(fc_conf, 0, sizeof(*fc_conf));
2715         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2716 }
2717
2718 int
2719 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2720 {
2721         struct rte_eth_dev *dev;
2722
2723         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2724         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2725                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2726                 return -EINVAL;
2727         }
2728
2729         dev = &rte_eth_devices[port_id];
2730         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2731         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2732 }
2733
2734 int
2735 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2736                                    struct rte_eth_pfc_conf *pfc_conf)
2737 {
2738         struct rte_eth_dev *dev;
2739
2740         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2741         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2742                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2743                 return -EINVAL;
2744         }
2745
2746         dev = &rte_eth_devices[port_id];
2747         /* High water / low water validation is device-specific */
2748         if  (*dev->dev_ops->priority_flow_ctrl_set)
2749                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2750                                         (dev, pfc_conf));
2751         return -ENOTSUP;
2752 }
2753
2754 static int
2755 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2756                         uint16_t reta_size)
2757 {
2758         uint16_t i, num;
2759
2760         if (!reta_conf)
2761                 return -EINVAL;
2762
2763         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2764         for (i = 0; i < num; i++) {
2765                 if (reta_conf[i].mask)
2766                         return 0;
2767         }
2768
2769         return -EINVAL;
2770 }
2771
2772 static int
2773 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2774                          uint16_t reta_size,
2775                          uint16_t max_rxq)
2776 {
2777         uint16_t i, idx, shift;
2778
2779         if (!reta_conf)
2780                 return -EINVAL;
2781
2782         if (max_rxq == 0) {
2783                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2784                 return -EINVAL;
2785         }
2786
2787         for (i = 0; i < reta_size; i++) {
2788                 idx = i / RTE_RETA_GROUP_SIZE;
2789                 shift = i % RTE_RETA_GROUP_SIZE;
2790                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2791                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2792                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2793                                 "the maximum rxq index: %u\n", idx, shift,
2794                                 reta_conf[idx].reta[shift], max_rxq);
2795                         return -EINVAL;
2796                 }
2797         }
2798
2799         return 0;
2800 }
2801
2802 int
2803 rte_eth_dev_rss_reta_update(uint16_t port_id,
2804                             struct rte_eth_rss_reta_entry64 *reta_conf,
2805                             uint16_t reta_size)
2806 {
2807         struct rte_eth_dev *dev;
2808         int ret;
2809
2810         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2811         /* Check mask bits */
2812         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2813         if (ret < 0)
2814                 return ret;
2815
2816         dev = &rte_eth_devices[port_id];
2817
2818         /* Check entry value */
2819         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2820                                 dev->data->nb_rx_queues);
2821         if (ret < 0)
2822                 return ret;
2823
2824         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2825         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2826                                                              reta_size));
2827 }
2828
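/*
 * Sketch of building a redirection table for the update function above.  The
 * 64-entry group layout (mask + reta[]) mirrors the idx/shift arithmetic in
 * rte_eth_check_reta_entry(); queues are spread round-robin here purely as an
 * example, and the device's reta_size is assumed not to exceed
 * ETH_RSS_RETA_SIZE_512.
 *
 *      struct rte_eth_dev_info dev_info;
 *      struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
 *                                                 RTE_RETA_GROUP_SIZE];
 *      uint16_t i, idx, shift;
 *
 *      rte_eth_dev_info_get(port_id, &dev_info);
 *      memset(reta_conf, 0, sizeof(reta_conf));
 *      for (i = 0; i < dev_info.reta_size; i++) {
 *              idx = i / RTE_RETA_GROUP_SIZE;
 *              shift = i % RTE_RETA_GROUP_SIZE;
 *              reta_conf[idx].mask |= UINT64_C(1) << shift;
 *              reta_conf[idx].reta[shift] = i % dev_info.nb_rx_queues;
 *      }
 *      rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
 */
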
2829 int
2830 rte_eth_dev_rss_reta_query(uint16_t port_id,
2831                            struct rte_eth_rss_reta_entry64 *reta_conf,
2832                            uint16_t reta_size)
2833 {
2834         struct rte_eth_dev *dev;
2835         int ret;
2836
2837         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2838
2839         /* Check mask bits */
2840         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2841         if (ret < 0)
2842                 return ret;
2843
2844         dev = &rte_eth_devices[port_id];
2845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2846         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2847                                                             reta_size));
2848 }
2849
2850 int
2851 rte_eth_dev_rss_hash_update(uint16_t port_id,
2852                             struct rte_eth_rss_conf *rss_conf)
2853 {
2854         struct rte_eth_dev *dev;
2855
2856         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857         dev = &rte_eth_devices[port_id];
2858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2859         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2860                                                                  rss_conf));
2861 }
2862
2863 int
2864 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2865                               struct rte_eth_rss_conf *rss_conf)
2866 {
2867         struct rte_eth_dev *dev;
2868
2869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2870         dev = &rte_eth_devices[port_id];
2871         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2872         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2873                                                                    rss_conf));
2874 }
2875
2876 int
2877 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2878                                 struct rte_eth_udp_tunnel *udp_tunnel)
2879 {
2880         struct rte_eth_dev *dev;
2881
2882         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2883         if (udp_tunnel == NULL) {
2884                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2885                 return -EINVAL;
2886         }
2887
2888         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2889                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2890                 return -EINVAL;
2891         }
2892
2893         dev = &rte_eth_devices[port_id];
2894         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2895         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2896                                                                 udp_tunnel));
2897 }
2898
2899 int
2900 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2901                                    struct rte_eth_udp_tunnel *udp_tunnel)
2902 {
2903         struct rte_eth_dev *dev;
2904
2905         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2906         dev = &rte_eth_devices[port_id];
2907
2908         if (udp_tunnel == NULL) {
2909                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2910                 return -EINVAL;
2911         }
2912
2913         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2914                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2915                 return -EINVAL;
2916         }
2917
2918         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2919         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2920                                                                 udp_tunnel));
2921 }
2922
2923 int
2924 rte_eth_led_on(uint16_t port_id)
2925 {
2926         struct rte_eth_dev *dev;
2927
2928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2929         dev = &rte_eth_devices[port_id];
2930         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2931         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2932 }
2933
2934 int
2935 rte_eth_led_off(uint16_t port_id)
2936 {
2937         struct rte_eth_dev *dev;
2938
2939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2940         dev = &rte_eth_devices[port_id];
2941         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2942         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2943 }
2944
2945 /*
2946  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2947  * an empty spot.
2948  */
2949 static int
2950 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2951 {
2952         struct rte_eth_dev_info dev_info;
2953         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2954         unsigned i;
2955
2956         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2957         rte_eth_dev_info_get(port_id, &dev_info);
2958
2959         for (i = 0; i < dev_info.max_mac_addrs; i++)
2960                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2961                         return i;
2962
2963         return -1;
2964 }
2965
2966 static const struct ether_addr null_mac_addr;
2967
2968 int
2969 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2970                         uint32_t pool)
2971 {
2972         struct rte_eth_dev *dev;
2973         int index;
2974         uint64_t pool_mask;
2975         int ret;
2976
2977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2978         dev = &rte_eth_devices[port_id];
2979         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2980
2981         if (is_zero_ether_addr(addr)) {
2982                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2983                         port_id);
2984                 return -EINVAL;
2985         }
2986         if (pool >= ETH_64_POOLS) {
2987                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2988                 return -EINVAL;
2989         }
2990
2991         index = get_mac_addr_index(port_id, addr);
2992         if (index < 0) {
2993                 index = get_mac_addr_index(port_id, &null_mac_addr);
2994                 if (index < 0) {
2995                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2996                                 port_id);
2997                         return -ENOSPC;
2998                 }
2999         } else {
3000                 pool_mask = dev->data->mac_pool_sel[index];
3001
3002                 /* If both the MAC address and pool are already there, do nothing */
3003                 if (pool_mask & (1ULL << pool))
3004                         return 0;
3005         }
3006
3007         /* Update NIC */
3008         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3009
3010         if (ret == 0) {
3011                 /* Update address in NIC data structure */
3012                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3013
3014                 /* Update pool bitmap in NIC data structure */
3015                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3016         }
3017
3018         return eth_err(port_id, ret);
3019 }
3020
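/*
 * Sketch of adding a secondary unicast address with the function above.  The
 * address bytes are placeholders (a locally administered address), and pool 0
 * is used since no VMDq pool is assumed.
 *
 *      struct ether_addr extra = {
 *              .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *      };
 *
 *      if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) != 0)
 *              printf("cannot add MAC address on port %u\n", port_id);
 */
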
3021 int
3022 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3023 {
3024         struct rte_eth_dev *dev;
3025         int index;
3026
3027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3028         dev = &rte_eth_devices[port_id];
3029         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3030
3031         index = get_mac_addr_index(port_id, addr);
3032         if (index == 0) {
3033                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
3034                 return -EADDRINUSE;
3035         } else if (index < 0)
3036                 return 0;  /* Do nothing if address wasn't found */
3037
3038         /* Update NIC */
3039         (*dev->dev_ops->mac_addr_remove)(dev, index);
3040
3041         /* Update address in NIC data structure */
3042         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3043
3044         /* reset pool bitmap */
3045         dev->data->mac_pool_sel[index] = 0;
3046
3047         return 0;
3048 }
3049
3050 int
3051 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3052 {
3053         struct rte_eth_dev *dev;
3054         int ret;
3055
3056         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3057
3058         if (!is_valid_assigned_ether_addr(addr))
3059                 return -EINVAL;
3060
3061         dev = &rte_eth_devices[port_id];
3062         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3063
3064         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3065         if (ret < 0)
3066                 return ret;
3067
3068         /* Update default address in NIC data structure */
3069         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3070
3071         return 0;
3072 }
3073
3074
3075 /*
3076  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3077  * an empty spot.
3078  */
3079 static int
3080 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3081 {
3082         struct rte_eth_dev_info dev_info;
3083         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3084         unsigned i;
3085
3086         rte_eth_dev_info_get(port_id, &dev_info);
3087         if (!dev->data->hash_mac_addrs)
3088                 return -1;
3089
3090         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3091                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3092                         ETHER_ADDR_LEN) == 0)
3093                         return i;
3094
3095         return -1;
3096 }
3097
3098 int
3099 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3100                                 uint8_t on)
3101 {
3102         int index;
3103         int ret;
3104         struct rte_eth_dev *dev;
3105
3106         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3107
3108         dev = &rte_eth_devices[port_id];
3109         if (is_zero_ether_addr(addr)) {
3110                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3111                         port_id);
3112                 return -EINVAL;
3113         }
3114
3115         index = get_hash_mac_addr_index(port_id, addr);
3116         /* If the address is already set and 'on' is requested, do nothing */
3117         if ((index >= 0) && on)
3118                 return 0;
3119
3120         if (index < 0) {
3121                 if (!on) {
3122                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3123                                 "set in UTA\n", port_id);
3124                         return -EINVAL;
3125                 }
3126
3127                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3128                 if (index < 0) {
3129                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3130                                         port_id);
3131                         return -ENOSPC;
3132                 }
3133         }
3134
3135         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3136         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3137         if (ret == 0) {
3138                 /* Update address in NIC data structure */
3139                 if (on)
3140                         ether_addr_copy(addr,
3141                                         &dev->data->hash_mac_addrs[index]);
3142                 else
3143                         ether_addr_copy(&null_mac_addr,
3144                                         &dev->data->hash_mac_addrs[index]);
3145         }
3146
3147         return eth_err(port_id, ret);
3148 }
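
/*
 * Illustrative sketch for rte_eth_dev_uc_hash_table_set() above; "addr" and
 * "port_id" are assumed to be provided by the caller:
 *
 *     // Start receiving frames for "addr" via the unicast hash table (UTA).
 *     int rc = rte_eth_dev_uc_hash_table_set(port_id, &addr, 1);
 *     ...
 *     // Later, drop the address from the table again.
 *     if (rc == 0)
 *             rc = rte_eth_dev_uc_hash_table_set(port_id, &addr, 0);
 */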
3149
3150 int
3151 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3152 {
3153         struct rte_eth_dev *dev;
3154
3155         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3156
3157         dev = &rte_eth_devices[port_id];
3158
3159         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3160         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3161                                                                        on));
3162 }
3163
3164 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3165                                         uint16_t tx_rate)
3166 {
3167         struct rte_eth_dev *dev;
3168         struct rte_eth_dev_info dev_info;
3169         struct rte_eth_link link;
3170
3171         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3172
3173         dev = &rte_eth_devices[port_id];
3174         rte_eth_dev_info_get(port_id, &dev_info);
3175         link = dev->data->dev_link;
3176
3177         if (queue_idx > dev_info.max_tx_queues) {
3178                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
3179                                 "invalid queue id=%d\n", port_id, queue_idx);
3180                 return -EINVAL;
3181         }
3182
3183         if (tx_rate > link.link_speed) {
3184                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
3185                                 "bigger than link speed= %d\n",
3186                         tx_rate, link.link_speed);
3187                 return -EINVAL;
3188         }
3189
3190         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3191         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3192                                                         queue_idx, tx_rate));
3193 }
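
/*
 * Illustrative sketch for rte_eth_set_queue_rate_limit() above; the queue
 * number and the 100 Mbit/s cap are assumptions for the example only:
 *
 *     // Cap TX queue 0 at 100 Mbit/s. tx_rate uses the same unit as
 *     // link_speed (Mbit/s) and must not exceed the current link speed.
 *     int rc = rte_eth_set_queue_rate_limit(port_id, 0, 100);
 */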
3194
3195 int
3196 rte_eth_mirror_rule_set(uint16_t port_id,
3197                         struct rte_eth_mirror_conf *mirror_conf,
3198                         uint8_t rule_id, uint8_t on)
3199 {
3200         struct rte_eth_dev *dev;
3201
3202         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3203         if (mirror_conf->rule_type == 0) {
3204                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3205                 return -EINVAL;
3206         }
3207
3208         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3209                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3210                                 ETH_64_POOLS - 1);
3211                 return -EINVAL;
3212         }
3213
3214         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3215              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3216             (mirror_conf->pool_mask == 0)) {
3217                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
3218                 return -EINVAL;
3219         }
3220
3221         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3222             mirror_conf->vlan.vlan_mask == 0) {
3223                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
3224                 return -EINVAL;
3225         }
3226
3227         dev = &rte_eth_devices[port_id];
3228         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3229
3230         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3231                                                 mirror_conf, rule_id, on));
3232 }
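
/*
 * Illustrative sketch for rte_eth_mirror_rule_set() above, using only the
 * fields that the checks in this function rely on; the rule id and pool
 * numbers are assumptions for the example:
 *
 *     struct rte_eth_mirror_conf conf = {
 *             .rule_type = ETH_MIRROR_VIRTUAL_POOL_UP,
 *             .dst_pool = 1,           // mirror into pool 1
 *             .pool_mask = 1ULL << 0,  // mirror traffic of pool 0
 *     };
 *
 *     int rc = rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 */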
3233
3234 int
3235 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3236 {
3237         struct rte_eth_dev *dev;
3238
3239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3240
3241         dev = &rte_eth_devices[port_id];
3242         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3243
3244         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3245                                                                    rule_id));
3246 }
3247
3248 RTE_INIT(eth_dev_init_cb_lists)
3249 {
3250         int i;
3251
3252         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3253                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3254 }
3255
3256 int
3257 rte_eth_dev_callback_register(uint16_t port_id,
3258                         enum rte_eth_event_type event,
3259                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3260 {
3261         struct rte_eth_dev *dev;
3262         struct rte_eth_dev_callback *user_cb;
3263         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3264         uint16_t last_port;
3265
3266         if (!cb_fn)
3267                 return -EINVAL;
3268
3269         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3270                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3271                 return -EINVAL;
3272         }
3273
3274         if (port_id == RTE_ETH_ALL) {
3275                 next_port = 0;
3276                 last_port = RTE_MAX_ETHPORTS - 1;
3277         } else {
3278                 next_port = last_port = port_id;
3279         }
3280
3281         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3282
3283         do {
3284                 dev = &rte_eth_devices[next_port];
3285
3286                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3287                         if (user_cb->cb_fn == cb_fn &&
3288                                 user_cb->cb_arg == cb_arg &&
3289                                 user_cb->event == event) {
3290                                 break;
3291                         }
3292                 }
3293
3294                 /* create a new callback. */
3295                 if (user_cb == NULL) {
3296                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3297                                 sizeof(struct rte_eth_dev_callback), 0);
3298                         if (user_cb != NULL) {
3299                                 user_cb->cb_fn = cb_fn;
3300                                 user_cb->cb_arg = cb_arg;
3301                                 user_cb->event = event;
3302                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3303                                                   user_cb, next);
3304                         } else {
3305                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3306                                 rte_eth_dev_callback_unregister(port_id, event,
3307                                                                 cb_fn, cb_arg);
3308                                 return -ENOMEM;
3309                         }
3310
3311                 }
3312         } while (++next_port <= last_port);
3313
3314         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3315         return 0;
3316 }
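
/*
 * Illustrative sketch for rte_eth_dev_callback_register() above. The
 * callback body is an assumption; only its signature is dictated by
 * rte_eth_dev_cb_fn:
 *
 *     static int
 *     on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *                    void *cb_arg, void *ret_param)
 *     {
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             printf("port %u: event %d\n", port_id, event);
 *             return 0;
 *     }
 *
 *     // Register for link-state-change events on every port.
 *     rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                                   on_link_change, NULL);
 */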
3317
3318 int
3319 rte_eth_dev_callback_unregister(uint16_t port_id,
3320                         enum rte_eth_event_type event,
3321                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3322 {
3323         int ret;
3324         struct rte_eth_dev *dev;
3325         struct rte_eth_dev_callback *cb, *next;
3326         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3327         uint16_t last_port;
3328
3329         if (!cb_fn)
3330                 return -EINVAL;
3331
3332         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3333                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3334                 return -EINVAL;
3335         }
3336
3337         if (port_id == RTE_ETH_ALL) {
3338                 next_port = 0;
3339                 last_port = RTE_MAX_ETHPORTS - 1;
3340         } else {
3341                 next_port = last_port = port_id;
3342         }
3343
3344         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3345
3346         do {
3347                 dev = &rte_eth_devices[next_port];
3348                 ret = 0;
3349                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3350                      cb = next) {
3351
3352                         next = TAILQ_NEXT(cb, next);
3353
3354                         if (cb->cb_fn != cb_fn || cb->event != event ||
3355                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3356                                 continue;
3357
3358                         /*
3359                          * if this callback is not executing right now,
3360                          * then remove it.
3361                          */
3362                         if (cb->active == 0) {
3363                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3364                                 rte_free(cb);
3365                         } else {
3366                                 ret = -EAGAIN;
3367                         }
3368                 }
3369         } while (++next_port <= last_port);
3370
3371         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3372         return ret;
3373 }
3374
3375 int
3376 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3377         enum rte_eth_event_type event, void *ret_param)
3378 {
3379         struct rte_eth_dev_callback *cb_lst;
3380         struct rte_eth_dev_callback dev_cb;
3381         int rc = 0;
3382
3383         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3384         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3385                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3386                         continue;
3387                 dev_cb = *cb_lst;
3388                 cb_lst->active = 1;
3389                 if (ret_param != NULL)
3390                         dev_cb.ret_param = ret_param;
3391
3392                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3393                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3394                                 dev_cb.cb_arg, dev_cb.ret_param);
3395                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3396                 cb_lst->active = 0;
3397         }
3398         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3399         return rc;
3400 }
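
/*
 * Note on the locking pattern above: a private copy of the callback entry is
 * taken and the spinlock is released before cb_fn is invoked, so a user
 * callback may itself call the register/unregister functions without
 * deadlocking. The "active" flag marks a callback that is currently
 * executing, which is what makes rte_eth_dev_callback_unregister() return
 * -EAGAIN instead of freeing it.
 */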
3401
3402 int
3403 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3404 {
3405         uint32_t vec;
3406         struct rte_eth_dev *dev;
3407         struct rte_intr_handle *intr_handle;
3408         uint16_t qid;
3409         int rc;
3410
3411         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3412
3413         dev = &rte_eth_devices[port_id];
3414
3415         if (!dev->intr_handle) {
3416                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3417                 return -ENOTSUP;
3418         }
3419
3420         intr_handle = dev->intr_handle;
3421         if (!intr_handle->intr_vec) {
3422                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3423                 return -EPERM;
3424         }
3425
3426         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3427                 vec = intr_handle->intr_vec[qid];
3428                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3429                 if (rc && rc != -EEXIST) {
3430                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3431                                         " op %d epfd %d vec %u\n",
3432                                         port_id, qid, op, epfd, vec);
3433                 }
3434         }
3435
3436         return 0;
3437 }
3438
3439 const struct rte_memzone *
3440 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3441                          uint16_t queue_id, size_t size, unsigned align,
3442                          int socket_id)
3443 {
3444         char z_name[RTE_MEMZONE_NAMESIZE];
3445         const struct rte_memzone *mz;
3446
3447         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3448                  dev->device->driver->name, ring_name,
3449                  dev->data->port_id, queue_id);
3450
3451         mz = rte_memzone_lookup(z_name);
3452         if (mz)
3453                 return mz;
3454
3455         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3456                         RTE_MEMZONE_IOVA_CONTIG, align);
3457 }
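
/*
 * Note on rte_eth_dma_zone_reserve() above: the memzone name is built from
 * the driver name, ring name, port id and queue id, so a queue that is set
 * up again after a stop/reconfigure cycle finds its existing ring memory via
 * rte_memzone_lookup() and reuses it instead of reserving a new zone.
 */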
3458
3459 int
3460 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3461                           int epfd, int op, void *data)
3462 {
3463         uint32_t vec;
3464         struct rte_eth_dev *dev;
3465         struct rte_intr_handle *intr_handle;
3466         int rc;
3467
3468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3469
3470         dev = &rte_eth_devices[port_id];
3471         if (queue_id >= dev->data->nb_rx_queues) {
3472                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3473                 return -EINVAL;
3474         }
3475
3476         if (!dev->intr_handle) {
3477                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3478                 return -ENOTSUP;
3479         }
3480
3481         intr_handle = dev->intr_handle;
3482         if (!intr_handle->intr_vec) {
3483                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3484                 return -EPERM;
3485         }
3486
3487         vec = intr_handle->intr_vec[queue_id];
3488         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3489         if (rc && rc != -EEXIST) {
3490                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3491                                 " op %d epfd %d vec %u\n",
3492                                 port_id, queue_id, op, epfd, vec);
3493                 return rc;
3494         }
3495
3496         return 0;
3497 }
3498
3499 int
3500 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3501                            uint16_t queue_id)
3502 {
3503         struct rte_eth_dev *dev;
3504
3505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3506
3507         dev = &rte_eth_devices[port_id];
3508
3509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3510         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3511                                                                 queue_id));
3512 }
3513
3514 int
3515 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3516                             uint16_t queue_id)
3517 {
3518         struct rte_eth_dev *dev;
3519
3520         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3521
3522         dev = &rte_eth_devices[port_id];
3523
3524         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3525         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3526                                                                 queue_id));
3527 }
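
/*
 * Illustrative RX-interrupt flow built on the helpers above. The epoll part
 * is a sketch under the assumption that the application uses the per-thread
 * epoll instance; event sizing and timeouts are example values:
 *
 *     struct rte_epoll_event ev[1];
 *
 *     // Map queue 0's interrupt vector into this lcore's epoll instance.
 *     rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *                               RTE_INTR_EVENT_ADD, NULL);
 *     rte_eth_dev_rx_intr_enable(port_id, 0);
 *     rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, 1, -1); // -1: wait forever
 *     rte_eth_dev_rx_intr_disable(port_id, 0);
 *     // ... poll the queue with rte_eth_rx_burst() until it is empty ...
 */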
3528
3529
3530 int
3531 rte_eth_dev_filter_supported(uint16_t port_id,
3532                              enum rte_filter_type filter_type)
3533 {
3534         struct rte_eth_dev *dev;
3535
3536         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3537
3538         dev = &rte_eth_devices[port_id];
3539         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3540         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3541                                 RTE_ETH_FILTER_NOP, NULL);
3542 }
3543
3544 int
3545 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3546                         enum rte_filter_op filter_op, void *arg)
3547 {
3548         struct rte_eth_dev *dev;
3549
3550         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3551
3552         dev = &rte_eth_devices[port_id];
3553         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3554         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3555                                                              filter_op, arg));
3556 }
3557
3558 const struct rte_eth_rxtx_callback *
3559 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3560                 rte_rx_callback_fn fn, void *user_param)
3561 {
3562 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3563         rte_errno = ENOTSUP;
3564         return NULL;
3565 #endif
3566         /* check input parameters */
3567         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3568                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3569                 rte_errno = EINVAL;
3570                 return NULL;
3571         }
3572         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3573
3574         if (cb == NULL) {
3575                 rte_errno = ENOMEM;
3576                 return NULL;
3577         }
3578
3579         cb->fn.rx = fn;
3580         cb->param = user_param;
3581
3582         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3583         /* Add the callbacks in fifo order. */
3584         struct rte_eth_rxtx_callback *tail =
3585                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3586
3587         if (!tail) {
3588                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3589
3590         } else {
3591                 while (tail->next)
3592                         tail = tail->next;
3593                 tail->next = cb;
3594         }
3595         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3596
3597         return cb;
3598 }
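
/*
 * Illustrative sketch for rte_eth_add_rx_callback() above. The counting
 * callback is an assumption; only its signature follows rte_rx_callback_fn:
 *
 *     static uint16_t
 *     count_rx(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *              uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *     {
 *             *(uint64_t *)user_param += nb_pkts;
 *             RTE_SET_USED(port);
 *             RTE_SET_USED(queue);
 *             RTE_SET_USED(pkts);
 *             RTE_SET_USED(max_pkts);
 *             return nb_pkts;
 *     }
 *
 *     static uint64_t rx_count;
 *     const struct rte_eth_rxtx_callback *cb =
 *             rte_eth_add_rx_callback(port_id, 0, count_rx, &rx_count);
 */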
3599
3600 const struct rte_eth_rxtx_callback *
3601 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3602                 rte_rx_callback_fn fn, void *user_param)
3603 {
3604 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3605         rte_errno = ENOTSUP;
3606         return NULL;
3607 #endif
3608         /* check input parameters */
3609         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3610                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3611                 rte_errno = EINVAL;
3612                 return NULL;
3613         }
3614
3615         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3616
3617         if (cb == NULL) {
3618                 rte_errno = ENOMEM;
3619                 return NULL;
3620         }
3621
3622         cb->fn.rx = fn;
3623         cb->param = user_param;
3624
3625         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3626         /* Add the callback at the first position */
3627         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3628         rte_smp_wmb();
3629         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3630         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3631
3632         return cb;
3633 }
3634
3635 const struct rte_eth_rxtx_callback *
3636 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3637                 rte_tx_callback_fn fn, void *user_param)
3638 {
3639 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3640         rte_errno = ENOTSUP;
3641         return NULL;
3642 #endif
3643         /* check input parameters */
3644         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3645                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3646                 rte_errno = EINVAL;
3647                 return NULL;
3648         }
3649
3650         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3651
3652         if (cb == NULL) {
3653                 rte_errno = ENOMEM;
3654                 return NULL;
3655         }
3656
3657         cb->fn.tx = fn;
3658         cb->param = user_param;
3659
3660         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3661         /* Add the callbacks in fifo order. */
3662         struct rte_eth_rxtx_callback *tail =
3663                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3664
3665         if (!tail) {
3666                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3667
3668         } else {
3669                 while (tail->next)
3670                         tail = tail->next;
3671                 tail->next = cb;
3672         }
3673         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3674
3675         return cb;
3676 }
3677
3678 int
3679 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3680                 const struct rte_eth_rxtx_callback *user_cb)
3681 {
3682 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3683         return -ENOTSUP;
3684 #endif
3685         /* Check input parameters. */
3686         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3687         if (user_cb == NULL ||
3688                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3689                 return -EINVAL;
3690
3691         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3692         struct rte_eth_rxtx_callback *cb;
3693         struct rte_eth_rxtx_callback **prev_cb;
3694         int ret = -EINVAL;
3695
3696         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3697         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3698         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3699                 cb = *prev_cb;
3700                 if (cb == user_cb) {
3701                         /* Remove the user cb from the callback list. */
3702                         *prev_cb = cb->next;
3703                         ret = 0;
3704                         break;
3705                 }
3706         }
3707         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3708
3709         return ret;
3710 }
3711
3712 int
3713 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3714                 const struct rte_eth_rxtx_callback *user_cb)
3715 {
3716 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3717         return -ENOTSUP;
3718 #endif
3719         /* Check input parameters. */
3720         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3721         if (user_cb == NULL ||
3722                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3723                 return -EINVAL;
3724
3725         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3726         int ret = -EINVAL;
3727         struct rte_eth_rxtx_callback *cb;
3728         struct rte_eth_rxtx_callback **prev_cb;
3729
3730         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3731         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3732         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3733                 cb = *prev_cb;
3734                 if (cb == user_cb) {
3735                         /* Remove the user cb from the callback list. */
3736                         *prev_cb = cb->next;
3737                         ret = 0;
3738                         break;
3739                 }
3740         }
3741         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3742
3743         return ret;
3744 }
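
/*
 * Note on the two remove functions above: they only unlink the callback from
 * the per-queue list. The rte_eth_rxtx_callback structure itself is not
 * freed here, since a datapath thread may still be dereferencing it; freeing
 * it is left to the caller once no rx/tx burst can be using the callback
 * any more.
 */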
3745
3746 int
3747 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3748         struct rte_eth_rxq_info *qinfo)
3749 {
3750         struct rte_eth_dev *dev;
3751
3752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3753
3754         if (qinfo == NULL)
3755                 return -EINVAL;
3756
3757         dev = &rte_eth_devices[port_id];
3758         if (queue_id >= dev->data->nb_rx_queues) {
3759                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3760                 return -EINVAL;
3761         }
3762
3763         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3764
3765         memset(qinfo, 0, sizeof(*qinfo));
3766         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3767         return 0;
3768 }
3769
3770 int
3771 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3772         struct rte_eth_txq_info *qinfo)
3773 {
3774         struct rte_eth_dev *dev;
3775
3776         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3777
3778         if (qinfo == NULL)
3779                 return -EINVAL;
3780
3781         dev = &rte_eth_devices[port_id];
3782         if (queue_id >= dev->data->nb_tx_queues) {
3783                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3784                 return -EINVAL;
3785         }
3786
3787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3788
3789         memset(qinfo, 0, sizeof(*qinfo));
3790         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3791         return 0;
3792 }
3793
3794 int
3795 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3796                              struct ether_addr *mc_addr_set,
3797                              uint32_t nb_mc_addr)
3798 {
3799         struct rte_eth_dev *dev;
3800
3801         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3802
3803         dev = &rte_eth_devices[port_id];
3804         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3805         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3806                                                 mc_addr_set, nb_mc_addr));
3807 }
3808
3809 int
3810 rte_eth_timesync_enable(uint16_t port_id)
3811 {
3812         struct rte_eth_dev *dev;
3813
3814         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3815         dev = &rte_eth_devices[port_id];
3816
3817         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3818         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3819 }
3820
3821 int
3822 rte_eth_timesync_disable(uint16_t port_id)
3823 {
3824         struct rte_eth_dev *dev;
3825
3826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3827         dev = &rte_eth_devices[port_id];
3828
3829         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3830         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3831 }
3832
3833 int
3834 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3835                                    uint32_t flags)
3836 {
3837         struct rte_eth_dev *dev;
3838
3839         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3840         dev = &rte_eth_devices[port_id];
3841
3842         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3843         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3844                                 (dev, timestamp, flags));
3845 }
3846
3847 int
3848 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3849                                    struct timespec *timestamp)
3850 {
3851         struct rte_eth_dev *dev;
3852
3853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3854         dev = &rte_eth_devices[port_id];
3855
3856         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3857         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3858                                 (dev, timestamp));
3859 }
3860
3861 int
3862 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3863 {
3864         struct rte_eth_dev *dev;
3865
3866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3867         dev = &rte_eth_devices[port_id];
3868
3869         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3870         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3871                                                                       delta));
3872 }
3873
3874 int
3875 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3876 {
3877         struct rte_eth_dev *dev;
3878
3879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3880         dev = &rte_eth_devices[port_id];
3881
3882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3883         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3884                                                                 timestamp));
3885 }
3886
3887 int
3888 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3889 {
3890         struct rte_eth_dev *dev;
3891
3892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3893         dev = &rte_eth_devices[port_id];
3894
3895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3896         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3897                                                                 timestamp));
3898 }
3899
3900 int
3901 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3902 {
3903         struct rte_eth_dev *dev;
3904
3905         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3906
3907         dev = &rte_eth_devices[port_id];
3908         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3909         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3910 }
3911
3912 int
3913 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3914 {
3915         struct rte_eth_dev *dev;
3916
3917         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3918
3919         dev = &rte_eth_devices[port_id];
3920         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3921         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3922 }
3923
3924 int
3925 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3926 {
3927         struct rte_eth_dev *dev;
3928
3929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3930
3931         dev = &rte_eth_devices[port_id];
3932         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3933         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3934 }
3935
3936 int
3937 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3938 {
3939         struct rte_eth_dev *dev;
3940
3941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3942
3943         dev = &rte_eth_devices[port_id];
3944         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3945         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3946 }
3947
3948 int
3949 rte_eth_dev_get_dcb_info(uint16_t port_id,
3950                              struct rte_eth_dcb_info *dcb_info)
3951 {
3952         struct rte_eth_dev *dev;
3953
3954         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3955
3956         dev = &rte_eth_devices[port_id];
3957         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3958
3959         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3960         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
3961 }
3962
3963 int
3964 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3965                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3966 {
3967         struct rte_eth_dev *dev;
3968
3969         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3970         if (l2_tunnel == NULL) {
3971                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3972                 return -EINVAL;
3973         }
3974
3975         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3976                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3977                 return -EINVAL;
3978         }
3979
3980         dev = &rte_eth_devices[port_id];
3981         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3982                                 -ENOTSUP);
3983         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
3984                                                                 l2_tunnel));
3985 }
3986
3987 int
3988 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3989                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3990                                   uint32_t mask,
3991                                   uint8_t en)
3992 {
3993         struct rte_eth_dev *dev;
3994
3995         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3996
3997         if (l2_tunnel == NULL) {
3998                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3999                 return -EINVAL;
4000         }
4001
4002         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4003                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4004                 return -EINVAL;
4005         }
4006
4007         if (mask == 0) {
4008                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4009                 return -EINVAL;
4010         }
4011
4012         dev = &rte_eth_devices[port_id];
4013         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4014                                 -ENOTSUP);
4015         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4016                                                         l2_tunnel, mask, en));
4017 }
4018
4019 static void
4020 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4021                            const struct rte_eth_desc_lim *desc_lim)
4022 {
4023         if (desc_lim->nb_align != 0)
4024                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4025
4026         if (desc_lim->nb_max != 0)
4027                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4028
4029         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4030 }
4031
4032 int
4033 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4034                                  uint16_t *nb_rx_desc,
4035                                  uint16_t *nb_tx_desc)
4036 {
4037         struct rte_eth_dev *dev;
4038         struct rte_eth_dev_info dev_info;
4039
4040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4041
4042         dev = &rte_eth_devices[port_id];
4043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4044
4045         rte_eth_dev_info_get(port_id, &dev_info);
4046
4047         if (nb_rx_desc != NULL)
4048                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4049
4050         if (nb_tx_desc != NULL)
4051                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4052
4053         return 0;
4054 }
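
/*
 * Illustrative sketch for rte_eth_dev_adjust_nb_rx_tx_desc() above; the
 * initial descriptor counts are assumptions and simply get aligned and
 * clamped to the driver's limits:
 *
 *     uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *     rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *     // nb_rxd/nb_txd now respect rx_desc_lim/tx_desc_lim and can be
 *     // passed to rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup().
 */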
4055
4056 int
4057 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4058 {
4059         struct rte_eth_dev *dev;
4060
4061         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4062
4063         if (pool == NULL)
4064                 return -EINVAL;
4065
4066         dev = &rte_eth_devices[port_id];
4067
4068         if (*dev->dev_ops->pool_ops_supported == NULL)
4069                 return 1; /* all pools are supported */
4070
4071         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4072 }
4073
4074 RTE_INIT(ethdev_init_log);
4075 static void
4076 ethdev_init_log(void)
4077 {
4078         ethdev_logtype = rte_log_register("lib.ethdev");
4079         if (ethdev_logtype >= 0)
4080                 rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
4081 }