1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37
38 #include "rte_ether.h"
39 #include "rte_ethdev.h"
40 #include "rte_ethdev_driver.h"
41 #include "ethdev_profile.h"
42
43 static int ethdev_logtype;
44
45 #define ethdev_log(level, fmt, ...) \
46         rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
47
48 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
49 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
50 static uint8_t eth_dev_last_created_port;
51
52 /* spinlock for eth device callbacks */
53 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
54
55 /* spinlock for add/remove rx callbacks */
56 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
57
58 /* spinlock for add/remove tx callbacks */
59 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
60
61 /* spinlock for shared data allocation */
62 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
63
64 /* store statistics names and their offsets within the stats structure */
65 struct rte_eth_xstats_name_off {
66         char name[RTE_ETH_XSTATS_NAME_SIZE];
67         unsigned offset;
68 };
69
70 /* Shared memory between primary and secondary processes. */
71 static struct {
72         uint64_t next_owner_id;
73         rte_spinlock_t ownership_lock;
74         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
75 } *rte_eth_dev_shared_data;
76
77 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
78         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
79         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
80         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
81         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
82         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
83         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
84         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
85         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
86                 rx_nombuf)},
87 };
88
89 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
90
91 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
92         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
93         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
94         {"errors", offsetof(struct rte_eth_stats, q_errors)},
95 };
96
97 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
98                 sizeof(rte_rxq_stats_strings[0]))
99
100 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
101         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
102         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
103 };
104 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
105                 sizeof(rte_txq_stats_strings[0]))
106
107 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
108         { DEV_RX_OFFLOAD_##_name, #_name }
109
110 static const struct {
111         uint64_t offload;
112         const char *name;
113 } rte_rx_offload_names[] = {
114         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
127         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
128         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
129         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
130 };
131
132 #undef RTE_RX_OFFLOAD_BIT2STR
133
134 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
135         { DEV_TX_OFFLOAD_##_name, #_name }
136
137 static const struct {
138         uint64_t offload;
139         const char *name;
140 } rte_tx_offload_names[] = {
141         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
142         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
143         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
147         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
149         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
150         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
151         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
155         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
156         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
157         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
158         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
159 };
160
161 #undef RTE_TX_OFFLOAD_BIT2STR
162
163 /**
164  * The user application callback description.
165  *
166  * It contains the callback address registered by the user application,
167  * a pointer to the callback's parameters, and the event type.
168  */
169 struct rte_eth_dev_callback {
170         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
171         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
172         void *cb_arg;                           /**< Parameter for callback */
173         void *ret_param;                        /**< Return parameter */
174         enum rte_eth_event_type event;          /**< Interrupt event type */
175         uint32_t active;                        /**< Callback is executing */
176 };
177
178 enum {
179         STAT_QMAP_TX = 0,
180         STAT_QMAP_RX
181 };
182
183 uint16_t
184 rte_eth_find_next(uint16_t port_id)
185 {
186         while (port_id < RTE_MAX_ETHPORTS &&
187                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
188                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
189                 port_id++;
190
191         if (port_id >= RTE_MAX_ETHPORTS)
192                 return RTE_MAX_ETHPORTS;
193
194         return port_id;
195 }
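
/*
 * A minimal usage sketch (illustrative only, assuming just the public
 * declarations from rte_ethdev.h): walk every valid port either with
 * rte_eth_find_next() directly or with the RTE_ETH_FOREACH_DEV() helper
 * used later in this file, which additionally skips owned ports.
 *
 *        uint16_t pid;
 *
 *        for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *             pid = rte_eth_find_next(pid + 1))
 *                printf("port %u is attached\n", pid);
 *
 *        RTE_ETH_FOREACH_DEV(pid)
 *                printf("port %u is attached and unowned\n", pid);
 */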
196
197 static void
198 rte_eth_dev_shared_data_prepare(void)
199 {
200         const unsigned flags = 0;
201         const struct rte_memzone *mz;
202
203         rte_spinlock_lock(&rte_eth_shared_data_lock);
204
205         if (rte_eth_dev_shared_data == NULL) {
206                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
207                         /* Allocate port data and ownership shared memory. */
208                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
209                                         sizeof(*rte_eth_dev_shared_data),
210                                         rte_socket_id(), flags);
211                 } else
212                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
213                 if (mz == NULL)
214                         rte_panic("Cannot allocate ethdev shared data\n");
215
216                 rte_eth_dev_shared_data = mz->addr;
217                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
218                         rte_eth_dev_shared_data->next_owner_id =
219                                         RTE_ETH_DEV_NO_OWNER + 1;
220                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
221                         memset(rte_eth_dev_shared_data->data, 0,
222                                sizeof(rte_eth_dev_shared_data->data));
223                 }
224         }
225
226         rte_spinlock_unlock(&rte_eth_shared_data_lock);
227 }
228
229 struct rte_eth_dev *
230 rte_eth_dev_allocated(const char *name)
231 {
232         unsigned i;
233
234         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
235                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
236                     strcmp(rte_eth_devices[i].data->name, name) == 0)
237                         return &rte_eth_devices[i];
238         }
239         return NULL;
240 }
241
242 static uint16_t
243 rte_eth_dev_find_free_port(void)
244 {
245         unsigned i;
246
247         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
248                 /* Using shared name field to find a free port. */
249                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
250                         RTE_ASSERT(rte_eth_devices[i].state ==
251                                    RTE_ETH_DEV_UNUSED);
252                         return i;
253                 }
254         }
255         return RTE_MAX_ETHPORTS;
256 }
257
258 static struct rte_eth_dev *
259 eth_dev_get(uint16_t port_id)
260 {
261         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
262
263         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
264         eth_dev->state = RTE_ETH_DEV_ATTACHED;
265
266         eth_dev_last_created_port = port_id;
267
268         return eth_dev;
269 }
270
271 struct rte_eth_dev *
272 rte_eth_dev_allocate(const char *name)
273 {
274         uint16_t port_id;
275         struct rte_eth_dev *eth_dev = NULL;
276
277         rte_eth_dev_shared_data_prepare();
278
279         /* Synchronize port creation between primary and secondary processes. */
280         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
281
282         port_id = rte_eth_dev_find_free_port();
283         if (port_id == RTE_MAX_ETHPORTS) {
284                 ethdev_log(ERR, "Reached maximum number of Ethernet ports");
285                 goto unlock;
286         }
287
288         if (rte_eth_dev_allocated(name) != NULL) {
289                 ethdev_log(ERR,
290                         "Ethernet Device with name %s already allocated!",
291                         name);
292                 goto unlock;
293         }
294
295         eth_dev = eth_dev_get(port_id);
296         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
297         eth_dev->data->port_id = port_id;
298         eth_dev->data->mtu = ETHER_MTU;
299
300 unlock:
301         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
302
303         if (eth_dev != NULL)
304                 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
305
306         return eth_dev;
307 }
308
309 /*
310  * Attach to a port already registered by the primary process, so that
311  * the same device gets the same port id in both the primary and
312  * secondary processes.
313  */
314 struct rte_eth_dev *
315 rte_eth_dev_attach_secondary(const char *name)
316 {
317         uint16_t i;
318         struct rte_eth_dev *eth_dev = NULL;
319
320         rte_eth_dev_shared_data_prepare();
321
322         /* Synchronize port attachment with port creation/release in the primary process. */
323         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
324
325         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
326                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
327                         break;
328         }
329         if (i == RTE_MAX_ETHPORTS) {
330                 RTE_PMD_DEBUG_TRACE(
331                         "device %s is not driven by the primary process\n",
332                         name);
333         } else {
334                 eth_dev = eth_dev_get(i);
335                 RTE_ASSERT(eth_dev->data->port_id == i);
336         }
337
338         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
339         return eth_dev;
340 }
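
/*
 * A hedged sketch of how a PMD probe path typically uses the two
 * allocation entry points above depending on the process type; the
 * "net_example0" name is illustrative only, not taken from a real driver.
 *
 *        struct rte_eth_dev *eth_dev;
 *
 *        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *                eth_dev = rte_eth_dev_allocate("net_example0");
 *        else
 *                eth_dev = rte_eth_dev_attach_secondary("net_example0");
 *        if (eth_dev == NULL)
 *                return -ENODEV;
 *
 * The secondary process reuses the shared rte_eth_dev_data slot found by
 * name, so it ends up with the same port_id the primary assigned.
 */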
341
342 int
343 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
344 {
345         if (eth_dev == NULL)
346                 return -EINVAL;
347
348         rte_eth_dev_shared_data_prepare();
349
350         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
351
352         eth_dev->state = RTE_ETH_DEV_UNUSED;
353
354         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
355
356         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
357
358         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
359
360         return 0;
361 }
362
363 int
364 rte_eth_dev_is_valid_port(uint16_t port_id)
365 {
366         if (port_id >= RTE_MAX_ETHPORTS ||
367             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
368                 return 0;
369         else
370                 return 1;
371 }
372
373 static int
374 rte_eth_is_valid_owner_id(uint64_t owner_id)
375 {
376         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
377             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
378                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
379                 return 0;
380         }
381         return 1;
382 }
383
384 uint64_t __rte_experimental
385 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
386 {
387         while (port_id < RTE_MAX_ETHPORTS &&
388                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
389                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
390                rte_eth_devices[port_id].data->owner.id != owner_id))
391                 port_id++;
392
393         if (port_id >= RTE_MAX_ETHPORTS)
394                 return RTE_MAX_ETHPORTS;
395
396         return port_id;
397 }
398
399 int __rte_experimental
400 rte_eth_dev_owner_new(uint64_t *owner_id)
401 {
402         rte_eth_dev_shared_data_prepare();
403
404         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
405
406         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
407
408         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
409         return 0;
410 }
411
412 static int
413 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
414                        const struct rte_eth_dev_owner *new_owner)
415 {
416         struct rte_eth_dev_owner *port_owner;
417         int sret;
418
419         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
420
421         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
422             !rte_eth_is_valid_owner_id(old_owner_id))
423                 return -EINVAL;
424
425         port_owner = &rte_eth_devices[port_id].data->owner;
426         if (port_owner->id != old_owner_id) {
427                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
428                                     " by %s_%016lX.\n", port_id,
429                                     port_owner->name, port_owner->id);
430                 return -EPERM;
431         }
432
433         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
434                         new_owner->name);
435         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
436                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
437                                     port_id);
438
439         port_owner->id = new_owner->id;
440
441         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
442                             new_owner->name, new_owner->id);
443
444         return 0;
445 }
446
447 int __rte_experimental
448 rte_eth_dev_owner_set(const uint16_t port_id,
449                       const struct rte_eth_dev_owner *owner)
450 {
451         int ret;
452
453         rte_eth_dev_shared_data_prepare();
454
455         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
456
457         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
458
459         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
460         return ret;
461 }
462
463 int __rte_experimental
464 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
465 {
466         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
467                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
468         int ret;
469
470         rte_eth_dev_shared_data_prepare();
471
472         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
473
474         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
475
476         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
477         return ret;
478 }
479
480 void __rte_experimental
481 rte_eth_dev_owner_delete(const uint64_t owner_id)
482 {
483         uint16_t port_id;
484
485         rte_eth_dev_shared_data_prepare();
486
487         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
488
489         if (rte_eth_is_valid_owner_id(owner_id)) {
490                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
491                         memset(&rte_eth_devices[port_id].data->owner, 0,
492                                sizeof(struct rte_eth_dev_owner));
493                 RTE_PMD_DEBUG_TRACE("All port owners owned by %016"PRIX64
494                                     " identifier have been removed.\n", owner_id);
495         }
496
497         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
498 }
499
500 int __rte_experimental
501 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
502 {
503         int ret = 0;
504
505         rte_eth_dev_shared_data_prepare();
506
507         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
508
509         if (!rte_eth_dev_is_valid_port(port_id)) {
510                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
511                 ret = -ENODEV;
512         } else {
513                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
514                            sizeof(*owner));
515         }
516
517         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
518         return ret;
519 }
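
/*
 * A minimal sketch of the experimental ownership API above: an entity
 * takes a fresh owner id, claims a port so that generic iterators such as
 * RTE_ETH_FOREACH_DEV skip it, and releases the port when done.  Error
 * handling is elided and "my_app" is an arbitrary example name.
 *
 *        struct rte_eth_dev_owner owner;
 *        uint64_t my_owner_id;
 *
 *        rte_eth_dev_owner_new(&my_owner_id);
 *        owner.id = my_owner_id;
 *        snprintf(owner.name, sizeof(owner.name), "%s", "my_app");
 *        rte_eth_dev_owner_set(port_id, &owner);
 *        ...
 *        rte_eth_dev_owner_unset(port_id, my_owner_id);
 */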
520
521 int
522 rte_eth_dev_socket_id(uint16_t port_id)
523 {
524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
525         return rte_eth_devices[port_id].data->numa_node;
526 }
527
528 void *
529 rte_eth_dev_get_sec_ctx(uint16_t port_id)
530 {
531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
532         return rte_eth_devices[port_id].security_ctx;
533 }
534
535 uint16_t
536 rte_eth_dev_count(void)
537 {
538         return rte_eth_dev_count_avail();
539 }
540
541 uint16_t
542 rte_eth_dev_count_avail(void)
543 {
544         uint16_t p;
545         uint16_t count;
546
547         count = 0;
548
549         RTE_ETH_FOREACH_DEV(p)
550                 count++;
551
552         return count;
553 }
554
555 uint16_t
556 rte_eth_dev_count_total(void)
557 {
558         uint16_t port, count = 0;
559
560         for (port = 0; port < RTE_MAX_ETHPORTS; port++)
561                 if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
562                         count++;
563
564         return count;
565 }
566
567 int
568 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
569 {
570         char *tmp;
571
572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
573
574         if (name == NULL) {
575                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
576                 return -EINVAL;
577         }
578
579         /* Don't check 'rte_eth_devices[i].data' here,
580          * because it might be overwritten by a VDEV PMD; use the shared data. */
581         tmp = rte_eth_dev_shared_data->data[port_id].name;
582         strcpy(name, tmp);
583         return 0;
584 }
585
586 int
587 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
588 {
589         uint32_t pid;
590
591         if (name == NULL) {
592                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
593                 return -EINVAL;
594         }
595
596         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
597                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
598                     !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
599                         *port_id = pid;
600                         return 0;
601                 }
602         }
603
604         return -ENODEV;
605 }
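
/*
 * A short round-trip sketch between a port id and its device name,
 * assuming RTE_ETH_NAME_MAX_LEN from rte_ethdev.h is large enough for any
 * ethdev name:
 *
 *        char name[RTE_ETH_NAME_MAX_LEN];
 *        uint16_t pid;
 *
 *        if (rte_eth_dev_get_name_by_port(port_id, name) == 0 &&
 *            rte_eth_dev_get_port_by_name(name, &pid) == 0)
 *                RTE_ASSERT(pid == port_id);
 */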
606
607 static int
608 eth_err(uint16_t port_id, int ret)
609 {
610         if (ret == 0)
611                 return 0;
612         if (rte_eth_dev_is_removed(port_id))
613                 return -EIO;
614         return ret;
615 }
616
617 /* attach the new device, then store the port_id of the device */
618 int
619 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
620 {
621         int current = rte_eth_dev_count_total();
622         struct rte_devargs da;
623         int ret = -1;
624
625         memset(&da, 0, sizeof(da));
626
627         if ((devargs == NULL) || (port_id == NULL)) {
628                 ret = -EINVAL;
629                 goto err;
630         }
631
632         /* parse devargs */
633         if (rte_devargs_parse(&da, "%s", devargs))
634                 goto err;
635
636         ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
637         if (ret < 0)
638                 goto err;
639
640         /* no point looking at the port count if no port exists */
641         if (!rte_eth_dev_count_total()) {
642                 ethdev_log(ERR, "No port found for device (%s)", da.name);
643                 ret = -1;
644                 goto err;
645         }
646
647         /* if nothing happened, there is a bug here, since some driver told us
648          * it did attach a device, but did not create a port.
649          * FIXME: race condition in case of plug-out of another device
650          */
651         if (current == rte_eth_dev_count_total()) {
652                 ret = -1;
653                 goto err;
654         }
655
656         *port_id = eth_dev_last_created_port;
657         ret = 0;
658
659 err:
660         free(da.args);
661         return ret;
662 }
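
/*
 * A hedged usage sketch for the hotplug pair rte_eth_dev_attach() /
 * rte_eth_dev_detach().  The devargs string below (a TAP vdev) is only an
 * example; any bus/device/driver arguments accepted by the EAL work.
 *
 *        uint16_t port_id;
 *        char name[RTE_ETH_NAME_MAX_LEN];
 *
 *        if (rte_eth_dev_attach("net_tap0,iface=dpdk-tap0", &port_id) == 0) {
 *                ... configure, start and use the port ...
 *                rte_eth_dev_detach(port_id, name);
 *        }
 */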
663
664 /* detach the device, then store the name of the device */
665 int
666 rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
667 {
668         struct rte_device *dev;
669         struct rte_bus *bus;
670         uint32_t dev_flags;
671         int ret = -1;
672
673         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
674
675         dev_flags = rte_eth_devices[port_id].data->dev_flags;
676         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
677                 ethdev_log(ERR,
678                         "Port %" PRIu16 " is bonded, cannot detach", port_id);
679                 return -ENOTSUP;
680         }
681
682         dev = rte_eth_devices[port_id].device;
683         if (dev == NULL)
684                 return -EINVAL;
685
686         bus = rte_bus_find_by_device(dev);
687         if (bus == NULL)
688                 return -ENOENT;
689
690         ret = rte_eal_hotplug_remove(bus->name, dev->name);
691         if (ret < 0)
692                 return ret;
693
694         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
695         return 0;
696 }
697
698 static int
699 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
700 {
701         uint16_t old_nb_queues = dev->data->nb_rx_queues;
702         void **rxq;
703         unsigned i;
704
705         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
706                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
707                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
708                                 RTE_CACHE_LINE_SIZE);
709                 if (dev->data->rx_queues == NULL) {
710                         dev->data->nb_rx_queues = 0;
711                         return -(ENOMEM);
712                 }
713         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
714                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
715
716                 rxq = dev->data->rx_queues;
717
718                 for (i = nb_queues; i < old_nb_queues; i++)
719                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
720                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
721                                 RTE_CACHE_LINE_SIZE);
722                 if (rxq == NULL)
723                         return -(ENOMEM);
724                 if (nb_queues > old_nb_queues) {
725                         uint16_t new_qs = nb_queues - old_nb_queues;
726
727                         memset(rxq + old_nb_queues, 0,
728                                 sizeof(rxq[0]) * new_qs);
729                 }
730
731                 dev->data->rx_queues = rxq;
732
733         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
734                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
735
736                 rxq = dev->data->rx_queues;
737
738                 for (i = nb_queues; i < old_nb_queues; i++)
739                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
740
741                 rte_free(dev->data->rx_queues);
742                 dev->data->rx_queues = NULL;
743         }
744         dev->data->nb_rx_queues = nb_queues;
745         return 0;
746 }
747
748 int
749 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
750 {
751         struct rte_eth_dev *dev;
752
753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
754
755         dev = &rte_eth_devices[port_id];
756         if (!dev->data->dev_started) {
757                 RTE_PMD_DEBUG_TRACE(
758                     "port %d must be started before starting any queue\n", port_id);
759                 return -EINVAL;
760         }
761
762         if (rx_queue_id >= dev->data->nb_rx_queues) {
763                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
764                 return -EINVAL;
765         }
766
767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
768
769         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
770                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
771                         " already started\n",
772                         rx_queue_id, port_id);
773                 return 0;
774         }
775
776         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
777                                                              rx_queue_id));
778
779 }
780
781 int
782 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
783 {
784         struct rte_eth_dev *dev;
785
786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
787
788         dev = &rte_eth_devices[port_id];
789         if (rx_queue_id >= dev->data->nb_rx_queues) {
790                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
791                 return -EINVAL;
792         }
793
794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
795
796         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
797                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
798                         " already stopped\n",
799                         rx_queue_id, port_id);
800                 return 0;
801         }
802
803         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
804
805 }
806
807 int
808 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
809 {
810         struct rte_eth_dev *dev;
811
812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
813
814         dev = &rte_eth_devices[port_id];
815         if (!dev->data->dev_started) {
816                 RTE_PMD_DEBUG_TRACE(
817                     "port %d must be started before starting any queue\n", port_id);
818                 return -EINVAL;
819         }
820
821         if (tx_queue_id >= dev->data->nb_tx_queues) {
822                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
823                 return -EINVAL;
824         }
825
826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
827
828         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
829                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
830                         " already started\n",
831                         tx_queue_id, port_id);
832                 return 0;
833         }
834
835         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
836                                                              tx_queue_id));
837
838 }
839
840 int
841 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
842 {
843         struct rte_eth_dev *dev;
844
845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
846
847         dev = &rte_eth_devices[port_id];
848         if (tx_queue_id >= dev->data->nb_tx_queues) {
849                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
850                 return -EINVAL;
851         }
852
853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
854
855         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
856                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
857                         " already stopped\n",
858                         tx_queue_id, port_id);
859                 return 0;
860         }
861
862         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
863
864 }
865
866 static int
867 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
868 {
869         uint16_t old_nb_queues = dev->data->nb_tx_queues;
870         void **txq;
871         unsigned i;
872
873         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
874                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
875                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
876                                                    RTE_CACHE_LINE_SIZE);
877                 if (dev->data->tx_queues == NULL) {
878                         dev->data->nb_tx_queues = 0;
879                         return -(ENOMEM);
880                 }
881         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
882                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
883
884                 txq = dev->data->tx_queues;
885
886                 for (i = nb_queues; i < old_nb_queues; i++)
887                         (*dev->dev_ops->tx_queue_release)(txq[i]);
888                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
889                                   RTE_CACHE_LINE_SIZE);
890                 if (txq == NULL)
891                         return -ENOMEM;
892                 if (nb_queues > old_nb_queues) {
893                         uint16_t new_qs = nb_queues - old_nb_queues;
894
895                         memset(txq + old_nb_queues, 0,
896                                sizeof(txq[0]) * new_qs);
897                 }
898
899                 dev->data->tx_queues = txq;
900
901         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
902                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
903
904                 txq = dev->data->tx_queues;
905
906                 for (i = nb_queues; i < old_nb_queues; i++)
907                         (*dev->dev_ops->tx_queue_release)(txq[i]);
908
909                 rte_free(dev->data->tx_queues);
910                 dev->data->tx_queues = NULL;
911         }
912         dev->data->nb_tx_queues = nb_queues;
913         return 0;
914 }
915
916 uint32_t
917 rte_eth_speed_bitflag(uint32_t speed, int duplex)
918 {
919         switch (speed) {
920         case ETH_SPEED_NUM_10M:
921                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
922         case ETH_SPEED_NUM_100M:
923                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
924         case ETH_SPEED_NUM_1G:
925                 return ETH_LINK_SPEED_1G;
926         case ETH_SPEED_NUM_2_5G:
927                 return ETH_LINK_SPEED_2_5G;
928         case ETH_SPEED_NUM_5G:
929                 return ETH_LINK_SPEED_5G;
930         case ETH_SPEED_NUM_10G:
931                 return ETH_LINK_SPEED_10G;
932         case ETH_SPEED_NUM_20G:
933                 return ETH_LINK_SPEED_20G;
934         case ETH_SPEED_NUM_25G:
935                 return ETH_LINK_SPEED_25G;
936         case ETH_SPEED_NUM_40G:
937                 return ETH_LINK_SPEED_40G;
938         case ETH_SPEED_NUM_50G:
939                 return ETH_LINK_SPEED_50G;
940         case ETH_SPEED_NUM_56G:
941                 return ETH_LINK_SPEED_56G;
942         case ETH_SPEED_NUM_100G:
943                 return ETH_LINK_SPEED_100G;
944         default:
945                 return 0;
946         }
947 }
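
/*
 * A small sketch of how the helper above is typically used to build the
 * link_speeds advertisement mask passed through rte_eth_conf, here forcing
 * a fixed 10G full-duplex link (values are illustrative):
 *
 *        struct rte_eth_conf conf = { 0 };
 *
 *        conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                           rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                                 ETH_LINK_FULL_DUPLEX);
 */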
948
949 /**
950  * Convert the legacy rxmode bitfield configuration into Rx offload flags.
951  */
952 static void
953 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
954                                     uint64_t *rx_offloads)
955 {
956         uint64_t offloads = 0;
957
958         if (rxmode->header_split == 1)
959                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
960         if (rxmode->hw_ip_checksum == 1)
961                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
962         if (rxmode->hw_vlan_filter == 1)
963                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
964         if (rxmode->hw_vlan_strip == 1)
965                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
966         if (rxmode->hw_vlan_extend == 1)
967                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
968         if (rxmode->jumbo_frame == 1)
969                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
970         if (rxmode->hw_strip_crc == 1)
971                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
972         if (rxmode->enable_scatter == 1)
973                 offloads |= DEV_RX_OFFLOAD_SCATTER;
974         if (rxmode->enable_lro == 1)
975                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
976         if (rxmode->hw_timestamp == 1)
977                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
978         if (rxmode->security == 1)
979                 offloads |= DEV_RX_OFFLOAD_SECURITY;
980
981         *rx_offloads = offloads;
982 }
983
984 const char * __rte_experimental
985 rte_eth_dev_rx_offload_name(uint64_t offload)
986 {
987         const char *name = "UNKNOWN";
988         unsigned int i;
989
990         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
991                 if (offload == rte_rx_offload_names[i].offload) {
992                         name = rte_rx_offload_names[i].name;
993                         break;
994                 }
995         }
996
997         return name;
998 }
999
1000 const char * __rte_experimental
1001 rte_eth_dev_tx_offload_name(uint64_t offload)
1002 {
1003         const char *name = "UNKNOWN";
1004         unsigned int i;
1005
1006         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1007                 if (offload == rte_tx_offload_names[i].offload) {
1008                         name = rte_tx_offload_names[i].name;
1009                         break;
1010                 }
1011         }
1012
1013         return name;
1014 }
1015
1016 int
1017 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1018                       const struct rte_eth_conf *dev_conf)
1019 {
1020         struct rte_eth_dev *dev;
1021         struct rte_eth_dev_info dev_info;
1022         struct rte_eth_conf local_conf = *dev_conf;
1023         int diag;
1024
1025         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1026
1027         dev = &rte_eth_devices[port_id];
1028
1029         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1030         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1031
1032         /* If number of queues specified by application for both Rx and Tx is
1033          * zero, use driver preferred values. This cannot be done individually
1034          * as it is valid for either Tx or Rx (but not both) to be zero.
1035          * If the driver does not provide any preferred values, fall back on
1036          * EAL defaults.
1037          */
1038         if (nb_rx_q == 0 && nb_tx_q == 0) {
1039                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1040                 if (nb_rx_q == 0)
1041                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1042                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1043                 if (nb_tx_q == 0)
1044                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1045         }
1046
1047         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1048                 RTE_PMD_DEBUG_TRACE(
1049                         "Number of RX queues requested (%u) is greater than the maximum supported (%d)\n",
1050                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1051                 return -EINVAL;
1052         }
1053
1054         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1055                 RTE_PMD_DEBUG_TRACE(
1056                         "Number of TX queues requested (%u) is greater than the maximum supported (%d)\n",
1057                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1058                 return -EINVAL;
1059         }
1060
1061         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1062         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1063
1064         if (dev->data->dev_started) {
1065                 RTE_PMD_DEBUG_TRACE(
1066                     "port %d must be stopped to allow configuration\n", port_id);
1067                 return -EBUSY;
1068         }
1069
1070         /*
1071          * Convert the legacy Rx offload bitfield into the offloads API so
1072          * that PMDs only need to support one of the two APIs.
1073          */
1074         if (dev_conf->rxmode.ignore_offload_bitfield == 0)
1075                 rte_eth_convert_rx_offload_bitfield(
1076                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1077
1078         /* Copy the dev_conf parameter into the dev structure */
1079         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1080
1081         /*
1082          * Check that the numbers of RX and TX queues are not greater
1083          * than the maximum number of RX and TX queues supported by the
1084          * configured device.
1085          */
1086         if (nb_rx_q > dev_info.max_rx_queues) {
1087                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1088                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1089                 return -EINVAL;
1090         }
1091
1092         if (nb_tx_q > dev_info.max_tx_queues) {
1093                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1094                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1095                 return -EINVAL;
1096         }
1097
1098         /* Check that the device supports requested interrupts */
1099         if ((dev_conf->intr_conf.lsc == 1) &&
1100                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1101                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1102                                         dev->device->driver->name);
1103                         return -EINVAL;
1104         }
1105         if ((dev_conf->intr_conf.rmv == 1) &&
1106             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1107                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1108                                     dev->device->driver->name);
1109                 return -EINVAL;
1110         }
1111
1112         /*
1113          * If jumbo frames are enabled, check that the maximum RX packet
1114          * length is supported by the configured device.
1115          */
1116         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1117                 if (dev_conf->rxmode.max_rx_pkt_len >
1118                     dev_info.max_rx_pktlen) {
1119                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1120                                 " > max valid value %u\n",
1121                                 port_id,
1122                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1123                                 (unsigned)dev_info.max_rx_pktlen);
1124                         return -EINVAL;
1125                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1126                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1127                                 " < min valid value %u\n",
1128                                 port_id,
1129                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1130                                 (unsigned)ETHER_MIN_LEN);
1131                         return -EINVAL;
1132                 }
1133         } else {
1134                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1135                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1136                         /* Use default value */
1137                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1138                                                         ETHER_MAX_LEN;
1139         }
1140
1141         /*
1142          * Setup new number of RX/TX queues and reconfigure device.
1143          */
1144         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1145         if (diag != 0) {
1146                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1147                                 port_id, diag);
1148                 return diag;
1149         }
1150
1151         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1152         if (diag != 0) {
1153                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1154                                 port_id, diag);
1155                 rte_eth_dev_rx_queue_config(dev, 0);
1156                 return diag;
1157         }
1158
1159         diag = (*dev->dev_ops->dev_configure)(dev);
1160         if (diag != 0) {
1161                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1162                                 port_id, diag);
1163                 rte_eth_dev_rx_queue_config(dev, 0);
1164                 rte_eth_dev_tx_queue_config(dev, 0);
1165                 return eth_err(port_id, diag);
1166         }
1167
1168         /* Initialize Rx profiling if enabled at compilation time. */
1169         diag = __rte_eth_profile_rx_init(port_id, dev);
1170         if (diag != 0) {
1171                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1172                                 port_id, diag);
1173                 rte_eth_dev_rx_queue_config(dev, 0);
1174                 rte_eth_dev_tx_queue_config(dev, 0);
1175                 return eth_err(port_id, diag);
1176         }
1177
1178         return 0;
1179 }
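
/*
 * A minimal configuration sketch against the function above, using the
 * offloads API (ignore_offload_bitfield = 1); the offload and length
 * values are illustrative rather than recommended:
 *
 *        struct rte_eth_conf conf = {
 *                .rxmode = {
 *                        .ignore_offload_bitfield = 1,
 *                        .offloads = DEV_RX_OFFLOAD_CHECKSUM,
 *                        .max_rx_pkt_len = ETHER_MAX_LEN,
 *                },
 *        };
 *
 *        // one Rx and one Tx queue; passing 0 for both would let the driver
 *        // (or the EAL fallback) choose the counts, as described above
 *        if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *                rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */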
1180
1181 void
1182 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1183 {
1184         if (dev->data->dev_started) {
1185                 RTE_PMD_DEBUG_TRACE(
1186                         "port %d must be stopped to allow reset\n",
1187                         dev->data->port_id);
1188                 return;
1189         }
1190
1191         rte_eth_dev_rx_queue_config(dev, 0);
1192         rte_eth_dev_tx_queue_config(dev, 0);
1193
1194         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1195 }
1196
1197 static void
1198 rte_eth_dev_config_restore(uint16_t port_id)
1199 {
1200         struct rte_eth_dev *dev;
1201         struct rte_eth_dev_info dev_info;
1202         struct ether_addr *addr;
1203         uint16_t i;
1204         uint32_t pool = 0;
1205         uint64_t pool_mask;
1206
1207         dev = &rte_eth_devices[port_id];
1208
1209         rte_eth_dev_info_get(port_id, &dev_info);
1210
1211         /* replay MAC address configuration including default MAC */
1212         addr = &dev->data->mac_addrs[0];
1213         if (*dev->dev_ops->mac_addr_set != NULL)
1214                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1215         else if (*dev->dev_ops->mac_addr_add != NULL)
1216                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1217
1218         if (*dev->dev_ops->mac_addr_add != NULL) {
1219                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1220                         addr = &dev->data->mac_addrs[i];
1221
1222                         /* skip zero address */
1223                         if (is_zero_ether_addr(addr))
1224                                 continue;
1225
1226                         pool = 0;
1227                         pool_mask = dev->data->mac_pool_sel[i];
1228
1229                         do {
1230                                 if (pool_mask & 1ULL)
1231                                         (*dev->dev_ops->mac_addr_add)(dev,
1232                                                 addr, i, pool);
1233                                 pool_mask >>= 1;
1234                                 pool++;
1235                         } while (pool_mask);
1236                 }
1237         }
1238
1239         /* replay promiscuous configuration */
1240         if (rte_eth_promiscuous_get(port_id) == 1)
1241                 rte_eth_promiscuous_enable(port_id);
1242         else if (rte_eth_promiscuous_get(port_id) == 0)
1243                 rte_eth_promiscuous_disable(port_id);
1244
1245         /* replay all multicast configuration */
1246         if (rte_eth_allmulticast_get(port_id) == 1)
1247                 rte_eth_allmulticast_enable(port_id);
1248         else if (rte_eth_allmulticast_get(port_id) == 0)
1249                 rte_eth_allmulticast_disable(port_id);
1250 }
1251
1252 int
1253 rte_eth_dev_start(uint16_t port_id)
1254 {
1255         struct rte_eth_dev *dev;
1256         int diag;
1257
1258         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1259
1260         dev = &rte_eth_devices[port_id];
1261
1262         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1263
1264         if (dev->data->dev_started != 0) {
1265                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1266                         " already started\n",
1267                         port_id);
1268                 return 0;
1269         }
1270
1271         diag = (*dev->dev_ops->dev_start)(dev);
1272         if (diag == 0)
1273                 dev->data->dev_started = 1;
1274         else
1275                 return eth_err(port_id, diag);
1276
1277         rte_eth_dev_config_restore(port_id);
1278
1279         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1280                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1281                 (*dev->dev_ops->link_update)(dev, 0);
1282         }
1283         return 0;
1284 }
1285
1286 void
1287 rte_eth_dev_stop(uint16_t port_id)
1288 {
1289         struct rte_eth_dev *dev;
1290
1291         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1292         dev = &rte_eth_devices[port_id];
1293
1294         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1295
1296         if (dev->data->dev_started == 0) {
1297                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1298                         " already stopped\n",
1299                         port_id);
1300                 return;
1301         }
1302
1303         dev->data->dev_started = 0;
1304         (*dev->dev_ops->dev_stop)(dev);
1305 }
1306
1307 int
1308 rte_eth_dev_set_link_up(uint16_t port_id)
1309 {
1310         struct rte_eth_dev *dev;
1311
1312         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1313
1314         dev = &rte_eth_devices[port_id];
1315
1316         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1317         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1318 }
1319
1320 int
1321 rte_eth_dev_set_link_down(uint16_t port_id)
1322 {
1323         struct rte_eth_dev *dev;
1324
1325         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1326
1327         dev = &rte_eth_devices[port_id];
1328
1329         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1330         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1331 }
1332
1333 void
1334 rte_eth_dev_close(uint16_t port_id)
1335 {
1336         struct rte_eth_dev *dev;
1337
1338         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1339         dev = &rte_eth_devices[port_id];
1340
1341         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1342         dev->data->dev_started = 0;
1343         (*dev->dev_ops->dev_close)(dev);
1344
1345         dev->data->nb_rx_queues = 0;
1346         rte_free(dev->data->rx_queues);
1347         dev->data->rx_queues = NULL;
1348         dev->data->nb_tx_queues = 0;
1349         rte_free(dev->data->tx_queues);
1350         dev->data->tx_queues = NULL;
1351 }
1352
1353 int
1354 rte_eth_dev_reset(uint16_t port_id)
1355 {
1356         struct rte_eth_dev *dev;
1357         int ret;
1358
1359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1360         dev = &rte_eth_devices[port_id];
1361
1362         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1363
1364         rte_eth_dev_stop(port_id);
1365         ret = dev->dev_ops->dev_reset(dev);
1366
1367         return eth_err(port_id, ret);
1368 }
1369
1370 int __rte_experimental
1371 rte_eth_dev_is_removed(uint16_t port_id)
1372 {
1373         struct rte_eth_dev *dev;
1374         int ret;
1375
1376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1377
1378         dev = &rte_eth_devices[port_id];
1379
1380         if (dev->state == RTE_ETH_DEV_REMOVED)
1381                 return 1;
1382
1383         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1384
1385         ret = dev->dev_ops->is_removed(dev);
1386         if (ret != 0)
1387                 /* Device is physically removed. */
1388                 dev->state = RTE_ETH_DEV_REMOVED;
1389
1390         return ret;
1391 }
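
/*
 * A brief sketch of how an error path might use the removal check above to
 * tell a hot-unplugged device apart from an ordinary driver failure;
 * some_ethdev_call(), handle_removal() and handle_error() are hypothetical
 * placeholders, not DPDK APIs.
 *
 *        ret = some_ethdev_call(port_id);
 *        if (ret < 0) {
 *                if (rte_eth_dev_is_removed(port_id))
 *                        handle_removal(port_id);
 *                else
 *                        handle_error(ret);
 *        }
 */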
1392
1393 int
1394 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1395                        uint16_t nb_rx_desc, unsigned int socket_id,
1396                        const struct rte_eth_rxconf *rx_conf,
1397                        struct rte_mempool *mp)
1398 {
1399         int ret;
1400         uint32_t mbp_buf_size;
1401         struct rte_eth_dev *dev;
1402         struct rte_eth_dev_info dev_info;
1403         struct rte_eth_rxconf local_conf;
1404         void **rxq;
1405
1406         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1407
1408         dev = &rte_eth_devices[port_id];
1409         if (rx_queue_id >= dev->data->nb_rx_queues) {
1410                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1411                 return -EINVAL;
1412         }
1413
1414         if (dev->data->dev_started) {
1415                 RTE_PMD_DEBUG_TRACE(
1416                     "port %d must be stopped to allow configuration\n", port_id);
1417                 return -EBUSY;
1418         }
1419
1420         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1421         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1422
1423         /*
1424          * Check the size of the mbuf data buffer.
1425          * This value must be provided in the private data of the memory pool.
1426          * First check that the memory pool has a valid private data.
1427          */
1428         rte_eth_dev_info_get(port_id, &dev_info);
1429         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1430                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1431                                 mp->name, (int) mp->private_data_size,
1432                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1433                 return -ENOSPC;
1434         }
1435         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1436
1437         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1438                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1439                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1440                                 "=%d)\n",
1441                                 mp->name,
1442                                 (int)mbp_buf_size,
1443                                 (int)(RTE_PKTMBUF_HEADROOM +
1444                                       dev_info.min_rx_bufsize),
1445                                 (int)RTE_PKTMBUF_HEADROOM,
1446                                 (int)dev_info.min_rx_bufsize);
1447                 return -EINVAL;
1448         }
1449
1450         /* Use default specified by driver, if nb_rx_desc is zero */
1451         if (nb_rx_desc == 0) {
1452                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1453                 /* If driver default is also zero, fall back on EAL default */
1454                 if (nb_rx_desc == 0)
1455                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1456         }
1457
1458         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1459                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1460                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1461
1462                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1463                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1464                         nb_rx_desc,
1465                         dev_info.rx_desc_lim.nb_max,
1466                         dev_info.rx_desc_lim.nb_min,
1467                         dev_info.rx_desc_lim.nb_align);
1468                 return -EINVAL;
1469         }
1470
1471         rxq = dev->data->rx_queues;
1472         if (rxq[rx_queue_id]) {
1473                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1474                                         -ENOTSUP);
1475                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1476                 rxq[rx_queue_id] = NULL;
1477         }
1478
1479         if (rx_conf == NULL)
1480                 rx_conf = &dev_info.default_rxconf;
1481
1482         local_conf = *rx_conf;
1483         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1484                 /*
1485                  * Reflect port-level offloads into the queue offloads so
1486                  * that they are not discarded.
1487                  */
1488                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1489                                                     &local_conf.offloads);
1490         }
1491
1492         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1493                                               socket_id, &local_conf, mp);
1494         if (!ret) {
1495                 if (!dev->data->min_rx_buf_size ||
1496                     dev->data->min_rx_buf_size > mbp_buf_size)
1497                         dev->data->min_rx_buf_size = mbp_buf_size;
1498         }
1499
1500         return eth_err(port_id, ret);
1501 }
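
/*
 * A minimal Rx queue setup sketch tied to the checks above: the mempool's
 * data room must cover RTE_PKTMBUF_HEADROOM plus the device's
 * min_rx_bufsize, which RTE_MBUF_DEFAULT_BUF_SIZE normally satisfies.
 * The pool sizing numbers are illustrative only.
 *
 *        struct rte_mempool *mp;
 *
 *        mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                     RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                     rte_eth_dev_socket_id(port_id));
 *        if (mp == NULL ||
 *            rte_eth_rx_queue_setup(port_id, 0,
 *                                   0,      // 0 = driver default ring size
 *                                   rte_eth_dev_socket_id(port_id),
 *                                   NULL,   // NULL = default rxconf
 *                                   mp) != 0)
 *                rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */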
1502
1503 /**
1504  * Convert the legacy txq_flags bits into Tx offload flags.
1505  */
1506 static void
1507 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1508 {
1509         uint64_t offloads = 0;
1510
1511         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1512                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1513         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1514                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1515         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1516                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1517         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1518                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1519         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1520                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1521         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1522             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1523                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1524
1525         *tx_offloads = offloads;
1526 }
1527
1528 int
1529 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1530                        uint16_t nb_tx_desc, unsigned int socket_id,
1531                        const struct rte_eth_txconf *tx_conf)
1532 {
1533         struct rte_eth_dev *dev;
1534         struct rte_eth_dev_info dev_info;
1535         struct rte_eth_txconf local_conf;
1536         void **txq;
1537
1538         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1539
1540         dev = &rte_eth_devices[port_id];
1541         if (tx_queue_id >= dev->data->nb_tx_queues) {
1542                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1543                 return -EINVAL;
1544         }
1545
1546         if (dev->data->dev_started) {
1547                 RTE_PMD_DEBUG_TRACE(
1548                     "port %d must be stopped to allow configuration\n", port_id);
1549                 return -EBUSY;
1550         }
1551
1552         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1553         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1554
1555         rte_eth_dev_info_get(port_id, &dev_info);
1556
1557         /* Use default specified by driver, if nb_tx_desc is zero */
1558         if (nb_tx_desc == 0) {
1559                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1560                 /* If driver default is zero, fall back on EAL default */
1561                 if (nb_tx_desc == 0)
1562                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1563         }
1564         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1565             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1566             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1567                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1568                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1569                                 nb_tx_desc,
1570                                 dev_info.tx_desc_lim.nb_max,
1571                                 dev_info.tx_desc_lim.nb_min,
1572                                 dev_info.tx_desc_lim.nb_align);
1573                 return -EINVAL;
1574         }
1575
1576         txq = dev->data->tx_queues;
1577         if (txq[tx_queue_id]) {
1578                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1579                                         -ENOTSUP);
1580                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1581                 txq[tx_queue_id] = NULL;
1582         }
1583
1584         if (tx_conf == NULL)
1585                 tx_conf = &dev_info.default_txconf;
1586
1587         /*
1588          * Convert between the offloads API to enable PMDs to support
1589          * only one of them.
1590          */
1591         local_conf = *tx_conf;
1592         if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
1593                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1594                                           &local_conf.offloads);
1595         }
1596
1597         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1598                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1599 }
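
/*
 * Illustrative usage sketch (not part of this file): configuring a TX queue
 * through the offloads API. Starting from the driver defaults and setting
 * ETH_TXQ_FLAGS_IGNORE skips the txq_flags conversion above; "port_id" and
 * the descriptor count are assumptions for the example.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	txconf = info.default_txconf;
 *	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;	// use txconf.offloads
 *	txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE & info.tx_offload_capa;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *				   rte_eth_dev_socket_id(port_id), &txconf) != 0)
 *		rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */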
1600
1601 void
1602 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1603                 void *userdata __rte_unused)
1604 {
1605         unsigned i;
1606
1607         for (i = 0; i < unsent; i++)
1608                 rte_pktmbuf_free(pkts[i]);
1609 }
1610
1611 void
1612 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1613                 void *userdata)
1614 {
1615         uint64_t *count = userdata;
1616         unsigned i;
1617
1618         for (i = 0; i < unsent; i++)
1619                 rte_pktmbuf_free(pkts[i]);
1620
1621         *count += unsent;
1622 }
1623
1624 int
1625 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1626                 buffer_tx_error_fn cbfn, void *userdata)
1627 {
1628         buffer->error_callback = cbfn;
1629         buffer->error_userdata = userdata;
1630         return 0;
1631 }
1632
1633 int
1634 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1635 {
1636         int ret = 0;
1637
1638         if (buffer == NULL)
1639                 return -EINVAL;
1640
1641         buffer->size = size;
1642         if (buffer->error_callback == NULL) {
1643                 ret = rte_eth_tx_buffer_set_err_callback(
1644                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1645         }
1646
1647         return ret;
1648 }
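
/*
 * Illustrative usage sketch (not part of this file): allocating and
 * initialising a TX buffer that counts dropped packets instead of silently
 * freeing them. "socket_id" and "dropped" are assumptions for the example.
 *
 *	static uint64_t dropped;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buffer",
 *				 RTE_ETH_TX_BUFFER_SIZE(32), 0, socket_id);
 *	if (buf == NULL || rte_eth_tx_buffer_init(buf, 32) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot init tx buffer\n");
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 */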
1649
1650 int
1651 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1652 {
1653         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1654         int ret;
1655
1656         /* Validate Input Data. Bail if not valid or not supported. */
1657         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1658         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1659
1660         /* Call driver to free pending mbufs. */
1661         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1662                                                free_cnt);
1663         return eth_err(port_id, ret);
1664 }
1665
1666 void
1667 rte_eth_promiscuous_enable(uint16_t port_id)
1668 {
1669         struct rte_eth_dev *dev;
1670
1671         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1672         dev = &rte_eth_devices[port_id];
1673
1674         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1675         (*dev->dev_ops->promiscuous_enable)(dev);
1676         dev->data->promiscuous = 1;
1677 }
1678
1679 void
1680 rte_eth_promiscuous_disable(uint16_t port_id)
1681 {
1682         struct rte_eth_dev *dev;
1683
1684         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1685         dev = &rte_eth_devices[port_id];
1686
1687         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1688         dev->data->promiscuous = 0;
1689         (*dev->dev_ops->promiscuous_disable)(dev);
1690 }
1691
1692 int
1693 rte_eth_promiscuous_get(uint16_t port_id)
1694 {
1695         struct rte_eth_dev *dev;
1696
1697         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1698
1699         dev = &rte_eth_devices[port_id];
1700         return dev->data->promiscuous;
1701 }
1702
1703 void
1704 rte_eth_allmulticast_enable(uint16_t port_id)
1705 {
1706         struct rte_eth_dev *dev;
1707
1708         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1709         dev = &rte_eth_devices[port_id];
1710
1711         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1712         (*dev->dev_ops->allmulticast_enable)(dev);
1713         dev->data->all_multicast = 1;
1714 }
1715
1716 void
1717 rte_eth_allmulticast_disable(uint16_t port_id)
1718 {
1719         struct rte_eth_dev *dev;
1720
1721         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1722         dev = &rte_eth_devices[port_id];
1723
1724         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1725         dev->data->all_multicast = 0;
1726         (*dev->dev_ops->allmulticast_disable)(dev);
1727 }
1728
1729 int
1730 rte_eth_allmulticast_get(uint16_t port_id)
1731 {
1732         struct rte_eth_dev *dev;
1733
1734         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1735
1736         dev = &rte_eth_devices[port_id];
1737         return dev->data->all_multicast;
1738 }
1739
1740 void
1741 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1742 {
1743         struct rte_eth_dev *dev;
1744
1745         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1746         dev = &rte_eth_devices[port_id];
1747
1748         if (dev->data->dev_conf.intr_conf.lsc &&
1749             dev->data->dev_started)
1750                 rte_eth_linkstatus_get(dev, eth_link);
1751         else {
1752                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1753                 (*dev->dev_ops->link_update)(dev, 1);
1754                 *eth_link = dev->data->dev_link;
1755         }
1756 }
1757
1758 void
1759 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1760 {
1761         struct rte_eth_dev *dev;
1762
1763         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1764         dev = &rte_eth_devices[port_id];
1765
1766         if (dev->data->dev_conf.intr_conf.lsc &&
1767             dev->data->dev_started)
1768                 rte_eth_linkstatus_get(dev, eth_link);
1769         else {
1770                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1771                 (*dev->dev_ops->link_update)(dev, 0);
1772                 *eth_link = dev->data->dev_link;
1773         }
1774 }
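
/*
 * Illustrative usage sketch (not part of this file): polling the link state
 * without waiting. With link-state interrupts enabled the cached value is
 * returned; otherwise the driver is queried with wait_to_complete = 0.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */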
1775
1776 int
1777 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1778 {
1779         struct rte_eth_dev *dev;
1780
1781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1782
1783         dev = &rte_eth_devices[port_id];
1784         memset(stats, 0, sizeof(*stats));
1785
1786         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1787         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1788         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1789 }
1790
1791 int
1792 rte_eth_stats_reset(uint16_t port_id)
1793 {
1794         struct rte_eth_dev *dev;
1795
1796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1797         dev = &rte_eth_devices[port_id];
1798
1799         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1800         (*dev->dev_ops->stats_reset)(dev);
1801         dev->data->rx_mbuf_alloc_failed = 0;
1802
1803         return 0;
1804 }
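
/*
 * Illustrative usage sketch (not part of this file): reading and clearing the
 * basic counters. rte_eth_stats_get() zeroes the structure before the driver
 * fills it, so no caller-side memset is needed.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *		       st.ipackets, st.opackets, st.imissed);
 *	rte_eth_stats_reset(port_id);
 */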
1805
1806 static inline int
1807 get_xstats_basic_count(struct rte_eth_dev *dev)
1808 {
1809         uint16_t nb_rxqs, nb_txqs;
1810         int count;
1811
1812         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1813         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1814
1815         count = RTE_NB_STATS;
1816         count += nb_rxqs * RTE_NB_RXQ_STATS;
1817         count += nb_txqs * RTE_NB_TXQ_STATS;
1818
1819         return count;
1820 }
1821
1822 static int
1823 get_xstats_count(uint16_t port_id)
1824 {
1825         struct rte_eth_dev *dev;
1826         int count;
1827
1828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1829         dev = &rte_eth_devices[port_id];
1830         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1831                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1832                                 NULL, 0);
1833                 if (count < 0)
1834                         return eth_err(port_id, count);
1835         }
1836         if (dev->dev_ops->xstats_get_names != NULL) {
1837                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1838                 if (count < 0)
1839                         return eth_err(port_id, count);
1840         } else
1841                 count = 0;
1842
1843
1844         count += get_xstats_basic_count(dev);
1845
1846         return count;
1847 }
1848
1849 int
1850 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1851                 uint64_t *id)
1852 {
1853         int cnt_xstats, idx_xstat;
1854
1855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1856
1857         if (!id) {
1858                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1859                 return -ENOMEM;
1860         }
1861
1862         if (!xstat_name) {
1863                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1864                 return -ENOMEM;
1865         }
1866
1867         /* Get count */
1868         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1869         if (cnt_xstats < 0) {
1870                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1871                 return -ENODEV;
1872         }
1873
1874         /* Get id-name lookup table */
1875         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1876
1877         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1878                         port_id, xstats_names, cnt_xstats, NULL)) {
1879                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1880                 return -1;
1881         }
1882
1883         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1884                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1885                         *id = idx_xstat;
1886                         return 0;
1887                         }
1888         }
1889
1890         return -EINVAL;
1891 }
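
/*
 * Illustrative usage sketch (not part of this file): resolving one extended
 * statistic by name and reading only that value. "rx_good_packets" is one of
 * the basic xstats names registered at the top of this file.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */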
1892
1893 /* retrieve basic stats names */
1894 static int
1895 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1896         struct rte_eth_xstat_name *xstats_names)
1897 {
1898         int cnt_used_entries = 0;
1899         uint32_t idx, id_queue;
1900         uint16_t num_q;
1901
1902         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1903                 snprintf(xstats_names[cnt_used_entries].name,
1904                         sizeof(xstats_names[0].name),
1905                         "%s", rte_stats_strings[idx].name);
1906                 cnt_used_entries++;
1907         }
1908         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1909         for (id_queue = 0; id_queue < num_q; id_queue++) {
1910                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1911                         snprintf(xstats_names[cnt_used_entries].name,
1912                                 sizeof(xstats_names[0].name),
1913                                 "rx_q%u%s",
1914                                 id_queue, rte_rxq_stats_strings[idx].name);
1915                         cnt_used_entries++;
1916                 }
1917
1918         }
1919         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1920         for (id_queue = 0; id_queue < num_q; id_queue++) {
1921                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1922                         snprintf(xstats_names[cnt_used_entries].name,
1923                                 sizeof(xstats_names[0].name),
1924                                 "tx_q%u%s",
1925                                 id_queue, rte_txq_stats_strings[idx].name);
1926                         cnt_used_entries++;
1927                 }
1928         }
1929         return cnt_used_entries;
1930 }
1931
1932 /* retrieve ethdev extended statistics names */
1933 int
1934 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1935         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1936         uint64_t *ids)
1937 {
1938         struct rte_eth_xstat_name *xstats_names_copy;
1939         unsigned int no_basic_stat_requested = 1;
1940         unsigned int no_ext_stat_requested = 1;
1941         unsigned int expected_entries;
1942         unsigned int basic_count;
1943         struct rte_eth_dev *dev;
1944         unsigned int i;
1945         int ret;
1946
1947         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1948         dev = &rte_eth_devices[port_id];
1949
1950         basic_count = get_xstats_basic_count(dev);
1951         ret = get_xstats_count(port_id);
1952         if (ret < 0)
1953                 return ret;
1954         expected_entries = (unsigned int)ret;
1955
1956         /* Return max number of stats if no ids given */
1957         if (!ids) {
1958                 if (!xstats_names)
1959                         return expected_entries;
1960                 else if (xstats_names && size < expected_entries)
1961                         return expected_entries;
1962         }
1963
1964         if (ids && !xstats_names)
1965                 return -EINVAL;
1966
1967         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1968                 uint64_t ids_copy[size];
1969
1970                 for (i = 0; i < size; i++) {
1971                         if (ids[i] < basic_count) {
1972                                 no_basic_stat_requested = 0;
1973                                 break;
1974                         }
1975
1976                         /*
1977                          * Convert ids to xstats ids that PMD knows.
1978                          * ids known by user are basic + extended stats.
1979                          */
1980                         ids_copy[i] = ids[i] - basic_count;
1981                 }
1982
1983                 if (no_basic_stat_requested)
1984                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1985                                         xstats_names, ids_copy, size);
1986         }
1987
1988         /* Retrieve all stats */
1989         if (!ids) {
1990                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1991                                 expected_entries);
1992                 if (num_stats < 0 || num_stats > (int)expected_entries)
1993                         return num_stats;
1994                 else
1995                         return expected_entries;
1996         }
1997
1998         xstats_names_copy = calloc(expected_entries,
1999                 sizeof(struct rte_eth_xstat_name));
2000
2001         if (!xstats_names_copy) {
2002                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2003                 return -ENOMEM;
2004         }
2005
2006         if (ids) {
2007                 for (i = 0; i < size; i++) {
2008                         if (ids[i] >= basic_count) {
2009                                 no_ext_stat_requested = 0;
2010                                 break;
2011                         }
2012                 }
2013         }
2014
2015         /* Fill xstats_names_copy structure */
2016         if (ids && no_ext_stat_requested) {
2017                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2018         } else {
2019                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2020                         expected_entries);
2021                 if (ret < 0) {
2022                         free(xstats_names_copy);
2023                         return ret;
2024                 }
2025         }
2026
2027         /* Filter stats */
2028         for (i = 0; i < size; i++) {
2029                 if (ids[i] >= expected_entries) {
2030                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2031                         free(xstats_names_copy);
2032                         return -1;
2033                 }
2034                 xstats_names[i] = xstats_names_copy[ids[i]];
2035         }
2036
2037         free(xstats_names_copy);
2038         return size;
2039 }
2040
2041 int
2042 rte_eth_xstats_get_names(uint16_t port_id,
2043         struct rte_eth_xstat_name *xstats_names,
2044         unsigned int size)
2045 {
2046         struct rte_eth_dev *dev;
2047         int cnt_used_entries;
2048         int cnt_expected_entries;
2049         int cnt_driver_entries;
2050
2051         cnt_expected_entries = get_xstats_count(port_id);
2052         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2053                         (int)size < cnt_expected_entries)
2054                 return cnt_expected_entries;
2055
2056         /* port_id checked in get_xstats_count() */
2057         dev = &rte_eth_devices[port_id];
2058
2059         cnt_used_entries = rte_eth_basic_stats_get_names(
2060                 dev, xstats_names);
2061
2062         if (dev->dev_ops->xstats_get_names != NULL) {
2063                 /* If there are any driver-specific xstats, append them
2064                  * to end of list.
2065                  */
2066                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2067                         dev,
2068                         xstats_names + cnt_used_entries,
2069                         size - cnt_used_entries);
2070                 if (cnt_driver_entries < 0)
2071                         return eth_err(port_id, cnt_driver_entries);
2072                 cnt_used_entries += cnt_driver_entries;
2073         }
2074
2075         return cnt_used_entries;
2076 }
2077
2078
2079 static int
2080 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2081 {
2082         struct rte_eth_dev *dev;
2083         struct rte_eth_stats eth_stats;
2084         unsigned int count = 0, i, q;
2085         uint64_t val, *stats_ptr;
2086         uint16_t nb_rxqs, nb_txqs;
2087         int ret;
2088
2089         ret = rte_eth_stats_get(port_id, &eth_stats);
2090         if (ret < 0)
2091                 return ret;
2092
2093         dev = &rte_eth_devices[port_id];
2094
2095         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2096         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2097
2098         /* global stats */
2099         for (i = 0; i < RTE_NB_STATS; i++) {
2100                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2101                                         rte_stats_strings[i].offset);
2102                 val = *stats_ptr;
2103                 xstats[count++].value = val;
2104         }
2105
2106         /* per-rxq stats */
2107         for (q = 0; q < nb_rxqs; q++) {
2108                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2109                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2110                                         rte_rxq_stats_strings[i].offset +
2111                                         q * sizeof(uint64_t));
2112                         val = *stats_ptr;
2113                         xstats[count++].value = val;
2114                 }
2115         }
2116
2117         /* per-txq stats */
2118         for (q = 0; q < nb_txqs; q++) {
2119                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2120                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2121                                         rte_txq_stats_strings[i].offset +
2122                                         q * sizeof(uint64_t));
2123                         val = *stats_ptr;
2124                         xstats[count++].value = val;
2125                 }
2126         }
2127         return count;
2128 }
2129
2130 /* retrieve ethdev extended statistics */
2131 int
2132 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2133                          uint64_t *values, unsigned int size)
2134 {
2135         unsigned int no_basic_stat_requested = 1;
2136         unsigned int no_ext_stat_requested = 1;
2137         unsigned int num_xstats_filled;
2138         unsigned int basic_count;
2139         uint16_t expected_entries;
2140         struct rte_eth_dev *dev;
2141         unsigned int i;
2142         int ret;
2143
2144         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2145         ret = get_xstats_count(port_id);
2146         if (ret < 0)
2147                 return ret;
2148         expected_entries = (uint16_t)ret;
2149         struct rte_eth_xstat xstats[expected_entries];
2150         dev = &rte_eth_devices[port_id];
2151         basic_count = get_xstats_basic_count(dev);
2152
2153         /* Return max number of stats if no ids given */
2154         if (!ids) {
2155                 if (!values)
2156                         return expected_entries;
2157                 else if (values && size < expected_entries)
2158                         return expected_entries;
2159         }
2160
2161         if (ids && !values)
2162                 return -EINVAL;
2163
2164         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2166                 uint64_t ids_copy[size];
2167
2168                 for (i = 0; i < size; i++) {
2169                         if (ids[i] < basic_count) {
2170                                 no_basic_stat_requested = 0;
2171                                 break;
2172                         }
2173
2174                         /*
2175                          * Convert ids to xstats ids that PMD knows.
2176                          * ids known by user are basic + extended stats.
2177                          */
2178                         ids_copy[i] = ids[i] - basic_count;
2179                 }
2180
2181                 if (no_basic_stat_requested)
2182                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2183                                         values, size);
2184         }
2185
2186         if (ids) {
2187                 for (i = 0; i < size; i++) {
2188                         if (ids[i] >= basic_count) {
2189                                 no_ext_stat_requested = 0;
2190                                 break;
2191                         }
2192                 }
2193         }
2194
2195         /* Fill the xstats structure */
2196         if (ids && no_ext_stat_requested)
2197                 ret = rte_eth_basic_stats_get(port_id, xstats);
2198         else
2199                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2200
2201         if (ret < 0)
2202                 return ret;
2203         num_xstats_filled = (unsigned int)ret;
2204
2205         /* Return all stats */
2206         if (!ids) {
2207                 for (i = 0; i < num_xstats_filled; i++)
2208                         values[i] = xstats[i].value;
2209                 return expected_entries;
2210         }
2211
2212         /* Filter stats */
2213         for (i = 0; i < size; i++) {
2214                 if (ids[i] >= expected_entries) {
2215                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2216                         return -1;
2217                 }
2218                 values[i] = xstats[ids[i]].value;
2219         }
2220         return size;
2221 }
2222
2223 int
2224 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2225         unsigned int n)
2226 {
2227         struct rte_eth_dev *dev;
2228         unsigned int count = 0, i;
2229         signed int xcount = 0;
2230         uint16_t nb_rxqs, nb_txqs;
2231         int ret;
2232
2233         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2234
2235         dev = &rte_eth_devices[port_id];
2236
2237         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2238         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2239
2240         /* Return generic statistics */
2241         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2242                 (nb_txqs * RTE_NB_TXQ_STATS);
2243
2244         /* implemented by the driver */
2245         if (dev->dev_ops->xstats_get != NULL) {
2246                 /* Retrieve the xstats from the driver at the end of the
2247                  * xstats struct.
2248                  */
2249                 xcount = (*dev->dev_ops->xstats_get)(dev,
2250                                      xstats ? xstats + count : NULL,
2251                                      (n > count) ? n - count : 0);
2252
2253                 if (xcount < 0)
2254                         return eth_err(port_id, xcount);
2255         }
2256
2257         if (n < count + xcount || xstats == NULL)
2258                 return count + xcount;
2259
2260         /* now fill the xstats structure */
2261         ret = rte_eth_basic_stats_get(port_id, xstats);
2262         if (ret < 0)
2263                 return ret;
2264         count = ret;
2265
2266         for (i = 0; i < count; i++)
2267                 xstats[i].id = i;
2268         /* add an offset to driver-specific stats */
2269         for ( ; i < count + xcount; i++)
2270                 xstats[i].id += count;
2271
2272         return count + xcount;
2273 }
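
/*
 * Illustrative usage sketch (not part of this file): the two-call pattern for
 * dumping all extended statistics. The first call only returns the required
 * array size; error handling is omitted for brevity.
 *
 *	struct rte_eth_xstat *xs;
 *	struct rte_eth_xstat_name *names;
 *	int i, n;
 *
 *	n = rte_eth_xstats_get(port_id, NULL, 0);
 *	xs = malloc(n * sizeof(*xs));
 *	names = malloc(n * sizeof(*names));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xs, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n", names[xs[i].id].name, xs[i].value);
 *	free(names);
 *	free(xs);
 */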
2274
2275 /* reset ethdev extended statistics */
2276 void
2277 rte_eth_xstats_reset(uint16_t port_id)
2278 {
2279         struct rte_eth_dev *dev;
2280
2281         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2282         dev = &rte_eth_devices[port_id];
2283
2284         /* implemented by the driver */
2285         if (dev->dev_ops->xstats_reset != NULL) {
2286                 (*dev->dev_ops->xstats_reset)(dev);
2287                 return;
2288         }
2289
2290         /* fallback to default */
2291         rte_eth_stats_reset(port_id);
2292 }
2293
2294 static int
2295 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2296                 uint8_t is_rx)
2297 {
2298         struct rte_eth_dev *dev;
2299
2300         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2301
2302         dev = &rte_eth_devices[port_id];
2303
2304         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2305         return (*dev->dev_ops->queue_stats_mapping_set)
2306                         (dev, queue_id, stat_idx, is_rx);
2307 }
2308
2309
2310 int
2311 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2312                 uint8_t stat_idx)
2313 {
2314         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2315                                                 stat_idx, STAT_QMAP_TX));
2316 }
2317
2318
2319 int
2320 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2321                 uint8_t stat_idx)
2322 {
2323         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2324                                                 stat_idx, STAT_QMAP_RX));
2325 }
2326
2327 int
2328 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2329 {
2330         struct rte_eth_dev *dev;
2331
2332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2333         dev = &rte_eth_devices[port_id];
2334
2335         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2336         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2337                                                         fw_version, fw_size));
2338 }
2339
2340 void
2341 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2342 {
2343         struct rte_eth_dev *dev;
2344         const struct rte_eth_desc_lim lim = {
2345                 .nb_max = UINT16_MAX,
2346                 .nb_min = 0,
2347                 .nb_align = 1,
2348         };
2349
2350         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2351         dev = &rte_eth_devices[port_id];
2352
2353         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2354         dev_info->rx_desc_lim = lim;
2355         dev_info->tx_desc_lim = lim;
2356         dev_info->device = dev->device;
2357
2358         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2359         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2360         dev_info->driver_name = dev->device->driver->name;
2361         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2362         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2363 }
2364
2365 int
2366 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2367                                  uint32_t *ptypes, int num)
2368 {
2369         int i, j;
2370         struct rte_eth_dev *dev;
2371         const uint32_t *all_ptypes;
2372
2373         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2374         dev = &rte_eth_devices[port_id];
2375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2376         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2377
2378         if (!all_ptypes)
2379                 return 0;
2380
2381         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2382                 if (all_ptypes[i] & ptype_mask) {
2383                         if (j < num)
2384                                 ptypes[j] = all_ptypes[i];
2385                         j++;
2386                 }
2387
2388         return j;
2389 }
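
/*
 * Illustrative usage sketch (not part of this file): listing the L4 packet
 * types a port can recognise. The return value is the total number of
 * matching types and may exceed the array size, hence the clamp below.
 *
 *	uint32_t ptypes[16];
 *	int i, n;
 *
 *	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *					     ptypes, RTE_DIM(ptypes));
 *	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *		printf("ptype 0x%08x supported\n", ptypes[i]);
 */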
2390
2391 void
2392 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2393 {
2394         struct rte_eth_dev *dev;
2395
2396         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2397         dev = &rte_eth_devices[port_id];
2398         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2399 }
2400
2401
2402 int
2403 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2404 {
2405         struct rte_eth_dev *dev;
2406
2407         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2408
2409         dev = &rte_eth_devices[port_id];
2410         *mtu = dev->data->mtu;
2411         return 0;
2412 }
2413
2414 int
2415 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2416 {
2417         int ret;
2418         struct rte_eth_dev *dev;
2419
2420         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2421         dev = &rte_eth_devices[port_id];
2422         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2423
2424         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2425         if (!ret)
2426                 dev->data->mtu = mtu;
2427
2428         return eth_err(port_id, ret);
2429 }
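
/*
 * Illustrative usage sketch (not part of this file): raising the MTU for
 * jumbo frames. The new value is cached in dev->data->mtu only when the
 * driver accepts it; 9000 is an assumption for the example.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("port %u: jumbo MTU not accepted\n", port_id);
 *	rte_eth_dev_get_mtu(port_id, &mtu);
 */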
2430
2431 int
2432 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2433 {
2434         struct rte_eth_dev *dev;
2435         int ret;
2436
2437         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2438         dev = &rte_eth_devices[port_id];
2439         if (!(dev->data->dev_conf.rxmode.offloads &
2440               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2441                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2442                 return -ENOSYS;
2443         }
2444
2445         if (vlan_id > 4095) {
2446                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2447                                 port_id, (unsigned) vlan_id);
2448                 return -EINVAL;
2449         }
2450         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2451
2452         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2453         if (ret == 0) {
2454                 struct rte_vlan_filter_conf *vfc;
2455                 int vidx;
2456                 int vbit;
2457
2458                 vfc = &dev->data->vlan_filter_conf;
2459                 vidx = vlan_id / 64;
2460                 vbit = vlan_id % 64;
2461
2462                 if (on)
2463                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2464                 else
2465                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2466         }
2467
2468         return eth_err(port_id, ret);
2469 }
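
/*
 * Illustrative usage sketch (not part of this file): enabling the hardware
 * filter for VLAN 100 on a port configured with DEV_RX_OFFLOAD_VLAN_FILTER.
 * On success the shadow bitmap above records it in ids[100 / 64] at bit
 * (100 % 64), i.e. ids[1] bit 36.
 *
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *		printf("port %u: cannot filter VLAN 100\n", port_id);
 */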
2470
2471 int
2472 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2473                                     int on)
2474 {
2475         struct rte_eth_dev *dev;
2476
2477         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2478         dev = &rte_eth_devices[port_id];
2479         if (rx_queue_id >= dev->data->nb_rx_queues) {
2480                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2481                 return -EINVAL;
2482         }
2483
2484         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2485         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2486
2487         return 0;
2488 }
2489
2490 int
2491 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2492                                 enum rte_vlan_type vlan_type,
2493                                 uint16_t tpid)
2494 {
2495         struct rte_eth_dev *dev;
2496
2497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2498         dev = &rte_eth_devices[port_id];
2499         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2500
2501         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2502                                                                tpid));
2503 }
2504
2505 int
2506 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2507 {
2508         struct rte_eth_dev *dev;
2509         int ret = 0;
2510         int mask = 0;
2511         int cur, org = 0;
2512         uint64_t orig_offloads;
2513
2514         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2515         dev = &rte_eth_devices[port_id];
2516
2517         /* save original values in case of failure */
2518         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2519
2520         /* check which options were changed by the application */
2521         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2522         org = !!(dev->data->dev_conf.rxmode.offloads &
2523                  DEV_RX_OFFLOAD_VLAN_STRIP);
2524         if (cur != org) {
2525                 if (cur)
2526                         dev->data->dev_conf.rxmode.offloads |=
2527                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2528                 else
2529                         dev->data->dev_conf.rxmode.offloads &=
2530                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2531                 mask |= ETH_VLAN_STRIP_MASK;
2532         }
2533
2534         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2535         org = !!(dev->data->dev_conf.rxmode.offloads &
2536                  DEV_RX_OFFLOAD_VLAN_FILTER);
2537         if (cur != org) {
2538                 if (cur)
2539                         dev->data->dev_conf.rxmode.offloads |=
2540                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2541                 else
2542                         dev->data->dev_conf.rxmode.offloads &=
2543                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2544                 mask |= ETH_VLAN_FILTER_MASK;
2545         }
2546
2547         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2548         org = !!(dev->data->dev_conf.rxmode.offloads &
2549                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2550         if (cur != org) {
2551                 if (cur)
2552                         dev->data->dev_conf.rxmode.offloads |=
2553                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2554                 else
2555                         dev->data->dev_conf.rxmode.offloads &=
2556                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2557                 mask |= ETH_VLAN_EXTEND_MASK;
2558         }
2559
2560         /* no change */
2561         if (mask == 0)
2562                 return ret;
2563
2564         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2565         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2566         if (ret) {
2567                 /* hit an error, restore original values */
2568                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2569         }
2570
2571         return eth_err(port_id, ret);
2572 }
2573
2574 int
2575 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2576 {
2577         struct rte_eth_dev *dev;
2578         int ret = 0;
2579
2580         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2581         dev = &rte_eth_devices[port_id];
2582
2583         if (dev->data->dev_conf.rxmode.offloads &
2584             DEV_RX_OFFLOAD_VLAN_STRIP)
2585                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2586
2587         if (dev->data->dev_conf.rxmode.offloads &
2588             DEV_RX_OFFLOAD_VLAN_FILTER)
2589                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2590
2591         if (dev->data->dev_conf.rxmode.offloads &
2592             DEV_RX_OFFLOAD_VLAN_EXTEND)
2593                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2594
2595         return ret;
2596 }
2597
2598 int
2599 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2600 {
2601         struct rte_eth_dev *dev;
2602
2603         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2604         dev = &rte_eth_devices[port_id];
2605         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2606
2607         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2608 }
2609
2610 int
2611 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2612 {
2613         struct rte_eth_dev *dev;
2614
2615         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2616         dev = &rte_eth_devices[port_id];
2617         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2618         memset(fc_conf, 0, sizeof(*fc_conf));
2619         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2620 }
2621
2622 int
2623 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2624 {
2625         struct rte_eth_dev *dev;
2626
2627         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2628         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2629                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2630                 return -EINVAL;
2631         }
2632
2633         dev = &rte_eth_devices[port_id];
2634         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2635         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2636 }
2637
2638 int
2639 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2640                                    struct rte_eth_pfc_conf *pfc_conf)
2641 {
2642         struct rte_eth_dev *dev;
2643
2644         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2645         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2646                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2647                 return -EINVAL;
2648         }
2649
2650         dev = &rte_eth_devices[port_id];
2651         /* High water, low water validation are device specific */
2652         if (*dev->dev_ops->priority_flow_ctrl_set)
2653                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2654                                         (dev, pfc_conf));
2655         return -ENOTSUP;
2656 }
2657
2658 static int
2659 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2660                         uint16_t reta_size)
2661 {
2662         uint16_t i, num;
2663
2664         if (!reta_conf)
2665                 return -EINVAL;
2666
2667         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2668         for (i = 0; i < num; i++) {
2669                 if (reta_conf[i].mask)
2670                         return 0;
2671         }
2672
2673         return -EINVAL;
2674 }
2675
2676 static int
2677 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2678                          uint16_t reta_size,
2679                          uint16_t max_rxq)
2680 {
2681         uint16_t i, idx, shift;
2682
2683         if (!reta_conf)
2684                 return -EINVAL;
2685
2686         if (max_rxq == 0) {
2687                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2688                 return -EINVAL;
2689         }
2690
2691         for (i = 0; i < reta_size; i++) {
2692                 idx = i / RTE_RETA_GROUP_SIZE;
2693                 shift = i % RTE_RETA_GROUP_SIZE;
2694                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2695                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2696                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2697                                 "the maximum rxq index: %u\n", idx, shift,
2698                                 reta_conf[idx].reta[shift], max_rxq);
2699                         return -EINVAL;
2700                 }
2701         }
2702
2703         return 0;
2704 }
2705
2706 int
2707 rte_eth_dev_rss_reta_update(uint16_t port_id,
2708                             struct rte_eth_rss_reta_entry64 *reta_conf,
2709                             uint16_t reta_size)
2710 {
2711         struct rte_eth_dev *dev;
2712         int ret;
2713
2714         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2715         /* Check mask bits */
2716         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2717         if (ret < 0)
2718                 return ret;
2719
2720         dev = &rte_eth_devices[port_id];
2721
2722         /* Check entry value */
2723         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2724                                 dev->data->nb_rx_queues);
2725         if (ret < 0)
2726                 return ret;
2727
2728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2729         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2730                                                              reta_size));
2731 }
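
/*
 * Illustrative usage sketch (not part of this file): spreading an RSS
 * redirection table of dev_info.reta_size entries round-robin over "nb_rxq"
 * queues. Each 64-entry group carries its own mask, matching the validation
 * helpers above; all names besides the ethdev calls are assumptions for the
 * example.
 *
 *	struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 /
 *					     RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, dev_info.reta_size);
 */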
2732
2733 int
2734 rte_eth_dev_rss_reta_query(uint16_t port_id,
2735                            struct rte_eth_rss_reta_entry64 *reta_conf,
2736                            uint16_t reta_size)
2737 {
2738         struct rte_eth_dev *dev;
2739         int ret;
2740
2741         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2742
2743         /* Check mask bits */
2744         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2745         if (ret < 0)
2746                 return ret;
2747
2748         dev = &rte_eth_devices[port_id];
2749         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2750         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2751                                                             reta_size));
2752 }
2753
2754 int
2755 rte_eth_dev_rss_hash_update(uint16_t port_id,
2756                             struct rte_eth_rss_conf *rss_conf)
2757 {
2758         struct rte_eth_dev *dev;
2759
2760         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2761         dev = &rte_eth_devices[port_id];
2762         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2763         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2764                                                                  rss_conf));
2765 }
2766
2767 int
2768 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2769                               struct rte_eth_rss_conf *rss_conf)
2770 {
2771         struct rte_eth_dev *dev;
2772
2773         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2774         dev = &rte_eth_devices[port_id];
2775         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2776         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2777                                                                    rss_conf));
2778 }
2779
2780 int
2781 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2782                                 struct rte_eth_udp_tunnel *udp_tunnel)
2783 {
2784         struct rte_eth_dev *dev;
2785
2786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2787         if (udp_tunnel == NULL) {
2788                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2789                 return -EINVAL;
2790         }
2791
2792         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2793                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2794                 return -EINVAL;
2795         }
2796
2797         dev = &rte_eth_devices[port_id];
2798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2799         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2800                                                                 udp_tunnel));
2801 }
2802
2803 int
2804 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2805                                    struct rte_eth_udp_tunnel *udp_tunnel)
2806 {
2807         struct rte_eth_dev *dev;
2808
2809         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2810         dev = &rte_eth_devices[port_id];
2811
2812         if (udp_tunnel == NULL) {
2813                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2814                 return -EINVAL;
2815         }
2816
2817         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2818                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2819                 return -EINVAL;
2820         }
2821
2822         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2823         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2824                                                                 udp_tunnel));
2825 }
2826
2827 int
2828 rte_eth_led_on(uint16_t port_id)
2829 {
2830         struct rte_eth_dev *dev;
2831
2832         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2833         dev = &rte_eth_devices[port_id];
2834         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2835         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2836 }
2837
2838 int
2839 rte_eth_led_off(uint16_t port_id)
2840 {
2841         struct rte_eth_dev *dev;
2842
2843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2844         dev = &rte_eth_devices[port_id];
2845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2846         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2847 }
2848
2849 /*
2850  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2851  * an empty spot.
2852  */
2853 static int
2854 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2855 {
2856         struct rte_eth_dev_info dev_info;
2857         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2858         unsigned i;
2859
2860         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2861         rte_eth_dev_info_get(port_id, &dev_info);
2862
2863         for (i = 0; i < dev_info.max_mac_addrs; i++)
2864                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2865                         return i;
2866
2867         return -1;
2868 }
2869
2870 static const struct ether_addr null_mac_addr;
2871
2872 int
2873 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2874                         uint32_t pool)
2875 {
2876         struct rte_eth_dev *dev;
2877         int index;
2878         uint64_t pool_mask;
2879         int ret;
2880
2881         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2882         dev = &rte_eth_devices[port_id];
2883         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2884
2885         if (is_zero_ether_addr(addr)) {
2886                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2887                         port_id);
2888                 return -EINVAL;
2889         }
2890         if (pool >= ETH_64_POOLS) {
2891                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2892                 return -EINVAL;
2893         }
2894
2895         index = get_mac_addr_index(port_id, addr);
2896         if (index < 0) {
2897                 index = get_mac_addr_index(port_id, &null_mac_addr);
2898                 if (index < 0) {
2899                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2900                                 port_id);
2901                         return -ENOSPC;
2902                 }
2903         } else {
2904                 pool_mask = dev->data->mac_pool_sel[index];
2905
2906                 /* If both the MAC address and pool are already present, do nothing */
2907                 if (pool_mask & (1ULL << pool))
2908                         return 0;
2909         }
2910
2911         /* Update NIC */
2912         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2913
2914         if (ret == 0) {
2915                 /* Update address in NIC data structure */
2916                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2917
2918                 /* Update pool bitmap in NIC data structure */
2919                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2920         }
2921
2922         return eth_err(port_id, ret);
2923 }
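
/*
 * Illustrative usage sketch (not part of this file): adding a secondary,
 * locally administered MAC address to pool 0. The address bytes are
 * assumptions for the example.
 *
 *	struct ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) != 0)
 *		printf("port %u: cannot add MAC address\n", port_id);
 */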
2924
2925 int
2926 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2927 {
2928         struct rte_eth_dev *dev;
2929         int index;
2930
2931         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2932         dev = &rte_eth_devices[port_id];
2933         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2934
2935         index = get_mac_addr_index(port_id, addr);
2936         if (index == 0) {
2937                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2938                 return -EADDRINUSE;
2939         } else if (index < 0)
2940                 return 0;  /* Do nothing if address wasn't found */
2941
2942         /* Update NIC */
2943         (*dev->dev_ops->mac_addr_remove)(dev, index);
2944
2945         /* Update address in NIC data structure */
2946         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2947
2948         /* reset pool bitmap */
2949         dev->data->mac_pool_sel[index] = 0;
2950
2951         return 0;
2952 }
2953
2954 int
2955 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2956 {
2957         struct rte_eth_dev *dev;
2958         int ret;
2959
2960         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2961
2962         if (!is_valid_assigned_ether_addr(addr))
2963                 return -EINVAL;
2964
2965         dev = &rte_eth_devices[port_id];
2966         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2967
2968         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
2969         if (ret < 0)
2970                 return ret;
2971
2972         /* Update default address in NIC data structure */
2973         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2974
2975         return 0;
2976 }
2977
2978
2979 /*
2980  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2981  * an empty spot.
2982  */
2983 static int
2984 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2985 {
2986         struct rte_eth_dev_info dev_info;
2987         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2988         unsigned i;
2989
2990         rte_eth_dev_info_get(port_id, &dev_info);
2991         if (!dev->data->hash_mac_addrs)
2992                 return -1;
2993
2994         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2995                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2996                         ETHER_ADDR_LEN) == 0)
2997                         return i;
2998
2999         return -1;
3000 }
3001
3002 int
3003 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3004                                 uint8_t on)
3005 {
3006         int index;
3007         int ret;
3008         struct rte_eth_dev *dev;
3009
3010         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3011
3012         dev = &rte_eth_devices[port_id];
3013         if (is_zero_ether_addr(addr)) {
3014                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3015                         port_id);
3016                 return -EINVAL;
3017         }
3018
3019         index = get_hash_mac_addr_index(port_id, addr);
3020         /* Check if it's already there, and do nothing */
3021         if ((index >= 0) && on)
3022                 return 0;
3023
3024         if (index < 0) {
3025                 if (!on) {
3026                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3027                                 "set in UTA\n", port_id);
3028                         return -EINVAL;
3029                 }
3030
3031                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3032                 if (index < 0) {
3033                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3034                                         port_id);
3035                         return -ENOSPC;
3036                 }
3037         }
3038
3039         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3040         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3041         if (ret == 0) {
3042                 /* Update address in NIC data structure */
3043                 if (on)
3044                         ether_addr_copy(addr,
3045                                         &dev->data->hash_mac_addrs[index]);
3046                 else
3047                         ether_addr_copy(&null_mac_addr,
3048                                         &dev->data->hash_mac_addrs[index]);
3049         }
3050
3051         return eth_err(port_id, ret);
3052 }
3053
3054 int
3055 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3056 {
3057         struct rte_eth_dev *dev;
3058
3059         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3060
3061         dev = &rte_eth_devices[port_id];
3062
3063         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3064         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3065                                                                        on));
3066 }
3067
3068 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3069                                         uint16_t tx_rate)
3070 {
3071         struct rte_eth_dev *dev;
3072         struct rte_eth_dev_info dev_info;
3073         struct rte_eth_link link;
3074
3075         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3076
3077         dev = &rte_eth_devices[port_id];
3078         rte_eth_dev_info_get(port_id, &dev_info);
3079         link = dev->data->dev_link;
3080
3081         if (queue_idx >= dev_info.max_tx_queues) {
3082                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3083                                 "invalid queue id=%d\n", port_id, queue_idx);
3084                 return -EINVAL;
3085         }
3086
3087         if (tx_rate > link.link_speed) {
3088                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
3089                                 "greater than link speed=%d\n",
3090                         tx_rate, link.link_speed);
3091                 return -EINVAL;
3092         }
3093
3094         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3095         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3096                                                         queue_idx, tx_rate));
3097 }
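
/*
 * Illustrative sketch (not part of the library): assuming a port whose PMD
 * implements set_queue_rate_limit, an application could cap TX queue 0 at
 * 100 Mbps (tx_rate is expressed in Mbps) like this:
 *
 *     if (rte_eth_set_queue_rate_limit(port_id, 0, 100) != 0)
 *             printf("rate limiting not applied on port %u\n", port_id);
 */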
3098
3099 int
3100 rte_eth_mirror_rule_set(uint16_t port_id,
3101                         struct rte_eth_mirror_conf *mirror_conf,
3102                         uint8_t rule_id, uint8_t on)
3103 {
3104         struct rte_eth_dev *dev;
3105
3106         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3107         if (mirror_conf->rule_type == 0) {
3108                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3109                 return -EINVAL;
3110         }
3111
3112         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3113                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3114                                 ETH_64_POOLS - 1);
3115                 return -EINVAL;
3116         }
3117
3118         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3119              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3120             (mirror_conf->pool_mask == 0)) {
3121                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
3122                 return -EINVAL;
3123         }
3124
3125         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3126             mirror_conf->vlan.vlan_mask == 0) {
3127                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
3128                 return -EINVAL;
3129         }
3130
3131         dev = &rte_eth_devices[port_id];
3132         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3133
3134         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3135                                                 mirror_conf, rule_id, on));
3136 }
3137
3138 int
3139 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3140 {
3141         struct rte_eth_dev *dev;
3142
3143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3144
3145         dev = &rte_eth_devices[port_id];
3146         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3147
3148         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3149                                                                    rule_id));
3150 }
3151
3152 RTE_INIT(eth_dev_init_cb_lists)
3153 {
3154         int i;
3155
3156         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3157                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3158 }
3159
3160 int
3161 rte_eth_dev_callback_register(uint16_t port_id,
3162                         enum rte_eth_event_type event,
3163                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3164 {
3165         struct rte_eth_dev *dev;
3166         struct rte_eth_dev_callback *user_cb;
3167         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3168         uint16_t last_port;
3169
3170         if (!cb_fn)
3171                 return -EINVAL;
3172
3173         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3174                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3175                 return -EINVAL;
3176         }
3177
3178         if (port_id == RTE_ETH_ALL) {
3179                 next_port = 0;
3180                 last_port = RTE_MAX_ETHPORTS - 1;
3181         } else {
3182                 next_port = last_port = port_id;
3183         }
3184
3185         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3186
3187         do {
3188                 dev = &rte_eth_devices[next_port];
3189
3190                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3191                         if (user_cb->cb_fn == cb_fn &&
3192                                 user_cb->cb_arg == cb_arg &&
3193                                 user_cb->event == event) {
3194                                 break;
3195                         }
3196                 }
3197
3198                 /* create a new callback. */
3199                 if (user_cb == NULL) {
3200                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3201                                 sizeof(struct rte_eth_dev_callback), 0);
3202                         if (user_cb != NULL) {
3203                                 user_cb->cb_fn = cb_fn;
3204                                 user_cb->cb_arg = cb_arg;
3205                                 user_cb->event = event;
3206                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3207                                                   user_cb, next);
3208                         } else {
3209                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3210                                 rte_eth_dev_callback_unregister(port_id, event,
3211                                                                 cb_fn, cb_arg);
3212                                 return -ENOMEM;
3213                         }
3214
3215                 }
3216         } while (++next_port <= last_port);
3217
3218         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3219         return 0;
3220 }
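
/*
 * Illustrative sketch (not part of the library): a typical application
 * registers a link-status-change handler on every port. "on_port_event" is a
 * hypothetical callback matching rte_eth_dev_cb_fn.
 *
 *     static int
 *     on_port_event(uint16_t port_id, enum rte_eth_event_type event,
 *                   void *cb_arg, void *ret_param)
 *     {
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             printf("port %u: received event %d\n", port_id, event);
 *             return 0;
 *     }
 *
 *     // RTE_ETH_ALL registers the callback on all valid ports.
 *     rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                                   on_port_event, NULL);
 */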
3221
3222 int
3223 rte_eth_dev_callback_unregister(uint16_t port_id,
3224                         enum rte_eth_event_type event,
3225                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3226 {
3227         int ret;
3228         struct rte_eth_dev *dev;
3229         struct rte_eth_dev_callback *cb, *next;
3230         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3231         uint16_t last_port;
3232
3233         if (!cb_fn)
3234                 return -EINVAL;
3235
3236         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3237                 ethdev_log(ERR, "Invalid port_id=%d", port_id);
3238                 return -EINVAL;
3239         }
3240
3241         if (port_id == RTE_ETH_ALL) {
3242                 next_port = 0;
3243                 last_port = RTE_MAX_ETHPORTS - 1;
3244         } else {
3245                 next_port = last_port = port_id;
3246         }
3247
3248         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3249
3250         do {
3251                 dev = &rte_eth_devices[next_port];
3252                 ret = 0;
3253                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3254                      cb = next) {
3255
3256                         next = TAILQ_NEXT(cb, next);
3257
3258                         if (cb->cb_fn != cb_fn || cb->event != event ||
3259                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3260                                 continue;
3261
3262                         /*
3263                          * if this callback is not executing right now,
3264                          * then remove it.
3265                          */
3266                         if (cb->active == 0) {
3267                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3268                                 rte_free(cb);
3269                         } else {
3270                                 ret = -EAGAIN;
3271                         }
3272                 }
3273         } while (++next_port <= last_port);
3274
3275         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3276         return ret;
3277 }
3278
3279 int
3280 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3281         enum rte_eth_event_type event, void *ret_param)
3282 {
3283         struct rte_eth_dev_callback *cb_lst;
3284         struct rte_eth_dev_callback dev_cb;
3285         int rc = 0;
3286
3287         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3288         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3289                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3290                         continue;
3291                 dev_cb = *cb_lst;
3292                 cb_lst->active = 1;
3293                 if (ret_param != NULL)
3294                         dev_cb.ret_param = ret_param;
3295
3296                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3297                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3298                                 dev_cb.cb_arg, dev_cb.ret_param);
3299                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3300                 cb_lst->active = 0;
3301         }
3302         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3303         return rc;
3304 }
3305
3306 int
3307 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3308 {
3309         uint32_t vec;
3310         struct rte_eth_dev *dev;
3311         struct rte_intr_handle *intr_handle;
3312         uint16_t qid;
3313         int rc;
3314
3315         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3316
3317         dev = &rte_eth_devices[port_id];
3318
3319         if (!dev->intr_handle) {
3320                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3321                 return -ENOTSUP;
3322         }
3323
3324         intr_handle = dev->intr_handle;
3325         if (!intr_handle->intr_vec) {
3326                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3327                 return -EPERM;
3328         }
3329
3330         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3331                 vec = intr_handle->intr_vec[qid];
3332                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3333                 if (rc && rc != -EEXIST) {
3334                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3335                                         " op %d epfd %d vec %u\n",
3336                                         port_id, qid, op, epfd, vec);
3337                 }
3338         }
3339
3340         return 0;
3341 }
3342
3343 const struct rte_memzone *
3344 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3345                          uint16_t queue_id, size_t size, unsigned align,
3346                          int socket_id)
3347 {
3348         char z_name[RTE_MEMZONE_NAMESIZE];
3349         const struct rte_memzone *mz;
3350
3351         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3352                  dev->device->driver->name, ring_name,
3353                  dev->data->port_id, queue_id);
3354
3355         mz = rte_memzone_lookup(z_name);
3356         if (mz)
3357                 return mz;
3358
3359         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3360                         RTE_MEMZONE_IOVA_CONTIG, align);
3361 }
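
/*
 * Illustrative sketch (not part of the library): inside a PMD's tx_queue_setup
 * callback, the descriptor ring memory is typically obtained through this
 * helper; "ring_size", "txq" and its fields are hypothetical driver-local
 * names.
 *
 *     const struct rte_memzone *mz;
 *
 *     mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, ring_size,
 *                                   RTE_CACHE_LINE_SIZE, socket_id);
 *     if (mz == NULL)
 *             return -ENOMEM;
 *     txq->ring_iova = mz->iova;
 *     txq->ring = mz->addr;
 */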
3362
3363 int
3364 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3365                           int epfd, int op, void *data)
3366 {
3367         uint32_t vec;
3368         struct rte_eth_dev *dev;
3369         struct rte_intr_handle *intr_handle;
3370         int rc;
3371
3372         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3373
3374         dev = &rte_eth_devices[port_id];
3375         if (queue_id >= dev->data->nb_rx_queues) {
3376                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3377                 return -EINVAL;
3378         }
3379
3380         if (!dev->intr_handle) {
3381                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3382                 return -ENOTSUP;
3383         }
3384
3385         intr_handle = dev->intr_handle;
3386         if (!intr_handle->intr_vec) {
3387                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3388                 return -EPERM;
3389         }
3390
3391         vec = intr_handle->intr_vec[queue_id];
3392         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3393         if (rc && rc != -EEXIST) {
3394                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3395                                 " op %d epfd %d vec %u\n",
3396                                 port_id, queue_id, op, epfd, vec);
3397                 return rc;
3398         }
3399
3400         return 0;
3401 }
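
/*
 * Illustrative sketch (not part of the library): an interrupt-driven RX loop
 * (as in the l3fwd-power example) typically binds one queue's interrupt to
 * the per-thread epoll instance and then arms it before sleeping.
 *
 *     struct rte_epoll_event ev;
 *
 *     // Map the queue's interrupt vector into this lcore's epoll set.
 *     rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                               RTE_INTR_EVENT_ADD, NULL);
 *
 *     // When the queue runs dry: arm the interrupt and wait for traffic.
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */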
3402
3403 int
3404 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3405                            uint16_t queue_id)
3406 {
3407         struct rte_eth_dev *dev;
3408
3409         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3410
3411         dev = &rte_eth_devices[port_id];
3412
3413         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3414         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3415                                                                 queue_id));
3416 }
3417
3418 int
3419 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3420                             uint16_t queue_id)
3421 {
3422         struct rte_eth_dev *dev;
3423
3424         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3425
3426         dev = &rte_eth_devices[port_id];
3427
3428         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3429         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3430                                                                 queue_id));
3431 }
3432
3433
3434 int
3435 rte_eth_dev_filter_supported(uint16_t port_id,
3436                              enum rte_filter_type filter_type)
3437 {
3438         struct rte_eth_dev *dev;
3439
3440         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3441
3442         dev = &rte_eth_devices[port_id];
3443         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3444         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3445                                 RTE_ETH_FILTER_NOP, NULL);
3446 }
3447
3448 int
3449 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3450                         enum rte_filter_op filter_op, void *arg)
3451 {
3452         struct rte_eth_dev *dev;
3453
3454         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3455
3456         dev = &rte_eth_devices[port_id];
3457         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3458         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3459                                                              filter_op, arg));
3460 }
3461
3462 const struct rte_eth_rxtx_callback *
3463 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3464                 rte_rx_callback_fn fn, void *user_param)
3465 {
3466 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3467         rte_errno = ENOTSUP;
3468         return NULL;
3469 #endif
3470         /* check input parameters */
3471         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3472                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3473                 rte_errno = EINVAL;
3474                 return NULL;
3475         }
3476         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3477
3478         if (cb == NULL) {
3479                 rte_errno = ENOMEM;
3480                 return NULL;
3481         }
3482
3483         cb->fn.rx = fn;
3484         cb->param = user_param;
3485
3486         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3487         /* Add the callback at the tail so callbacks run in FIFO order. */
3488         struct rte_eth_rxtx_callback *tail =
3489                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3490
3491         if (!tail) {
3492                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3493
3494         } else {
3495                 while (tail->next)
3496                         tail = tail->next;
3497                 tail->next = cb;
3498         }
3499         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3500
3501         return cb;
3502 }
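
/*
 * Illustrative sketch (not part of the library): a per-queue RX callback that
 * counts received packets. "rx_count" is a hypothetical application counter
 * handed in through user_param.
 *
 *     static uint16_t
 *     count_rx(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *              uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *     {
 *             uint64_t *rx_count = user_param;
 *
 *             RTE_SET_USED(port_id);
 *             RTE_SET_USED(queue);
 *             RTE_SET_USED(pkts);
 *             RTE_SET_USED(max_pkts);
 *             *rx_count += nb_pkts;
 *             return nb_pkts;   // return the number of packets kept
 *     }
 *
 *     const struct rte_eth_rxtx_callback *cb =
 *             rte_eth_add_rx_callback(port_id, 0, count_rx, &rx_count);
 */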
3503
3504 const struct rte_eth_rxtx_callback *
3505 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3506                 rte_rx_callback_fn fn, void *user_param)
3507 {
3508 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3509         rte_errno = ENOTSUP;
3510         return NULL;
3511 #endif
3512         /* check input parameters */
3513         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3514                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3515                 rte_errno = EINVAL;
3516                 return NULL;
3517         }
3518
3519         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3520
3521         if (cb == NULL) {
3522                 rte_errno = ENOMEM;
3523                 return NULL;
3524         }
3525
3526         cb->fn.rx = fn;
3527         cb->param = user_param;
3528
3529         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3530         /* Add the callback at the first position. */
3531         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3532         rte_smp_wmb();
3533         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3534         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3535
3536         return cb;
3537 }
3538
3539 const struct rte_eth_rxtx_callback *
3540 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3541                 rte_tx_callback_fn fn, void *user_param)
3542 {
3543 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3544         rte_errno = ENOTSUP;
3545         return NULL;
3546 #endif
3547         /* check input parameters */
3548         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3549                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3550                 rte_errno = EINVAL;
3551                 return NULL;
3552         }
3553
3554         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3555
3556         if (cb == NULL) {
3557                 rte_errno = ENOMEM;
3558                 return NULL;
3559         }
3560
3561         cb->fn.tx = fn;
3562         cb->param = user_param;
3563
3564         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3565         /* Add the callback at the tail so callbacks run in FIFO order. */
3566         struct rte_eth_rxtx_callback *tail =
3567                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3568
3569         if (!tail) {
3570                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3571
3572         } else {
3573                 while (tail->next)
3574                         tail = tail->next;
3575                 tail->next = cb;
3576         }
3577         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3578
3579         return cb;
3580 }
3581
3582 int
3583 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3584                 const struct rte_eth_rxtx_callback *user_cb)
3585 {
3586 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3587         return -ENOTSUP;
3588 #endif
3589         /* Check input parameters. */
3590         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3591         if (user_cb == NULL ||
3592                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3593                 return -EINVAL;
3594
3595         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3596         struct rte_eth_rxtx_callback *cb;
3597         struct rte_eth_rxtx_callback **prev_cb;
3598         int ret = -EINVAL;
3599
3600         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3601         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3602         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3603                 cb = *prev_cb;
3604                 if (cb == user_cb) {
3605                         /* Remove the user cb from the callback list. */
3606                         *prev_cb = cb->next;
3607                         ret = 0;
3608                         break;
3609                 }
3610         }
3611         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3612
3613         return ret;
3614 }
3615
3616 int
3617 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3618                 const struct rte_eth_rxtx_callback *user_cb)
3619 {
3620 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3621         return -ENOTSUP;
3622 #endif
3623         /* Check input parameters. */
3624         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3625         if (user_cb == NULL ||
3626                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3627                 return -EINVAL;
3628
3629         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3630         int ret = -EINVAL;
3631         struct rte_eth_rxtx_callback *cb;
3632         struct rte_eth_rxtx_callback **prev_cb;
3633
3634         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3635         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3636         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3637                 cb = *prev_cb;
3638                 if (cb == user_cb) {
3639                         /* Remove the user cb from the callback list. */
3640                         *prev_cb = cb->next;
3641                         ret = 0;
3642                         break;
3643                 }
3644         }
3645         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3646
3647         return ret;
3648 }
3649
3650 int
3651 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3652         struct rte_eth_rxq_info *qinfo)
3653 {
3654         struct rte_eth_dev *dev;
3655
3656         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3657
3658         if (qinfo == NULL)
3659                 return -EINVAL;
3660
3661         dev = &rte_eth_devices[port_id];
3662         if (queue_id >= dev->data->nb_rx_queues) {
3663                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3664                 return -EINVAL;
3665         }
3666
3667         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3668
3669         memset(qinfo, 0, sizeof(*qinfo));
3670         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3671         return 0;
3672 }
3673
3674 int
3675 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3676         struct rte_eth_txq_info *qinfo)
3677 {
3678         struct rte_eth_dev *dev;
3679
3680         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3681
3682         if (qinfo == NULL)
3683                 return -EINVAL;
3684
3685         dev = &rte_eth_devices[port_id];
3686         if (queue_id >= dev->data->nb_tx_queues) {
3687                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3688                 return -EINVAL;
3689         }
3690
3691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3692
3693         memset(qinfo, 0, sizeof(*qinfo));
3694         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3695         return 0;
3696 }
3697
3698 int
3699 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3700                              struct ether_addr *mc_addr_set,
3701                              uint32_t nb_mc_addr)
3702 {
3703         struct rte_eth_dev *dev;
3704
3705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3706
3707         dev = &rte_eth_devices[port_id];
3708         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3709         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3710                                                 mc_addr_set, nb_mc_addr));
3711 }
3712
3713 int
3714 rte_eth_timesync_enable(uint16_t port_id)
3715 {
3716         struct rte_eth_dev *dev;
3717
3718         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3719         dev = &rte_eth_devices[port_id];
3720
3721         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3722         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3723 }
3724
3725 int
3726 rte_eth_timesync_disable(uint16_t port_id)
3727 {
3728         struct rte_eth_dev *dev;
3729
3730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3731         dev = &rte_eth_devices[port_id];
3732
3733         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3734         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3735 }
3736
3737 int
3738 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3739                                    uint32_t flags)
3740 {
3741         struct rte_eth_dev *dev;
3742
3743         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3744         dev = &rte_eth_devices[port_id];
3745
3746         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3747         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3748                                 (dev, timestamp, flags));
3749 }
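
/*
 * Illustrative sketch (not part of the library): the usual IEEE 1588 flow (see
 * the ptpclient example) enables timesync once and then reads the RX timestamp
 * for mbufs the PMD has flagged; on some devices the last argument selects a
 * timestamp register, 0 is a common default.
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(port_id);
 *     ...
 *     if ((mbuf->ol_flags & PKT_RX_IEEE1588_TMST) &&
 *         rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *             printf("RX timestamp: %ld.%09ld\n",
 *                    (long)ts.tv_sec, ts.tv_nsec);
 */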
3750
3751 int
3752 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3753                                    struct timespec *timestamp)
3754 {
3755         struct rte_eth_dev *dev;
3756
3757         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3758         dev = &rte_eth_devices[port_id];
3759
3760         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3761         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3762                                 (dev, timestamp));
3763 }
3764
3765 int
3766 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3767 {
3768         struct rte_eth_dev *dev;
3769
3770         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3771         dev = &rte_eth_devices[port_id];
3772
3773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3774         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3775                                                                       delta));
3776 }
3777
3778 int
3779 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3780 {
3781         struct rte_eth_dev *dev;
3782
3783         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3784         dev = &rte_eth_devices[port_id];
3785
3786         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3787         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3788                                                                 timestamp));
3789 }
3790
3791 int
3792 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3793 {
3794         struct rte_eth_dev *dev;
3795
3796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3797         dev = &rte_eth_devices[port_id];
3798
3799         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3800         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3801                                                                 timestamp));
3802 }
3803
3804 int
3805 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3806 {
3807         struct rte_eth_dev *dev;
3808
3809         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3810
3811         dev = &rte_eth_devices[port_id];
3812         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3813         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3814 }
3815
3816 int
3817 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3818 {
3819         struct rte_eth_dev *dev;
3820
3821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3822
3823         dev = &rte_eth_devices[port_id];
3824         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3825         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3826 }
3827
3828 int
3829 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3830 {
3831         struct rte_eth_dev *dev;
3832
3833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3834
3835         dev = &rte_eth_devices[port_id];
3836         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3837         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3838 }
3839
3840 int
3841 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3842 {
3843         struct rte_eth_dev *dev;
3844
3845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3846
3847         dev = &rte_eth_devices[port_id];
3848         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3849         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3850 }
3851
3852 int
3853 rte_eth_dev_get_dcb_info(uint16_t port_id,
3854                              struct rte_eth_dcb_info *dcb_info)
3855 {
3856         struct rte_eth_dev *dev;
3857
3858         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3859
3860         dev = &rte_eth_devices[port_id];
3861         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3862
3863         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3864         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
3865 }
3866
3867 int
3868 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3869                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3870 {
3871         struct rte_eth_dev *dev;
3872
3873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3874         if (l2_tunnel == NULL) {
3875                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3876                 return -EINVAL;
3877         }
3878
3879         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3880                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3881                 return -EINVAL;
3882         }
3883
3884         dev = &rte_eth_devices[port_id];
3885         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3886                                 -ENOTSUP);
3887         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
3888                                                                 l2_tunnel));
3889 }
3890
3891 int
3892 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3893                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3894                                   uint32_t mask,
3895                                   uint8_t en)
3896 {
3897         struct rte_eth_dev *dev;
3898
3899         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3900
3901         if (l2_tunnel == NULL) {
3902                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3903                 return -EINVAL;
3904         }
3905
3906         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3907                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3908                 return -EINVAL;
3909         }
3910
3911         if (mask == 0) {
3912                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3913                 return -EINVAL;
3914         }
3915
3916         dev = &rte_eth_devices[port_id];
3917         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3918                                 -ENOTSUP);
3919         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
3920                                                         l2_tunnel, mask, en));
3921 }
3922
3923 static void
3924 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3925                            const struct rte_eth_desc_lim *desc_lim)
3926 {
3927         if (desc_lim->nb_align != 0)
3928                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3929
3930         if (desc_lim->nb_max != 0)
3931                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3932
3933         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3934 }
3935
3936 int
3937 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3938                                  uint16_t *nb_rx_desc,
3939                                  uint16_t *nb_tx_desc)
3940 {
3941         struct rte_eth_dev *dev;
3942         struct rte_eth_dev_info dev_info;
3943
3944         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3945
3946         dev = &rte_eth_devices[port_id];
3947         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3948
3949         rte_eth_dev_info_get(port_id, &dev_info);
3950
3951         if (nb_rx_desc != NULL)
3952                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3953
3954         if (nb_tx_desc != NULL)
3955                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3956
3957         return 0;
3958 }
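
/*
 * Illustrative sketch (not part of the library): applications normally clamp
 * their requested descriptor counts before setting up queues, so that values
 * outside the PMD's limits do not make queue setup fail. "mb_pool" is a
 * hypothetical mempool created by the application.
 *
 *     uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *     rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *     rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *                            rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *                            rte_eth_dev_socket_id(port_id), NULL);
 */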
3959
3960 int
3961 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3962 {
3963         struct rte_eth_dev *dev;
3964
3965         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3966
3967         if (pool == NULL)
3968                 return -EINVAL;
3969
3970         dev = &rte_eth_devices[port_id];
3971
3972         if (*dev->dev_ops->pool_ops_supported == NULL)
3973                 return 1; /* all pools are supported */
3974
3975         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3976 }
3977
3978 RTE_INIT(ethdev_init_log);
3979 static void
3980 ethdev_init_log(void)
3981 {
3982         ethdev_logtype = rte_log_register("lib.ethdev");
3983         if (ethdev_logtype >= 0)
3984                 rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
3985 }