ethdev: fix port id allocation
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_compat.h>
38
39 #include "rte_ether.h"
40 #include "rte_ethdev.h"
41 #include "rte_ethdev_driver.h"
42 #include "ethdev_profile.h"
43
44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
46 static struct rte_eth_dev_data *rte_eth_dev_data;
47 static uint16_t eth_dev_last_created_port;
48
49 /* spinlock for eth device callbacks */
50 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
51
52 /* spinlock for add/remove rx callbacks */
53 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
54
55 /* spinlock for add/remove tx callbacks */
56 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
57
58 /* store statistics names and their offsets in the stats structure */
59 struct rte_eth_xstats_name_off {
60         char name[RTE_ETH_XSTATS_NAME_SIZE];
61         unsigned offset;
62 };
63
64 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
65         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
66         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
67         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
68         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
69         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
70         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
71         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
72         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
73                 rx_nombuf)},
74 };
75
76 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
77
78 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
79         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
80         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
81         {"errors", offsetof(struct rte_eth_stats, q_errors)},
82 };
83
84 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
85                 sizeof(rte_rxq_stats_strings[0]))
86
87 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
88         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
89         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
90 };
91 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
92                 sizeof(rte_txq_stats_strings[0]))
93
94 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
95         { DEV_RX_OFFLOAD_##_name, #_name }
96
97 static const struct {
98         uint64_t offload;
99         const char *name;
100 } rte_rx_offload_names[] = {
101         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
102         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
103         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
104         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
105         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
106         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
107         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
108         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
109         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
111         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
112         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
113         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
114         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
115         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
116         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
117 };
118
119 #undef RTE_RX_OFFLOAD_BIT2STR
120
121 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
122         { DEV_TX_OFFLOAD_##_name, #_name }
123
124 static const struct {
125         uint64_t offload;
126         const char *name;
127 } rte_tx_offload_names[] = {
128         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
129         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
130         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
131         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
132         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
133         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
134         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
135         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
136         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
137         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
138         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
139         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
140         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
141         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
142         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
143         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
144         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
145         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
146 };
147
148 #undef RTE_TX_OFFLOAD_BIT2STR
149
150 /**
151  * The user application callback description.
152  *
153  * It contains the callback address registered by the user application,
154  * a pointer to the callback's parameter, and the event type.
155  */
156 struct rte_eth_dev_callback {
157         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
158         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
159         void *cb_arg;                           /**< Parameter for callback */
160         void *ret_param;                        /**< Return parameter */
161         enum rte_eth_event_type event;          /**< Interrupt event type */
162         uint32_t active;                        /**< Callback is executing */
163 };
164
165 enum {
166         STAT_QMAP_TX = 0,
167         STAT_QMAP_RX
168 };
169
170 uint16_t
171 rte_eth_find_next(uint16_t port_id)
172 {
173         while (port_id < RTE_MAX_ETHPORTS &&
174                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
175                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
176                 port_id++;
177
178         if (port_id >= RTE_MAX_ETHPORTS)
179                 return RTE_MAX_ETHPORTS;
180
181         return port_id;
182 }
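
/*
 * rte_eth_find_next() skips unused port slots and is the building block of
 * the RTE_ETH_FOREACH_DEV() iterator. A minimal usage sketch (init_port() is
 * a hypothetical application helper, not part of this library):
 *
 *	uint16_t port;
 *
 *	RTE_ETH_FOREACH_DEV(port) {
 *		printf("found port %u on socket %d\n",
 *		       port, rte_eth_dev_socket_id(port));
 *		init_port(port);
 *	}
 */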
183
184 static void
185 rte_eth_dev_data_alloc(void)
186 {
187         const unsigned flags = 0;
188         const struct rte_memzone *mz;
189
190         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
191                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
192                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
193                                 rte_socket_id(), flags);
194         } else
195                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
196         if (mz == NULL)
197                 rte_panic("Cannot allocate memzone for ethernet port data\n");
198
199         rte_eth_dev_data = mz->addr;
200         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
201                 memset(rte_eth_dev_data, 0,
202                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
203 }
204
205 struct rte_eth_dev *
206 rte_eth_dev_allocated(const char *name)
207 {
208         unsigned i;
209
210         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
211                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
212                     strcmp(rte_eth_devices[i].data->name, name) == 0)
213                         return &rte_eth_devices[i];
214         }
215         return NULL;
216 }
217
218 static uint16_t
219 rte_eth_dev_find_free_port(void)
220 {
221         unsigned i;
222
223         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
224                 /* Using shared name field to find a free port. */
225                 if (rte_eth_dev_data[i].name[0] == '\0') {
226                         RTE_ASSERT(rte_eth_devices[i].state ==
227                                    RTE_ETH_DEV_UNUSED);
228                         return i;
229                 }
230         }
231         return RTE_MAX_ETHPORTS;
232 }
233
234 static struct rte_eth_dev *
235 eth_dev_get(uint16_t port_id)
236 {
237         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
238
239         eth_dev->data = &rte_eth_dev_data[port_id];
240         eth_dev->state = RTE_ETH_DEV_ATTACHED;
241
242         eth_dev_last_created_port = port_id;
243
244         return eth_dev;
245 }
246
247 struct rte_eth_dev *
248 rte_eth_dev_allocate(const char *name)
249 {
250         uint16_t port_id;
251         struct rte_eth_dev *eth_dev;
252
253         port_id = rte_eth_dev_find_free_port();
254         if (port_id == RTE_MAX_ETHPORTS) {
255                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
256                 return NULL;
257         }
258
259         if (rte_eth_dev_data == NULL)
260                 rte_eth_dev_data_alloc();
261
262         if (rte_eth_dev_allocated(name) != NULL) {
263                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
264                                 name);
265                 return NULL;
266         }
267
268         eth_dev = eth_dev_get(port_id);
269         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
270         eth_dev->data->port_id = port_id;
271         eth_dev->data->mtu = ETHER_MTU;
272
273         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
274
275         return eth_dev;
276 }
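
/*
 * rte_eth_dev_allocate() is intended to be called from a PMD probe routine.
 * A simplified sketch of such a caller; the device name and the
 * struct my_pmd_private type are illustrative only:
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	eth_dev = rte_eth_dev_allocate("net_example0");
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->data->dev_private = rte_zmalloc_socket("my_pmd_private",
 *			sizeof(struct my_pmd_private), RTE_CACHE_LINE_SIZE,
 *			rte_socket_id());
 *	if (eth_dev->data->dev_private == NULL)
 *		return -ENOMEM;
 */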
277
278 /*
279  * Attach to a port already registered by the primary process, which
280  * ensures that the same device gets the same port id in both the
281  * primary and secondary processes.
282  */
283 struct rte_eth_dev *
284 rte_eth_dev_attach_secondary(const char *name)
285 {
286         uint16_t i;
287         struct rte_eth_dev *eth_dev;
288
289         if (rte_eth_dev_data == NULL)
290                 rte_eth_dev_data_alloc();
291
292         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
293                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
294                         break;
295         }
296         if (i == RTE_MAX_ETHPORTS) {
297                 RTE_PMD_DEBUG_TRACE(
298                         "device %s is not driven by the primary process\n",
299                         name);
300                 return NULL;
301         }
302
303         eth_dev = eth_dev_get(i);
304         RTE_ASSERT(eth_dev->data->port_id == i);
305
306         return eth_dev;
307 }
308
309 int
310 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
311 {
312         if (eth_dev == NULL)
313                 return -EINVAL;
314
315         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
316         eth_dev->state = RTE_ETH_DEV_UNUSED;
317
318         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
319
320         return 0;
321 }
322
323 int
324 rte_eth_dev_is_valid_port(uint16_t port_id)
325 {
326         if (port_id >= RTE_MAX_ETHPORTS ||
327             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
328                 return 0;
329         else
330                 return 1;
331 }
332
333 int
334 rte_eth_dev_socket_id(uint16_t port_id)
335 {
336         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
337         return rte_eth_devices[port_id].data->numa_node;
338 }
339
340 void *
341 rte_eth_dev_get_sec_ctx(uint8_t port_id)
342 {
343         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
344         return rte_eth_devices[port_id].security_ctx;
345 }
346
347 uint16_t
348 rte_eth_dev_count(void)
349 {
350         uint16_t p;
351         uint16_t count;
352
353         count = 0;
354
355         RTE_ETH_FOREACH_DEV(p)
356                 count++;
357
358         return count;
359 }
360
361 int
362 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
363 {
364         char *tmp;
365
366         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
367
368         if (name == NULL) {
369                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
370                 return -EINVAL;
371         }
372
373         /* Don't check 'rte_eth_devices[i].data' here,
374          * because it might be overwritten by a VDEV PMD. */
375         tmp = rte_eth_dev_data[port_id].name;
376         strcpy(name, tmp);
377         return 0;
378 }
379
380 int
381 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
382 {
383         int i;
384
385         if (name == NULL) {
386                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
387                 return -EINVAL;
388         }
389
390         RTE_ETH_FOREACH_DEV(i) {
391                 if (!strncmp(name,
392                         rte_eth_dev_data[i].name, strlen(name))) {
393
394                         *port_id = i;
395
396                         return 0;
397                 }
398         }
399         return -ENODEV;
400 }
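
/*
 * Usage sketch: resolving a device name to its port id; the PCI address
 * string below is only an example value:
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_get_port_by_name("0000:03:00.0", &port_id) != 0)
 *		rte_exit(EXIT_FAILURE, "device not found\n");
 */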
401
402 static int
403 eth_err(uint16_t port_id, int ret)
404 {
405         if (ret == 0)
406                 return 0;
407         if (rte_eth_dev_is_removed(port_id))
408                 return -EIO;
409         return ret;
410 }
411
412 /* attach the new device, then store port_id of the device */
413 int
414 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
415 {
416         int ret = -1;
417         int current = rte_eth_dev_count();
418         char *name = NULL;
419         char *args = NULL;
420
421         if ((devargs == NULL) || (port_id == NULL)) {
422                 ret = -EINVAL;
423                 goto err;
424         }
425
426         /* parse devargs, then retrieve device name and args */
427         if (rte_eal_parse_devargs_str(devargs, &name, &args))
428                 goto err;
429
430         ret = rte_eal_dev_attach(name, args);
431         if (ret < 0)
432                 goto err;
433
434         /* no point looking at the port count if no port exists */
435         if (!rte_eth_dev_count()) {
436                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
437                 ret = -1;
438                 goto err;
439         }
440
441         /* if nothing happened, there is a bug here, since some driver told us
442          * it did attach a device, but did not create a port.
443          */
444         if (current == rte_eth_dev_count()) {
445                 ret = -1;
446                 goto err;
447         }
448
449         *port_id = eth_dev_last_created_port;
450         ret = 0;
451
452 err:
453         free(name);
454         free(args);
455         return ret;
456 }
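
/*
 * Hot-plug usage sketch: attaching a virtual device at run time; the
 * devargs string ("net_null0", the null PMD) is only an example:
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_attach("net_null0", &port_id) == 0)
 *		printf("attached as port %u\n", port_id);
 */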
457
458 /* detach the device, then store the name of the device */
459 int
460 rte_eth_dev_detach(uint16_t port_id, char *name)
461 {
462         uint32_t dev_flags;
463         int ret = -1;
464
465         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
466
467         if (name == NULL) {
468                 ret = -EINVAL;
469                 goto err;
470         }
471
472         dev_flags = rte_eth_devices[port_id].data->dev_flags;
473         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
474                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
475                         port_id);
476                 ret = -ENOTSUP;
477                 goto err;
478         }
479
480         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
481                  "%s", rte_eth_devices[port_id].data->name);
482
483         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
484         if (ret < 0)
485                 goto err;
486
487         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
488         return 0;
489
490 err:
491         return ret;
492 }
493
494 static int
495 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
496 {
497         uint16_t old_nb_queues = dev->data->nb_rx_queues;
498         void **rxq;
499         unsigned i;
500
501         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
502                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
503                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
504                                 RTE_CACHE_LINE_SIZE);
505                 if (dev->data->rx_queues == NULL) {
506                         dev->data->nb_rx_queues = 0;
507                         return -(ENOMEM);
508                 }
509         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
510                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
511
512                 rxq = dev->data->rx_queues;
513
514                 for (i = nb_queues; i < old_nb_queues; i++)
515                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
516                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
517                                 RTE_CACHE_LINE_SIZE);
518                 if (rxq == NULL)
519                         return -(ENOMEM);
520                 if (nb_queues > old_nb_queues) {
521                         uint16_t new_qs = nb_queues - old_nb_queues;
522
523                         memset(rxq + old_nb_queues, 0,
524                                 sizeof(rxq[0]) * new_qs);
525                 }
526
527                 dev->data->rx_queues = rxq;
528
529         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
530                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
531
532                 rxq = dev->data->rx_queues;
533
534                 for (i = nb_queues; i < old_nb_queues; i++)
535                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
536
537                 rte_free(dev->data->rx_queues);
538                 dev->data->rx_queues = NULL;
539         }
540         dev->data->nb_rx_queues = nb_queues;
541         return 0;
542 }
543
544 int
545 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
546 {
547         struct rte_eth_dev *dev;
548
549         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
550
551         dev = &rte_eth_devices[port_id];
552         if (rx_queue_id >= dev->data->nb_rx_queues) {
553                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
554                 return -EINVAL;
555         }
556
557         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
558
559         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
560                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
561                         " already started\n",
562                         rx_queue_id, port_id);
563                 return 0;
564         }
565
566         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
567                                                              rx_queue_id));
568
569 }
570
571 int
572 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
573 {
574         struct rte_eth_dev *dev;
575
576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
577
578         dev = &rte_eth_devices[port_id];
579         if (rx_queue_id >= dev->data->nb_rx_queues) {
580                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
581                 return -EINVAL;
582         }
583
584         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
585
586         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
587                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
588                         " already stopped\n",
589                         rx_queue_id, port_id);
590                 return 0;
591         }
592
593         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
594
595 }
596
597 int
598 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
599 {
600         struct rte_eth_dev *dev;
601
602         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
603
604         dev = &rte_eth_devices[port_id];
605         if (tx_queue_id >= dev->data->nb_tx_queues) {
606                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
607                 return -EINVAL;
608         }
609
610         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
611
612         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
613                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
614                         " already started\n",
615                         tx_queue_id, port_id);
616                 return 0;
617         }
618
619         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
620                                                              tx_queue_id));
621
622 }
623
624 int
625 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
626 {
627         struct rte_eth_dev *dev;
628
629         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
630
631         dev = &rte_eth_devices[port_id];
632         if (tx_queue_id >= dev->data->nb_tx_queues) {
633                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
634                 return -EINVAL;
635         }
636
637         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
638
639         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
640                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
641                         " already stopped\n",
642                         tx_queue_id, port_id);
643                 return 0;
644         }
645
646         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
647
648 }
649
650 static int
651 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
652 {
653         uint16_t old_nb_queues = dev->data->nb_tx_queues;
654         void **txq;
655         unsigned i;
656
657         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
658                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
659                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
660                                                    RTE_CACHE_LINE_SIZE);
661                 if (dev->data->tx_queues == NULL) {
662                         dev->data->nb_tx_queues = 0;
663                         return -(ENOMEM);
664                 }
665         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
666                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
667
668                 txq = dev->data->tx_queues;
669
670                 for (i = nb_queues; i < old_nb_queues; i++)
671                         (*dev->dev_ops->tx_queue_release)(txq[i]);
672                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
673                                   RTE_CACHE_LINE_SIZE);
674                 if (txq == NULL)
675                         return -ENOMEM;
676                 if (nb_queues > old_nb_queues) {
677                         uint16_t new_qs = nb_queues - old_nb_queues;
678
679                         memset(txq + old_nb_queues, 0,
680                                sizeof(txq[0]) * new_qs);
681                 }
682
683                 dev->data->tx_queues = txq;
684
685         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
686                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
687
688                 txq = dev->data->tx_queues;
689
690                 for (i = nb_queues; i < old_nb_queues; i++)
691                         (*dev->dev_ops->tx_queue_release)(txq[i]);
692
693                 rte_free(dev->data->tx_queues);
694                 dev->data->tx_queues = NULL;
695         }
696         dev->data->nb_tx_queues = nb_queues;
697         return 0;
698 }
699
700 uint32_t
701 rte_eth_speed_bitflag(uint32_t speed, int duplex)
702 {
703         switch (speed) {
704         case ETH_SPEED_NUM_10M:
705                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
706         case ETH_SPEED_NUM_100M:
707                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
708         case ETH_SPEED_NUM_1G:
709                 return ETH_LINK_SPEED_1G;
710         case ETH_SPEED_NUM_2_5G:
711                 return ETH_LINK_SPEED_2_5G;
712         case ETH_SPEED_NUM_5G:
713                 return ETH_LINK_SPEED_5G;
714         case ETH_SPEED_NUM_10G:
715                 return ETH_LINK_SPEED_10G;
716         case ETH_SPEED_NUM_20G:
717                 return ETH_LINK_SPEED_20G;
718         case ETH_SPEED_NUM_25G:
719                 return ETH_LINK_SPEED_25G;
720         case ETH_SPEED_NUM_40G:
721                 return ETH_LINK_SPEED_40G;
722         case ETH_SPEED_NUM_50G:
723                 return ETH_LINK_SPEED_50G;
724         case ETH_SPEED_NUM_56G:
725                 return ETH_LINK_SPEED_56G;
726         case ETH_SPEED_NUM_100G:
727                 return ETH_LINK_SPEED_100G;
728         default:
729                 return 0;
730         }
731 }
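
/*
 * Example (sketch only): building a link_speeds mask for a fixed selection
 * of 10G and 1G full duplex, e.g. to fill rte_eth_conf.link_speeds:
 *
 *	uint32_t speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX) |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, ETH_LINK_FULL_DUPLEX);
 */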
732
733 /**
734  * Convert the legacy rxmode bit-field configuration to the offloads API.
735  */
736 static void
737 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
738                                     uint64_t *rx_offloads)
739 {
740         uint64_t offloads = 0;
741
742         if (rxmode->header_split == 1)
743                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
744         if (rxmode->hw_ip_checksum == 1)
745                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
746         if (rxmode->hw_vlan_filter == 1)
747                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
748         if (rxmode->hw_vlan_strip == 1)
749                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
750         if (rxmode->hw_vlan_extend == 1)
751                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
752         if (rxmode->jumbo_frame == 1)
753                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
754         if (rxmode->hw_strip_crc == 1)
755                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
756         if (rxmode->enable_scatter == 1)
757                 offloads |= DEV_RX_OFFLOAD_SCATTER;
758         if (rxmode->enable_lro == 1)
759                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
760         if (rxmode->hw_timestamp == 1)
761                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
762         if (rxmode->security == 1)
763                 offloads |= DEV_RX_OFFLOAD_SECURITY;
764
765         *rx_offloads = offloads;
766 }
767
768 /**
769  * Convert offloads API flags back to the legacy rxmode bit-field configuration.
770  */
771 static void
772 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
773                             struct rte_eth_rxmode *rxmode)
774 {
775
776         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
777                 rxmode->header_split = 1;
778         else
779                 rxmode->header_split = 0;
780         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
781                 rxmode->hw_ip_checksum = 1;
782         else
783                 rxmode->hw_ip_checksum = 0;
784         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
785                 rxmode->hw_vlan_filter = 1;
786         else
787                 rxmode->hw_vlan_filter = 0;
788         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
789                 rxmode->hw_vlan_strip = 1;
790         else
791                 rxmode->hw_vlan_strip = 0;
792         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
793                 rxmode->hw_vlan_extend = 1;
794         else
795                 rxmode->hw_vlan_extend = 0;
796         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
797                 rxmode->jumbo_frame = 1;
798         else
799                 rxmode->jumbo_frame = 0;
800         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
801                 rxmode->hw_strip_crc = 1;
802         else
803                 rxmode->hw_strip_crc = 0;
804         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
805                 rxmode->enable_scatter = 1;
806         else
807                 rxmode->enable_scatter = 0;
808         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
809                 rxmode->enable_lro = 1;
810         else
811                 rxmode->enable_lro = 0;
812         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
813                 rxmode->hw_timestamp = 1;
814         else
815                 rxmode->hw_timestamp = 0;
816         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
817                 rxmode->security = 1;
818         else
819                 rxmode->security = 0;
820 }
821
822 const char * __rte_experimental
823 rte_eth_dev_rx_offload_name(uint64_t offload)
824 {
825         const char *name = "UNKNOWN";
826         unsigned int i;
827
828         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
829                 if (offload == rte_rx_offload_names[i].offload) {
830                         name = rte_rx_offload_names[i].name;
831                         break;
832                 }
833         }
834
835         return name;
836 }
837
838 const char * __rte_experimental
839 rte_eth_dev_tx_offload_name(uint64_t offload)
840 {
841         const char *name = "UNKNOWN";
842         unsigned int i;
843
844         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
845                 if (offload == rte_tx_offload_names[i].offload) {
846                         name = rte_tx_offload_names[i].name;
847                         break;
848                 }
849         }
850
851         return name;
852 }
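
/*
 * Usage sketch (these name helpers are experimental API): printing the Rx
 * offload capabilities reported by a port, one name per capability bit:
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint64_t cap;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	for (cap = 1; cap != 0; cap <<= 1)
 *		if (dev_info.rx_offload_capa & cap)
 *			printf("  %s\n", rte_eth_dev_rx_offload_name(cap));
 */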
853
854 int
855 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
856                       const struct rte_eth_conf *dev_conf)
857 {
858         struct rte_eth_dev *dev;
859         struct rte_eth_dev_info dev_info;
860         struct rte_eth_conf local_conf = *dev_conf;
861         int diag;
862
863         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
864
865         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
866                 RTE_PMD_DEBUG_TRACE(
867                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
868                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
869                 return -EINVAL;
870         }
871
872         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
873                 RTE_PMD_DEBUG_TRACE(
874                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
875                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
876                 return -EINVAL;
877         }
878
879         dev = &rte_eth_devices[port_id];
880
881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
883
884         if (dev->data->dev_started) {
885                 RTE_PMD_DEBUG_TRACE(
886                     "port %d must be stopped to allow configuration\n", port_id);
887                 return -EBUSY;
888         }
889
890         /*
891          * Convert between the offloads API to enable PMDs to support
892          * only one of them.
893          */
894         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
895                 rte_eth_convert_rx_offload_bitfield(
896                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
897         } else {
898                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
899                                             &local_conf.rxmode);
900         }
901
902         /* Copy the dev_conf parameter into the dev structure */
903         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
904
905         /*
906          * Check that the numbers of RX and TX queues are not greater
907          * than the maximum number of RX and TX queues supported by the
908          * configured device.
909          */
910         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
911
912         if (nb_rx_q == 0 && nb_tx_q == 0) {
913                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
914                 return -EINVAL;
915         }
916
917         if (nb_rx_q > dev_info.max_rx_queues) {
918                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
919                                 port_id, nb_rx_q, dev_info.max_rx_queues);
920                 return -EINVAL;
921         }
922
923         if (nb_tx_q > dev_info.max_tx_queues) {
924                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
925                                 port_id, nb_tx_q, dev_info.max_tx_queues);
926                 return -EINVAL;
927         }
928
929         /* Check that the device supports requested interrupts */
930         if ((dev_conf->intr_conf.lsc == 1) &&
931                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
932                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
933                                         dev->device->driver->name);
934                         return -EINVAL;
935         }
936         if ((dev_conf->intr_conf.rmv == 1) &&
937             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
938                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
939                                     dev->device->driver->name);
940                 return -EINVAL;
941         }
942
943         /*
944          * If jumbo frames are enabled, check that the maximum RX packet
945          * length is supported by the configured device.
946          */
947         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
948                 if (dev_conf->rxmode.max_rx_pkt_len >
949                     dev_info.max_rx_pktlen) {
950                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
951                                 " > max valid value %u\n",
952                                 port_id,
953                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
954                                 (unsigned)dev_info.max_rx_pktlen);
955                         return -EINVAL;
956                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
957                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
958                                 " < min valid value %u\n",
959                                 port_id,
960                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
961                                 (unsigned)ETHER_MIN_LEN);
962                         return -EINVAL;
963                 }
964         } else {
965                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
966                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
967                         /* Use default value */
968                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
969                                                         ETHER_MAX_LEN;
970         }
971
972         /*
973          * Setup new number of RX/TX queues and reconfigure device.
974          */
975         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
976         if (diag != 0) {
977                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
978                                 port_id, diag);
979                 return diag;
980         }
981
982         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
983         if (diag != 0) {
984                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
985                                 port_id, diag);
986                 rte_eth_dev_rx_queue_config(dev, 0);
987                 return diag;
988         }
989
990         diag = (*dev->dev_ops->dev_configure)(dev);
991         if (diag != 0) {
992                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
993                                 port_id, diag);
994                 rte_eth_dev_rx_queue_config(dev, 0);
995                 rte_eth_dev_tx_queue_config(dev, 0);
996                 return eth_err(port_id, diag);
997         }
998
999         /* Initialize Rx profiling if enabled at compilation time. */
1000         diag = __rte_eth_profile_rx_init(port_id, dev);
1001         if (diag != 0) {
1002                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1003                                 port_id, diag);
1004                 rte_eth_dev_rx_queue_config(dev, 0);
1005                 rte_eth_dev_tx_queue_config(dev, 0);
1006                 return eth_err(port_id, diag);
1007         }
1008
1009         return 0;
1010 }
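
/*
 * Configuration sketch using the offloads API (ignore_offload_bitfield set
 * so the legacy bit-field members are not read); one Rx and one Tx queue:
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.max_rx_pkt_len = ETHER_MAX_LEN,
 *			.ignore_offload_bitfield = 1,
 *			.offloads = DEV_RX_OFFLOAD_CHECKSUM,
 *		},
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */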
1011
1012 void
1013 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1014 {
1015         if (dev->data->dev_started) {
1016                 RTE_PMD_DEBUG_TRACE(
1017                         "port %d must be stopped to allow reset\n",
1018                         dev->data->port_id);
1019                 return;
1020         }
1021
1022         rte_eth_dev_rx_queue_config(dev, 0);
1023         rte_eth_dev_tx_queue_config(dev, 0);
1024
1025         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1026 }
1027
1028 static void
1029 rte_eth_dev_config_restore(uint16_t port_id)
1030 {
1031         struct rte_eth_dev *dev;
1032         struct rte_eth_dev_info dev_info;
1033         struct ether_addr *addr;
1034         uint16_t i;
1035         uint32_t pool = 0;
1036         uint64_t pool_mask;
1037
1038         dev = &rte_eth_devices[port_id];
1039
1040         rte_eth_dev_info_get(port_id, &dev_info);
1041
1042         /* replay MAC address configuration including default MAC */
1043         addr = &dev->data->mac_addrs[0];
1044         if (*dev->dev_ops->mac_addr_set != NULL)
1045                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1046         else if (*dev->dev_ops->mac_addr_add != NULL)
1047                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1048
1049         if (*dev->dev_ops->mac_addr_add != NULL) {
1050                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1051                         addr = &dev->data->mac_addrs[i];
1052
1053                         /* skip zero address */
1054                         if (is_zero_ether_addr(addr))
1055                                 continue;
1056
1057                         pool = 0;
1058                         pool_mask = dev->data->mac_pool_sel[i];
1059
1060                         do {
1061                                 if (pool_mask & 1ULL)
1062                                         (*dev->dev_ops->mac_addr_add)(dev,
1063                                                 addr, i, pool);
1064                                 pool_mask >>= 1;
1065                                 pool++;
1066                         } while (pool_mask);
1067                 }
1068         }
1069
1070         /* replay promiscuous configuration */
1071         if (rte_eth_promiscuous_get(port_id) == 1)
1072                 rte_eth_promiscuous_enable(port_id);
1073         else if (rte_eth_promiscuous_get(port_id) == 0)
1074                 rte_eth_promiscuous_disable(port_id);
1075
1076         /* replay all multicast configuration */
1077         if (rte_eth_allmulticast_get(port_id) == 1)
1078                 rte_eth_allmulticast_enable(port_id);
1079         else if (rte_eth_allmulticast_get(port_id) == 0)
1080                 rte_eth_allmulticast_disable(port_id);
1081 }
1082
1083 int
1084 rte_eth_dev_start(uint16_t port_id)
1085 {
1086         struct rte_eth_dev *dev;
1087         int diag;
1088
1089         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1090
1091         dev = &rte_eth_devices[port_id];
1092
1093         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1094
1095         if (dev->data->dev_started != 0) {
1096                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1097                         " already started\n",
1098                         port_id);
1099                 return 0;
1100         }
1101
1102         diag = (*dev->dev_ops->dev_start)(dev);
1103         if (diag == 0)
1104                 dev->data->dev_started = 1;
1105         else
1106                 return eth_err(port_id, diag);
1107
1108         rte_eth_dev_config_restore(port_id);
1109
1110         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1111                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1112                 (*dev->dev_ops->link_update)(dev, 0);
1113         }
1114         return 0;
1115 }
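
/*
 * Typical bring-up order for one port (sketch, error handling omitted):
 * configure, set up every Rx/Tx queue, then start. Queue setup is shown
 * further below with rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup().
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *	... per-queue Rx/Tx setup ...
 *	rte_eth_dev_start(port_id);
 *	rte_eth_promiscuous_enable(port_id);
 */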
1116
1117 void
1118 rte_eth_dev_stop(uint16_t port_id)
1119 {
1120         struct rte_eth_dev *dev;
1121
1122         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1123         dev = &rte_eth_devices[port_id];
1124
1125         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1126
1127         if (dev->data->dev_started == 0) {
1128                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1129                         " already stopped\n",
1130                         port_id);
1131                 return;
1132         }
1133
1134         dev->data->dev_started = 0;
1135         (*dev->dev_ops->dev_stop)(dev);
1136 }
1137
1138 int
1139 rte_eth_dev_set_link_up(uint16_t port_id)
1140 {
1141         struct rte_eth_dev *dev;
1142
1143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1144
1145         dev = &rte_eth_devices[port_id];
1146
1147         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1148         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1149 }
1150
1151 int
1152 rte_eth_dev_set_link_down(uint16_t port_id)
1153 {
1154         struct rte_eth_dev *dev;
1155
1156         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1157
1158         dev = &rte_eth_devices[port_id];
1159
1160         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1161         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1162 }
1163
1164 void
1165 rte_eth_dev_close(uint16_t port_id)
1166 {
1167         struct rte_eth_dev *dev;
1168
1169         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1170         dev = &rte_eth_devices[port_id];
1171
1172         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1173         dev->data->dev_started = 0;
1174         (*dev->dev_ops->dev_close)(dev);
1175
1176         dev->data->nb_rx_queues = 0;
1177         rte_free(dev->data->rx_queues);
1178         dev->data->rx_queues = NULL;
1179         dev->data->nb_tx_queues = 0;
1180         rte_free(dev->data->tx_queues);
1181         dev->data->tx_queues = NULL;
1182 }
1183
1184 int
1185 rte_eth_dev_reset(uint16_t port_id)
1186 {
1187         struct rte_eth_dev *dev;
1188         int ret;
1189
1190         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1191         dev = &rte_eth_devices[port_id];
1192
1193         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1194
1195         rte_eth_dev_stop(port_id);
1196         ret = dev->dev_ops->dev_reset(dev);
1197
1198         return eth_err(port_id, ret);
1199 }
1200
1201 int __rte_experimental
1202 rte_eth_dev_is_removed(uint16_t port_id)
1203 {
1204         struct rte_eth_dev *dev;
1205         int ret;
1206
1207         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1208
1209         dev = &rte_eth_devices[port_id];
1210
1211         if (dev->state == RTE_ETH_DEV_REMOVED)
1212                 return 1;
1213
1214         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1215
1216         ret = dev->dev_ops->is_removed(dev);
1217         if (ret != 0)
1218                 /* Device is physically removed. */
1219                 dev->state = RTE_ETH_DEV_REMOVED;
1220
1221         return ret;
1222 }
1223
1224 int
1225 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1226                        uint16_t nb_rx_desc, unsigned int socket_id,
1227                        const struct rte_eth_rxconf *rx_conf,
1228                        struct rte_mempool *mp)
1229 {
1230         int ret;
1231         uint32_t mbp_buf_size;
1232         struct rte_eth_dev *dev;
1233         struct rte_eth_dev_info dev_info;
1234         struct rte_eth_rxconf local_conf;
1235         void **rxq;
1236
1237         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1238
1239         dev = &rte_eth_devices[port_id];
1240         if (rx_queue_id >= dev->data->nb_rx_queues) {
1241                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1242                 return -EINVAL;
1243         }
1244
1245         if (dev->data->dev_started) {
1246                 RTE_PMD_DEBUG_TRACE(
1247                     "port %d must be stopped to allow configuration\n", port_id);
1248                 return -EBUSY;
1249         }
1250
1251         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1252         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1253
1254         /*
1255          * Check the size of the mbuf data buffer.
1256          * This value must be provided in the private data of the memory pool.
1257          * First check that the memory pool has a valid private data.
1258          */
1259         rte_eth_dev_info_get(port_id, &dev_info);
1260         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1261                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1262                                 mp->name, (int) mp->private_data_size,
1263                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1264                 return -ENOSPC;
1265         }
1266         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1267
1268         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1269                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1270                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1271                                 "=%d)\n",
1272                                 mp->name,
1273                                 (int)mbp_buf_size,
1274                                 (int)(RTE_PKTMBUF_HEADROOM +
1275                                       dev_info.min_rx_bufsize),
1276                                 (int)RTE_PKTMBUF_HEADROOM,
1277                                 (int)dev_info.min_rx_bufsize);
1278                 return -EINVAL;
1279         }
1280
1281         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1282                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1283                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1284
1285                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1286                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1287                         nb_rx_desc,
1288                         dev_info.rx_desc_lim.nb_max,
1289                         dev_info.rx_desc_lim.nb_min,
1290                         dev_info.rx_desc_lim.nb_align);
1291                 return -EINVAL;
1292         }
1293
1294         rxq = dev->data->rx_queues;
1295         if (rxq[rx_queue_id]) {
1296                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1297                                         -ENOTSUP);
1298                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1299                 rxq[rx_queue_id] = NULL;
1300         }
1301
1302         if (rx_conf == NULL)
1303                 rx_conf = &dev_info.default_rxconf;
1304
1305         local_conf = *rx_conf;
1306         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1307                 /**
1308                  * Reflect port offloads to queue offloads in order for
1309                  * offloads to not be discarded.
1310                  */
1311                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1312                                                     &local_conf.offloads);
1313         }
1314
1315         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1316                                               socket_id, &local_conf, mp);
1317         if (!ret) {
1318                 if (!dev->data->min_rx_buf_size ||
1319                     dev->data->min_rx_buf_size > mbp_buf_size)
1320                         dev->data->min_rx_buf_size = mbp_buf_size;
1321         }
1322
1323         return eth_err(port_id, ret);
1324 }
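
/*
 * Rx queue setup sketch: one queue fed from a freshly created mbuf pool;
 * pool size, cache size and ring size are example values only:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_eth_dev_socket_id(port_id));
 *	if (mp == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */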
1325
1326 /**
1327  * Convert legacy txq_flags to the Tx offloads API.
1328  */
1329 static void
1330 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1331 {
1332         uint64_t offloads = 0;
1333
1334         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1335                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1336         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1337                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1338         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1339                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1340         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1341                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1342         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1343                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1344         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1345             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1346                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1347
1348         *tx_offloads = offloads;
1349 }
1350
1351 /**
1352  * Convert Tx offloads API flags back to legacy txq_flags.
1353  */
1354 static void
1355 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1356 {
1357         uint32_t flags = 0;
1358
1359         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1360                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1361         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1362                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1363         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1364                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1365         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1366                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1367         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1368                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1369         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1370                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1371
1372         *txq_flags = flags;
1373 }
1374
1375 int
1376 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1377                        uint16_t nb_tx_desc, unsigned int socket_id,
1378                        const struct rte_eth_txconf *tx_conf)
1379 {
1380         struct rte_eth_dev *dev;
1381         struct rte_eth_dev_info dev_info;
1382         struct rte_eth_txconf local_conf;
1383         void **txq;
1384
1385         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1386
1387         dev = &rte_eth_devices[port_id];
1388         if (tx_queue_id >= dev->data->nb_tx_queues) {
1389                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1390                 return -EINVAL;
1391         }
1392
1393         if (dev->data->dev_started) {
1394                 RTE_PMD_DEBUG_TRACE(
1395                     "port %d must be stopped to allow configuration\n", port_id);
1396                 return -EBUSY;
1397         }
1398
1399         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1400         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1401
1402         rte_eth_dev_info_get(port_id, &dev_info);
1403
1404         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1405             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1406             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1407                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1408                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1409                                 nb_tx_desc,
1410                                 dev_info.tx_desc_lim.nb_max,
1411                                 dev_info.tx_desc_lim.nb_min,
1412                                 dev_info.tx_desc_lim.nb_align);
1413                 return -EINVAL;
1414         }
1415
1416         txq = dev->data->tx_queues;
1417         if (txq[tx_queue_id]) {
1418                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1419                                         -ENOTSUP);
1420                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1421                 txq[tx_queue_id] = NULL;
1422         }
1423
1424         if (tx_conf == NULL)
1425                 tx_conf = &dev_info.default_txconf;
1426
1427         /*
1428          * Convert between the offloads API to enable PMDs to support
1429          * only one of them.
1430          */
1431         local_conf = *tx_conf;
1432         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1433                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1434                                              &local_conf.txq_flags);
1435                 /* Keep the ignore flag. */
1436                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1437         } else {
1438                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1439                                           &local_conf.offloads);
1440         }
1441
1442         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1443                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1444 }
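
/*
 * Tx queue setup sketch: start from the PMD's default txconf and opt in to
 * the offloads API with ETH_TXQ_FLAGS_IGNORE (assuming the port reports
 * DEV_TX_OFFLOAD_MBUF_FAST_FREE in its tx_offload_capa):
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
 *	txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &txconf) < 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue setup failed\n");
 */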
1445
1446 void
1447 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1448                 void *userdata __rte_unused)
1449 {
1450         unsigned i;
1451
1452         for (i = 0; i < unsent; i++)
1453                 rte_pktmbuf_free(pkts[i]);
1454 }
1455
1456 void
1457 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1458                 void *userdata)
1459 {
1460         uint64_t *count = userdata;
1461         unsigned i;
1462
1463         for (i = 0; i < unsent; i++)
1464                 rte_pktmbuf_free(pkts[i]);
1465
1466         *count += unsent;
1467 }
1468
1469 int
1470 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1471                 buffer_tx_error_fn cbfn, void *userdata)
1472 {
1473         buffer->error_callback = cbfn;
1474         buffer->error_userdata = userdata;
1475         return 0;
1476 }
1477
1478 int
1479 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1480 {
1481         int ret = 0;
1482
1483         if (buffer == NULL)
1484                 return -EINVAL;
1485
1486         buffer->size = size;
1487         if (buffer->error_callback == NULL) {
1488                 ret = rte_eth_tx_buffer_set_err_callback(
1489                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1490         }
1491
1492         return ret;
1493 }
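
/*
 * Tx buffering sketch: allocate a buffer for up to 32 packets, count drops
 * with rte_eth_tx_buffer_count_callback(), and flush explicitly:
 *
 *	static uint64_t drop_count;
 *	struct rte_eth_dev_tx_buffer *buffer;
 *
 *	buffer = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buffer, 32);
 *	rte_eth_tx_buffer_set_err_callback(buffer,
 *			rte_eth_tx_buffer_count_callback, &drop_count);
 *
 *	rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */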
1494
1495 int
1496 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1497 {
1498         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1499         int ret;
1500
1501         /* Validate Input Data. Bail if not valid or not supported. */
1502         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1503         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1504
1505         /* Call driver to free pending mbufs. */
1506         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1507                                                free_cnt);
1508         return eth_err(port_id, ret);
1509 }
1510
1511 void
1512 rte_eth_promiscuous_enable(uint16_t port_id)
1513 {
1514         struct rte_eth_dev *dev;
1515
1516         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1517         dev = &rte_eth_devices[port_id];
1518
1519         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1520         (*dev->dev_ops->promiscuous_enable)(dev);
1521         dev->data->promiscuous = 1;
1522 }
1523
1524 void
1525 rte_eth_promiscuous_disable(uint16_t port_id)
1526 {
1527         struct rte_eth_dev *dev;
1528
1529         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1530         dev = &rte_eth_devices[port_id];
1531
1532         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1533         dev->data->promiscuous = 0;
1534         (*dev->dev_ops->promiscuous_disable)(dev);
1535 }
1536
1537 int
1538 rte_eth_promiscuous_get(uint16_t port_id)
1539 {
1540         struct rte_eth_dev *dev;
1541
1542         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1543
1544         dev = &rte_eth_devices[port_id];
1545         return dev->data->promiscuous;
1546 }
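
/*
 * Illustrative application-side sketch (not compiled here): toggling
 * promiscuous mode and checking the software flag kept by the functions
 * above. Assumes port_id refers to a valid, configured port.
 *
 *        rte_eth_promiscuous_enable(port_id);
 *        if (rte_eth_promiscuous_get(port_id) != 1)
 *                printf("port %u: promiscuous mode not enabled\n", port_id);
 */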
1547
1548 void
1549 rte_eth_allmulticast_enable(uint16_t port_id)
1550 {
1551         struct rte_eth_dev *dev;
1552
1553         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1554         dev = &rte_eth_devices[port_id];
1555
1556         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1557         (*dev->dev_ops->allmulticast_enable)(dev);
1558         dev->data->all_multicast = 1;
1559 }
1560
1561 void
1562 rte_eth_allmulticast_disable(uint16_t port_id)
1563 {
1564         struct rte_eth_dev *dev;
1565
1566         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1567         dev = &rte_eth_devices[port_id];
1568
1569         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1570         dev->data->all_multicast = 0;
1571         (*dev->dev_ops->allmulticast_disable)(dev);
1572 }
1573
1574 int
1575 rte_eth_allmulticast_get(uint16_t port_id)
1576 {
1577         struct rte_eth_dev *dev;
1578
1579         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1580
1581         dev = &rte_eth_devices[port_id];
1582         return dev->data->all_multicast;
1583 }
1584
1585 static inline int
1586 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1587                                 struct rte_eth_link *link)
1588 {
1589         struct rte_eth_link *dst = link;
1590         struct rte_eth_link *src = &(dev->data->dev_link);
1591
1592         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1593                                         *(uint64_t *)src) == 0)
1594                 return -1;
1595
1596         return 0;
1597 }
1598
1599 void
1600 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1601 {
1602         struct rte_eth_dev *dev;
1603
1604         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1605         dev = &rte_eth_devices[port_id];
1606
1607         if (dev->data->dev_conf.intr_conf.lsc != 0) {
1608                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1609         } else {
1610                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1611                 (*dev->dev_ops->link_update)(dev, 1);
1612                 *eth_link = dev->data->dev_link;
1613         }
1614 }
1615
1616 void
1617 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1618 {
1619         struct rte_eth_dev *dev;
1620
1621         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1622         dev = &rte_eth_devices[port_id];
1623
1624         if (dev->data->dev_conf.intr_conf.lsc != 0) {
1625                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1626         } else {
1627                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1628                 (*dev->dev_ops->link_update)(dev, 0);
1629                 *eth_link = dev->data->dev_link;
1630         }
1631 }
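
/*
 * Illustrative application-side sketch (not compiled here): polling link
 * state without waiting, i.e. the variant implemented just above. Assumes
 * a started port; ETH_LINK_UP comes from rte_ethdev.h.
 *
 *        struct rte_eth_link link;
 *
 *        rte_eth_link_get_nowait(port_id, &link);
 *        if (link.link_status == ETH_LINK_UP)
 *                printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 *        else
 *                printf("port %u down\n", port_id);
 */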
1632
1633 int
1634 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1635 {
1636         struct rte_eth_dev *dev;
1637
1638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1639
1640         dev = &rte_eth_devices[port_id];
1641         memset(stats, 0, sizeof(*stats));
1642
1643         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1644         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1645         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1646 }
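
/*
 * Illustrative application-side sketch (not compiled here): reading the
 * basic counters via rte_eth_stats_get() and clearing them with
 * rte_eth_stats_reset() defined below. PRIu64 comes from <inttypes.h>.
 *
 *        struct rte_eth_stats stats;
 *
 *        if (rte_eth_stats_get(port_id, &stats) == 0)
 *                printf("rx %" PRIu64 " tx %" PRIu64 " missed %" PRIu64 "\n",
 *                       stats.ipackets, stats.opackets, stats.imissed);
 *        rte_eth_stats_reset(port_id);
 */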
1647
1648 int
1649 rte_eth_stats_reset(uint16_t port_id)
1650 {
1651         struct rte_eth_dev *dev;
1652
1653         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1654         dev = &rte_eth_devices[port_id];
1655
1656         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1657         (*dev->dev_ops->stats_reset)(dev);
1658         dev->data->rx_mbuf_alloc_failed = 0;
1659
1660         return 0;
1661 }
1662
1663 static inline int
1664 get_xstats_basic_count(struct rte_eth_dev *dev)
1665 {
1666         uint16_t nb_rxqs, nb_txqs;
1667         int count;
1668
1669         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1670         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1671
1672         count = RTE_NB_STATS;
1673         count += nb_rxqs * RTE_NB_RXQ_STATS;
1674         count += nb_txqs * RTE_NB_TXQ_STATS;
1675
1676         return count;
1677 }
1678
1679 static int
1680 get_xstats_count(uint16_t port_id)
1681 {
1682         struct rte_eth_dev *dev;
1683         int count;
1684
1685         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1686         dev = &rte_eth_devices[port_id];
1687         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1688                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1689                                 NULL, 0);
1690                 if (count < 0)
1691                         return eth_err(port_id, count);
1692         }
1693         if (dev->dev_ops->xstats_get_names != NULL) {
1694                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1695                 if (count < 0)
1696                         return eth_err(port_id, count);
1697         } else
1698                 count = 0;
1699
1701         count += get_xstats_basic_count(dev);
1702
1703         return count;
1704 }
1705
1706 int
1707 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1708                 uint64_t *id)
1709 {
1710         int cnt_xstats, idx_xstat;
1711
1712         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1713
1714         if (!id) {
1715                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1716                 return -ENOMEM;
1717         }
1718
1719         if (!xstat_name) {
1720                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1721                 return -ENOMEM;
1722         }
1723
1724         /* Get count */
1725         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1726         if (cnt_xstats < 0) {
1727                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1728                 return -ENODEV;
1729         }
1730
1731         /* Get id-name lookup table */
1732         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1733
1734         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1735                         port_id, xstats_names, cnt_xstats, NULL)) {
1736                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1737                 return -1;
1738         }
1739
1740         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1741                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1742                         *id = idx_xstat;
1743                         return 0;
1744                 }
1745         }
1746
1747         return -EINVAL;
1748 }
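
/*
 * Illustrative application-side sketch (not compiled here): resolving one
 * extended statistic by name and then fetching only that value through the
 * by-id interface implemented further down in this file.
 *
 *        uint64_t id, value;
 *
 *        if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *            rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *                printf("rx_good_packets = %" PRIu64 "\n", value);
 */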
1749
1750 /* retrieve basic stats names */
1751 static int
1752 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1753         struct rte_eth_xstat_name *xstats_names)
1754 {
1755         int cnt_used_entries = 0;
1756         uint32_t idx, id_queue;
1757         uint16_t num_q;
1758
1759         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1760                 snprintf(xstats_names[cnt_used_entries].name,
1761                         sizeof(xstats_names[0].name),
1762                         "%s", rte_stats_strings[idx].name);
1763                 cnt_used_entries++;
1764         }
1765         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1766         for (id_queue = 0; id_queue < num_q; id_queue++) {
1767                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1768                         snprintf(xstats_names[cnt_used_entries].name,
1769                                 sizeof(xstats_names[0].name),
1770                                 "rx_q%u%s",
1771                                 id_queue, rte_rxq_stats_strings[idx].name);
1772                         cnt_used_entries++;
1773                 }
1774
1775         }
1776         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1777         for (id_queue = 0; id_queue < num_q; id_queue++) {
1778                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1779                         snprintf(xstats_names[cnt_used_entries].name,
1780                                 sizeof(xstats_names[0].name),
1781                                 "tx_q%u%s",
1782                                 id_queue, rte_txq_stats_strings[idx].name);
1783                         cnt_used_entries++;
1784                 }
1785         }
1786         return cnt_used_entries;
1787 }
1788
1789 /* retrieve ethdev extended statistics names */
1790 int
1791 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1792         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1793         uint64_t *ids)
1794 {
1795         struct rte_eth_xstat_name *xstats_names_copy;
1796         unsigned int no_basic_stat_requested = 1;
1797         unsigned int no_ext_stat_requested = 1;
1798         unsigned int expected_entries;
1799         unsigned int basic_count;
1800         struct rte_eth_dev *dev;
1801         unsigned int i;
1802         int ret;
1803
1804         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1805         dev = &rte_eth_devices[port_id];
1806
1807         basic_count = get_xstats_basic_count(dev);
1808         ret = get_xstats_count(port_id);
1809         if (ret < 0)
1810                 return ret;
1811         expected_entries = (unsigned int)ret;
1812
1813         /* Return max number of stats if no ids given */
1814         if (!ids) {
1815                 if (!xstats_names)
1816                         return expected_entries;
1817                 else if (xstats_names && size < expected_entries)
1818                         return expected_entries;
1819         }
1820
1821         if (ids && !xstats_names)
1822                 return -EINVAL;
1823
1824         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1825                 uint64_t ids_copy[size];
1826
1827                 for (i = 0; i < size; i++) {
1828                         if (ids[i] < basic_count) {
1829                                 no_basic_stat_requested = 0;
1830                                 break;
1831                         }
1832
1833                         /*
1834                          * Convert ids to xstats ids that the PMD knows;
1835                          * ids known by the user are basic + extended stats.
1836                          */
1837                         ids_copy[i] = ids[i] - basic_count;
1838                 }
1839
1840                 if (no_basic_stat_requested)
1841                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1842                                         xstats_names, ids_copy, size);
1843         }
1844
1845         /* Retrieve all stats */
1846         if (!ids) {
1847                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1848                                 expected_entries);
1849                 if (num_stats < 0 || num_stats > (int)expected_entries)
1850                         return num_stats;
1851                 else
1852                         return expected_entries;
1853         }
1854
1855         xstats_names_copy = calloc(expected_entries,
1856                 sizeof(struct rte_eth_xstat_name));
1857
1858         if (!xstats_names_copy) {
1859                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
1860                 return -ENOMEM;
1861         }
1862
1863         if (ids) {
1864                 for (i = 0; i < size; i++) {
1865                         if (ids[i] >= basic_count) {
1866                                 no_ext_stat_requested = 0;
1867                                 break;
1868                         }
1869                 }
1870         }
1871
1872         /* Fill xstats_names_copy structure */
1873         if (ids && no_ext_stat_requested) {
1874                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
1875         } else {
1876                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
1877                         expected_entries);
1878                 if (ret < 0) {
1879                         free(xstats_names_copy);
1880                         return ret;
1881                 }
1882         }
1883
1884         /* Filter stats */
1885         for (i = 0; i < size; i++) {
1886                 if (ids[i] >= expected_entries) {
1887                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1888                         free(xstats_names_copy);
1889                         return -1;
1890                 }
1891                 xstats_names[i] = xstats_names_copy[ids[i]];
1892         }
1893
1894         free(xstats_names_copy);
1895         return size;
1896 }
1897
1898 int
1899 rte_eth_xstats_get_names(uint16_t port_id,
1900         struct rte_eth_xstat_name *xstats_names,
1901         unsigned int size)
1902 {
1903         struct rte_eth_dev *dev;
1904         int cnt_used_entries;
1905         int cnt_expected_entries;
1906         int cnt_driver_entries;
1907
1908         cnt_expected_entries = get_xstats_count(port_id);
1909         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1910                         (int)size < cnt_expected_entries)
1911                 return cnt_expected_entries;
1912
1913         /* port_id checked in get_xstats_count() */
1914         dev = &rte_eth_devices[port_id];
1915
1916         cnt_used_entries = rte_eth_basic_stats_get_names(
1917                 dev, xstats_names);
1918
1919         if (dev->dev_ops->xstats_get_names != NULL) {
1920                 /* If there are any driver-specific xstats, append them
1921                  * to the end of the list.
1922                  */
1923                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1924                         dev,
1925                         xstats_names + cnt_used_entries,
1926                         size - cnt_used_entries);
1927                 if (cnt_driver_entries < 0)
1928                         return eth_err(port_id, cnt_driver_entries);
1929                 cnt_used_entries += cnt_driver_entries;
1930         }
1931
1932         return cnt_used_entries;
1933 }
1934
1935
1936 static int
1937 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
1938 {
1939         struct rte_eth_dev *dev;
1940         struct rte_eth_stats eth_stats;
1941         unsigned int count = 0, i, q;
1942         uint64_t val, *stats_ptr;
1943         uint16_t nb_rxqs, nb_txqs;
1944         int ret;
1945
1946         ret = rte_eth_stats_get(port_id, &eth_stats);
1947         if (ret < 0)
1948                 return ret;
1949
1950         dev = &rte_eth_devices[port_id];
1951
1952         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1953         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1954
1955         /* global stats */
1956         for (i = 0; i < RTE_NB_STATS; i++) {
1957                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1958                                         rte_stats_strings[i].offset);
1959                 val = *stats_ptr;
1960                 xstats[count++].value = val;
1961         }
1962
1963         /* per-rxq stats */
1964         for (q = 0; q < nb_rxqs; q++) {
1965                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1966                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1967                                         rte_rxq_stats_strings[i].offset +
1968                                         q * sizeof(uint64_t));
1969                         val = *stats_ptr;
1970                         xstats[count++].value = val;
1971                 }
1972         }
1973
1974         /* per-txq stats */
1975         for (q = 0; q < nb_txqs; q++) {
1976                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1977                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1978                                         rte_txq_stats_strings[i].offset +
1979                                         q * sizeof(uint64_t));
1980                         val = *stats_ptr;
1981                         xstats[count++].value = val;
1982                 }
1983         }
1984         return count;
1985 }
1986
1987 /* retrieve ethdev extended statistics */
1988 int
1989 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1990                          uint64_t *values, unsigned int size)
1991 {
1992         unsigned int no_basic_stat_requested = 1;
1993         unsigned int no_ext_stat_requested = 1;
1994         unsigned int num_xstats_filled;
1995         unsigned int basic_count;
1996         uint16_t expected_entries;
1997         struct rte_eth_dev *dev;
1998         unsigned int i;
1999         int ret;
2000
2001         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2002         ret = get_xstats_count(port_id);
2003         if (ret < 0)
2004                 return ret;
2005         expected_entries = (uint16_t)ret;
2006         struct rte_eth_xstat xstats[expected_entries];
2007         dev = &rte_eth_devices[port_id];
2008         basic_count = get_xstats_basic_count(dev);
2009
2010         /* Return max number of stats if no ids given */
2011         if (!ids) {
2012                 if (!values)
2013                         return expected_entries;
2014                 else if (values && size < expected_entries)
2015                         return expected_entries;
2016         }
2017
2018         if (ids && !values)
2019                 return -EINVAL;
2020
2021         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2023                 uint64_t ids_copy[size];
2024
2025                 for (i = 0; i < size; i++) {
2026                         if (ids[i] < basic_count) {
2027                                 no_basic_stat_requested = 0;
2028                                 break;
2029                         }
2030
2031                         /*
2032                          * Convert ids to xstats ids that the PMD knows;
2033                          * ids known by the user are basic + extended stats.
2034                          */
2035                         ids_copy[i] = ids[i] - basic_count;
2036                 }
2037
2038                 if (no_basic_stat_requested)
2039                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2040                                         values, size);
2041         }
2042
2043         if (ids) {
2044                 for (i = 0; i < size; i++) {
2045                         if (ids[i] >= basic_count) {
2046                                 no_ext_stat_requested = 0;
2047                                 break;
2048                         }
2049                 }
2050         }
2051
2052         /* Fill the xstats structure */
2053         if (ids && no_ext_stat_requested)
2054                 ret = rte_eth_basic_stats_get(port_id, xstats);
2055         else
2056                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2057
2058         if (ret < 0)
2059                 return ret;
2060         num_xstats_filled = (unsigned int)ret;
2061
2062         /* Return all stats */
2063         if (!ids) {
2064                 for (i = 0; i < num_xstats_filled; i++)
2065                         values[i] = xstats[i].value;
2066                 return expected_entries;
2067         }
2068
2069         /* Filter stats */
2070         for (i = 0; i < size; i++) {
2071                 if (ids[i] >= expected_entries) {
2072                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2073                         return -1;
2074                 }
2075                 values[i] = xstats[ids[i]].value;
2076         }
2077         return size;
2078 }
2079
2080 int
2081 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2082         unsigned int n)
2083 {
2084         struct rte_eth_dev *dev;
2085         unsigned int count = 0, i;
2086         signed int xcount = 0;
2087         uint16_t nb_rxqs, nb_txqs;
2088         int ret;
2089
2090         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2091
2092         dev = &rte_eth_devices[port_id];
2093
2094         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2095         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2096
2097         /* Return generic statistics */
2098         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2099                 (nb_txqs * RTE_NB_TXQ_STATS);
2100
2101         /* implemented by the driver */
2102         if (dev->dev_ops->xstats_get != NULL) {
2103                 /* Retrieve the xstats from the driver at the end of the
2104                  * xstats struct.
2105                  */
2106                 xcount = (*dev->dev_ops->xstats_get)(dev,
2107                                      xstats ? xstats + count : NULL,
2108                                      (n > count) ? n - count : 0);
2109
2110                 if (xcount < 0)
2111                         return eth_err(port_id, xcount);
2112         }
2113
2114         if (n < count + xcount || xstats == NULL)
2115                 return count + xcount;
2116
2117         /* now fill the xstats structure */
2118         ret = rte_eth_basic_stats_get(port_id, xstats);
2119         if (ret < 0)
2120                 return ret;
2121         count = ret;
2122
2123         for (i = 0; i < count; i++)
2124                 xstats[i].id = i;
2125         /* add an offset to driver-specific stats */
2126         for ( ; i < count + xcount; i++)
2127                 xstats[i].id += count;
2128
2129         return count + xcount;
2130 }
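
/*
 * Illustrative application-side sketch (not compiled here): the usual
 * two-call pattern for the generic xstats API above. The first call with a
 * NULL array returns the required size; the caller then allocates and
 * fetches names and values with matching indexes.
 *
 *        int i, n;
 *
 *        n = rte_eth_xstats_get(port_id, NULL, 0);
 *        if (n > 0) {
 *                struct rte_eth_xstat *xstats = calloc(n, sizeof(*xstats));
 *                struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *                if (xstats != NULL && names != NULL &&
 *                    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *                    rte_eth_xstats_get(port_id, xstats, n) == n)
 *                        for (i = 0; i < n; i++)
 *                                printf("%s: %" PRIu64 "\n",
 *                                       names[i].name, xstats[i].value);
 *                free(xstats);
 *                free(names);
 *        }
 */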
2131
2132 /* reset ethdev extended statistics */
2133 void
2134 rte_eth_xstats_reset(uint16_t port_id)
2135 {
2136         struct rte_eth_dev *dev;
2137
2138         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2139         dev = &rte_eth_devices[port_id];
2140
2141         /* implemented by the driver */
2142         if (dev->dev_ops->xstats_reset != NULL) {
2143                 (*dev->dev_ops->xstats_reset)(dev);
2144                 return;
2145         }
2146
2147         /* fallback to default */
2148         rte_eth_stats_reset(port_id);
2149 }
2150
2151 static int
2152 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2153                 uint8_t is_rx)
2154 {
2155         struct rte_eth_dev *dev;
2156
2157         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2158
2159         dev = &rte_eth_devices[port_id];
2160
2161         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2162         return (*dev->dev_ops->queue_stats_mapping_set)
2163                         (dev, queue_id, stat_idx, is_rx);
2164 }
2165
2166
2167 int
2168 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2169                 uint8_t stat_idx)
2170 {
2171         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2172                                                 stat_idx, STAT_QMAP_TX));
2173 }
2174
2175
2176 int
2177 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2178                 uint8_t stat_idx)
2179 {
2180         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2181                                                 stat_idx, STAT_QMAP_RX));
2182 }
2183
2184 int
2185 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2186 {
2187         struct rte_eth_dev *dev;
2188
2189         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2190         dev = &rte_eth_devices[port_id];
2191
2192         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2193         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2194                                                         fw_version, fw_size));
2195 }
2196
2197 void
2198 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2199 {
2200         struct rte_eth_dev *dev;
2201         const struct rte_eth_desc_lim lim = {
2202                 .nb_max = UINT16_MAX,
2203                 .nb_min = 0,
2204                 .nb_align = 1,
2205         };
2206
2207         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2208         dev = &rte_eth_devices[port_id];
2209
2210         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2211         dev_info->rx_desc_lim = lim;
2212         dev_info->tx_desc_lim = lim;
2213
2214         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2215         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2216         dev_info->driver_name = dev->device->driver->name;
2217         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2218         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2219 }
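
/*
 * Illustrative application-side sketch (not compiled here): querying device
 * capabilities before configuring queues. The descriptor limits are filled
 * with permissive defaults above even if the PMD does not override them.
 *
 *        struct rte_eth_dev_info dev_info;
 *
 *        rte_eth_dev_info_get(port_id, &dev_info);
 *        printf("driver %s: max rxq %u, max txq %u\n",
 *               dev_info.driver_name, dev_info.max_rx_queues,
 *               dev_info.max_tx_queues);
 */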
2220
2221 int
2222 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2223                                  uint32_t *ptypes, int num)
2224 {
2225         int i, j;
2226         struct rte_eth_dev *dev;
2227         const uint32_t *all_ptypes;
2228
2229         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2230         dev = &rte_eth_devices[port_id];
2231         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2232         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2233
2234         if (!all_ptypes)
2235                 return 0;
2236
2237         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2238                 if (all_ptypes[i] & ptype_mask) {
2239                         if (j < num)
2240                                 ptypes[j] = all_ptypes[i];
2241                         j++;
2242                 }
2243
2244         return j;
2245 }
2246
2247 void
2248 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2249 {
2250         struct rte_eth_dev *dev;
2251
2252         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2253         dev = &rte_eth_devices[port_id];
2254         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2255 }
2256
2257
2258 int
2259 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2260 {
2261         struct rte_eth_dev *dev;
2262
2263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2264
2265         dev = &rte_eth_devices[port_id];
2266         *mtu = dev->data->mtu;
2267         return 0;
2268 }
2269
2270 int
2271 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2272 {
2273         int ret;
2274         struct rte_eth_dev *dev;
2275
2276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2277         dev = &rte_eth_devices[port_id];
2278         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2279
2280         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2281         if (!ret)
2282                 dev->data->mtu = mtu;
2283
2284         return eth_err(port_id, ret);
2285 }
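
/*
 * Illustrative application-side sketch (not compiled here): raising the MTU
 * and reading back the cached value; the cache is only updated above when
 * the PMD accepts the new size (jumbo frames may need to be enabled first).
 *
 *        uint16_t mtu;
 *
 *        if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *            rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *                printf("port %u MTU is now %u\n", port_id, mtu);
 */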
2286
2287 int
2288 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2289 {
2290         struct rte_eth_dev *dev;
2291         int ret;
2292
2293         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2294         dev = &rte_eth_devices[port_id];
2295         if (!(dev->data->dev_conf.rxmode.offloads &
2296               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2297                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2298                 return -ENOSYS;
2299         }
2300
2301         if (vlan_id > 4095) {
2302                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2303                                 port_id, (unsigned) vlan_id);
2304                 return -EINVAL;
2305         }
2306         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2307
2308         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2309         if (ret == 0) {
2310                 struct rte_vlan_filter_conf *vfc;
2311                 int vidx;
2312                 int vbit;
2313
2314                 vfc = &dev->data->vlan_filter_conf;
2315                 vidx = vlan_id / 64;
2316                 vbit = vlan_id % 64;
2317
2318                 if (on)
2319                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2320                 else
2321                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2322         }
2323
2324         return eth_err(port_id, ret);
2325 }
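
/*
 * Illustrative application-side sketch (not compiled here): the function
 * above refuses to run unless DEV_RX_OFFLOAD_VLAN_FILTER was requested at
 * configure time, so a typical sequence looks like this (error handling
 * and queue setup omitted):
 *
 *        struct rte_eth_conf conf;
 *
 *        memset(&conf, 0, sizeof(conf));
 *        conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *        rte_eth_dev_configure(port_id, 1, 1, &conf);
 *        ... rx/tx queue setup and rte_eth_dev_start() ...
 *        rte_eth_dev_vlan_filter(port_id, 100, 1);
 */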
2326
2327 int
2328 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2329                                     int on)
2330 {
2331         struct rte_eth_dev *dev;
2332
2333         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2334         dev = &rte_eth_devices[port_id];
2335         if (rx_queue_id >= dev->data->nb_rx_queues) {
2336                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2337                 return -EINVAL;
2338         }
2339
2340         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2341         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2342
2343         return 0;
2344 }
2345
2346 int
2347 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2348                                 enum rte_vlan_type vlan_type,
2349                                 uint16_t tpid)
2350 {
2351         struct rte_eth_dev *dev;
2352
2353         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2354         dev = &rte_eth_devices[port_id];
2355         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2356
2357         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2358                                                                tpid));
2359 }
2360
2361 int
2362 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2363 {
2364         struct rte_eth_dev *dev;
2365         int ret = 0;
2366         int mask = 0;
2367         int cur, org = 0;
2368         uint64_t orig_offloads;
2369
2370         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2371         dev = &rte_eth_devices[port_id];
2372
2373         /* save original values in case of failure */
2374         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2375
2376         /* check which options were changed by the application */
2377         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2378         org = !!(dev->data->dev_conf.rxmode.offloads &
2379                  DEV_RX_OFFLOAD_VLAN_STRIP);
2380         if (cur != org) {
2381                 if (cur)
2382                         dev->data->dev_conf.rxmode.offloads |=
2383                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2384                 else
2385                         dev->data->dev_conf.rxmode.offloads &=
2386                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2387                 mask |= ETH_VLAN_STRIP_MASK;
2388         }
2389
2390         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2391         org = !!(dev->data->dev_conf.rxmode.offloads &
2392                  DEV_RX_OFFLOAD_VLAN_FILTER);
2393         if (cur != org) {
2394                 if (cur)
2395                         dev->data->dev_conf.rxmode.offloads |=
2396                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2397                 else
2398                         dev->data->dev_conf.rxmode.offloads &=
2399                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2400                 mask |= ETH_VLAN_FILTER_MASK;
2401         }
2402
2403         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2404         org = !!(dev->data->dev_conf.rxmode.offloads &
2405                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2406         if (cur != org) {
2407                 if (cur)
2408                         dev->data->dev_conf.rxmode.offloads |=
2409                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2410                 else
2411                         dev->data->dev_conf.rxmode.offloads &=
2412                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2413                 mask |= ETH_VLAN_EXTEND_MASK;
2414         }
2415
2416         /* no change */
2417         if (mask == 0)
2418                 return ret;
2419
2420         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2421
2422         /*
2423          * Convert to the offload bitfield API just in case the underlying PMD
2424          * still supports it.
2425          */
2426         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2427                                     &dev->data->dev_conf.rxmode);
2428         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2429         if (ret) {
2430                 /* hit an error, restore original values */
2431                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2432                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2433                                             &dev->data->dev_conf.rxmode);
2434         }
2435
2436         return eth_err(port_id, ret);
2437 }
2438
2439 int
2440 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2441 {
2442         struct rte_eth_dev *dev;
2443         int ret = 0;
2444
2445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2446         dev = &rte_eth_devices[port_id];
2447
2448         if (dev->data->dev_conf.rxmode.offloads &
2449             DEV_RX_OFFLOAD_VLAN_STRIP)
2450                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2451
2452         if (dev->data->dev_conf.rxmode.offloads &
2453             DEV_RX_OFFLOAD_VLAN_FILTER)
2454                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2455
2456         if (dev->data->dev_conf.rxmode.offloads &
2457             DEV_RX_OFFLOAD_VLAN_EXTEND)
2458                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2459
2460         return ret;
2461 }
2462
2463 int
2464 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2465 {
2466         struct rte_eth_dev *dev;
2467
2468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2469         dev = &rte_eth_devices[port_id];
2470         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2471
2472         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2473 }
2474
2475 int
2476 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2477 {
2478         struct rte_eth_dev *dev;
2479
2480         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2481         dev = &rte_eth_devices[port_id];
2482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2483         memset(fc_conf, 0, sizeof(*fc_conf));
2484         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2485 }
2486
2487 int
2488 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2489 {
2490         struct rte_eth_dev *dev;
2491
2492         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2493         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2494                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2495                 return -EINVAL;
2496         }
2497
2498         dev = &rte_eth_devices[port_id];
2499         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2500         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2501 }
2502
2503 int
2504 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2505                                    struct rte_eth_pfc_conf *pfc_conf)
2506 {
2507         struct rte_eth_dev *dev;
2508
2509         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2510         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2511                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2512                 return -EINVAL;
2513         }
2514
2515         dev = &rte_eth_devices[port_id];
2516         /* High water and low water validation are device-specific */
2517         if (*dev->dev_ops->priority_flow_ctrl_set)
2518                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2519                                         (dev, pfc_conf));
2520         return -ENOTSUP;
2521 }
2522
2523 static int
2524 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2525                         uint16_t reta_size)
2526 {
2527         uint16_t i, num;
2528
2529         if (!reta_conf)
2530                 return -EINVAL;
2531
2532         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2533         for (i = 0; i < num; i++) {
2534                 if (reta_conf[i].mask)
2535                         return 0;
2536         }
2537
2538         return -EINVAL;
2539 }
2540
2541 static int
2542 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2543                          uint16_t reta_size,
2544                          uint16_t max_rxq)
2545 {
2546         uint16_t i, idx, shift;
2547
2548         if (!reta_conf)
2549                 return -EINVAL;
2550
2551         if (max_rxq == 0) {
2552                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2553                 return -EINVAL;
2554         }
2555
2556         for (i = 0; i < reta_size; i++) {
2557                 idx = i / RTE_RETA_GROUP_SIZE;
2558                 shift = i % RTE_RETA_GROUP_SIZE;
2559                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2560                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2561                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2562                                 "the maximum rxq index: %u\n", idx, shift,
2563                                 reta_conf[idx].reta[shift], max_rxq);
2564                         return -EINVAL;
2565                 }
2566         }
2567
2568         return 0;
2569 }
2570
2571 int
2572 rte_eth_dev_rss_reta_update(uint16_t port_id,
2573                             struct rte_eth_rss_reta_entry64 *reta_conf,
2574                             uint16_t reta_size)
2575 {
2576         struct rte_eth_dev *dev;
2577         int ret;
2578
2579         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2580         /* Check mask bits */
2581         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2582         if (ret < 0)
2583                 return ret;
2584
2585         dev = &rte_eth_devices[port_id];
2586
2587         /* Check entry value */
2588         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2589                                 dev->data->nb_rx_queues);
2590         if (ret < 0)
2591                 return ret;
2592
2593         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2594         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2595                                                              reta_size));
2596 }
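
/*
 * Illustrative application-side sketch (not compiled here): spreading the
 * redirection table round-robin over the receive queues. reta_size and
 * nb_rx_queues are assumed to come from rte_eth_dev_info_get() and the
 * port configuration, with reta_size a multiple of RTE_RETA_GROUP_SIZE.
 *
 *        struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *        uint16_t i;
 *
 *        memset(reta, 0, sizeof(reta));
 *        for (i = 0; i < reta_size; i++) {
 *                reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                                UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *                reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                                i % nb_rx_queues;
 *        }
 *        rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */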
2597
2598 int
2599 rte_eth_dev_rss_reta_query(uint16_t port_id,
2600                            struct rte_eth_rss_reta_entry64 *reta_conf,
2601                            uint16_t reta_size)
2602 {
2603         struct rte_eth_dev *dev;
2604         int ret;
2605
2606         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2607
2608         /* Check mask bits */
2609         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2610         if (ret < 0)
2611                 return ret;
2612
2613         dev = &rte_eth_devices[port_id];
2614         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2615         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2616                                                             reta_size));
2617 }
2618
2619 int
2620 rte_eth_dev_rss_hash_update(uint16_t port_id,
2621                             struct rte_eth_rss_conf *rss_conf)
2622 {
2623         struct rte_eth_dev *dev;
2624
2625         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2626         dev = &rte_eth_devices[port_id];
2627         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2628         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2629                                                                  rss_conf));
2630 }
2631
2632 int
2633 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2634                               struct rte_eth_rss_conf *rss_conf)
2635 {
2636         struct rte_eth_dev *dev;
2637
2638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2639         dev = &rte_eth_devices[port_id];
2640         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2641         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2642                                                                    rss_conf));
2643 }
2644
2645 int
2646 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2647                                 struct rte_eth_udp_tunnel *udp_tunnel)
2648 {
2649         struct rte_eth_dev *dev;
2650
2651         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2652         if (udp_tunnel == NULL) {
2653                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2654                 return -EINVAL;
2655         }
2656
2657         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2658                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2659                 return -EINVAL;
2660         }
2661
2662         dev = &rte_eth_devices[port_id];
2663         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2664         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2665                                                                 udp_tunnel));
2666 }
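
/*
 * Illustrative application-side sketch (not compiled here): registering the
 * IANA-assigned VXLAN UDP port so the hardware recognizes the tunnel and
 * can parse inner headers.
 *
 *        struct rte_eth_udp_tunnel tunnel = {
 *                .udp_port = 4789,
 *                .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *        };
 *
 *        rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */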
2667
2668 int
2669 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2670                                    struct rte_eth_udp_tunnel *udp_tunnel)
2671 {
2672         struct rte_eth_dev *dev;
2673
2674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2675         dev = &rte_eth_devices[port_id];
2676
2677         if (udp_tunnel == NULL) {
2678                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2679                 return -EINVAL;
2680         }
2681
2682         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2683                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2684                 return -EINVAL;
2685         }
2686
2687         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2688         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2689                                                                 udp_tunnel));
2690 }
2691
2692 int
2693 rte_eth_led_on(uint16_t port_id)
2694 {
2695         struct rte_eth_dev *dev;
2696
2697         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2698         dev = &rte_eth_devices[port_id];
2699         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2700         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2701 }
2702
2703 int
2704 rte_eth_led_off(uint16_t port_id)
2705 {
2706         struct rte_eth_dev *dev;
2707
2708         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2709         dev = &rte_eth_devices[port_id];
2710         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2711         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2712 }
2713
2714 /*
2715  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2716  * an empty spot.
2717  */
2718 static int
2719 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2720 {
2721         struct rte_eth_dev_info dev_info;
2722         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2723         unsigned i;
2724
2725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2726         rte_eth_dev_info_get(port_id, &dev_info);
2727
2728         for (i = 0; i < dev_info.max_mac_addrs; i++)
2729                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2730                         return i;
2731
2732         return -1;
2733 }
2734
2735 static const struct ether_addr null_mac_addr;
2736
2737 int
2738 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2739                         uint32_t pool)
2740 {
2741         struct rte_eth_dev *dev;
2742         int index;
2743         uint64_t pool_mask;
2744         int ret;
2745
2746         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2747         dev = &rte_eth_devices[port_id];
2748         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2749
2750         if (is_zero_ether_addr(addr)) {
2751                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2752                         port_id);
2753                 return -EINVAL;
2754         }
2755         if (pool >= ETH_64_POOLS) {
2756                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2757                 return -EINVAL;
2758         }
2759
2760         index = get_mac_addr_index(port_id, addr);
2761         if (index < 0) {
2762                 index = get_mac_addr_index(port_id, &null_mac_addr);
2763                 if (index < 0) {
2764                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2765                                 port_id);
2766                         return -ENOSPC;
2767                 }
2768         } else {
2769                 pool_mask = dev->data->mac_pool_sel[index];
2770
2771                 /* Check if both MAC address and pool are already there; if so, do nothing */
2772                 if (pool_mask & (1ULL << pool))
2773                         return 0;
2774         }
2775
2776         /* Update NIC */
2777         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2778
2779         if (ret == 0) {
2780                 /* Update address in NIC data structure */
2781                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2782
2783                 /* Update pool bitmap in NIC data structure */
2784                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2785         }
2786
2787         return eth_err(port_id, ret);
2788 }
2789
2790 int
2791 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2792 {
2793         struct rte_eth_dev *dev;
2794         int index;
2795
2796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2797         dev = &rte_eth_devices[port_id];
2798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2799
2800         index = get_mac_addr_index(port_id, addr);
2801         if (index == 0) {
2802                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2803                 return -EADDRINUSE;
2804         } else if (index < 0)
2805                 return 0;  /* Do nothing if address wasn't found */
2806
2807         /* Update NIC */
2808         (*dev->dev_ops->mac_addr_remove)(dev, index);
2809
2810         /* Update address in NIC data structure */
2811         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2812
2813         /* reset pool bitmap */
2814         dev->data->mac_pool_sel[index] = 0;
2815
2816         return 0;
2817 }
2818
2819 int
2820 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2821 {
2822         struct rte_eth_dev *dev;
2823
2824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2825
2826         if (!is_valid_assigned_ether_addr(addr))
2827                 return -EINVAL;
2828
2829         dev = &rte_eth_devices[port_id];
2830         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2831
2832         /* Update default address in NIC data structure */
2833         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2834
2835         (*dev->dev_ops->mac_addr_set)(dev, addr);
2836
2837         return 0;
2838 }
2839
2840
2841 /*
2842  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2843  * an empty spot.
2844  */
2845 static int
2846 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2847 {
2848         struct rte_eth_dev_info dev_info;
2849         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2850         unsigned i;
2851
2852         rte_eth_dev_info_get(port_id, &dev_info);
2853         if (!dev->data->hash_mac_addrs)
2854                 return -1;
2855
2856         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2857                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2858                         ETHER_ADDR_LEN) == 0)
2859                         return i;
2860
2861         return -1;
2862 }
2863
2864 int
2865 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2866                                 uint8_t on)
2867 {
2868         int index;
2869         int ret;
2870         struct rte_eth_dev *dev;
2871
2872         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2873
2874         dev = &rte_eth_devices[port_id];
2875         if (is_zero_ether_addr(addr)) {
2876                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2877                         port_id);
2878                 return -EINVAL;
2879         }
2880
2881         index = get_hash_mac_addr_index(port_id, addr);
2882         /* Check if it's already there, and do nothing */
2883         if ((index >= 0) && on)
2884                 return 0;
2885
2886         if (index < 0) {
2887                 if (!on) {
2888                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2889                                 "set in UTA\n", port_id);
2890                         return -EINVAL;
2891                 }
2892
2893                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2894                 if (index < 0) {
2895                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2896                                         port_id);
2897                         return -ENOSPC;
2898                 }
2899         }
2900
2901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2902         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2903         if (ret == 0) {
2904                 /* Update address in NIC data structure */
2905                 if (on)
2906                         ether_addr_copy(addr,
2907                                         &dev->data->hash_mac_addrs[index]);
2908                 else
2909                         ether_addr_copy(&null_mac_addr,
2910                                         &dev->data->hash_mac_addrs[index]);
2911         }
2912
2913         return eth_err(port_id, ret);
2914 }
2915
2916 int
2917 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
2918 {
2919         struct rte_eth_dev *dev;
2920
2921         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2922
2923         dev = &rte_eth_devices[port_id];
2924
2925         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2926         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
2927                                                                        on));
2928 }
2929
2930 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2931                                         uint16_t tx_rate)
2932 {
2933         struct rte_eth_dev *dev;
2934         struct rte_eth_dev_info dev_info;
2935         struct rte_eth_link link;
2936
2937         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2938
2939         dev = &rte_eth_devices[port_id];
2940         rte_eth_dev_info_get(port_id, &dev_info);
2941         link = dev->data->dev_link;
2942
2943         if (queue_idx > dev_info.max_tx_queues) {
2944                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2945                                 "invalid queue id=%d\n", port_id, queue_idx);
2946                 return -EINVAL;
2947         }
2948
2949         if (tx_rate > link.link_speed) {
2950                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
2951                                 "bigger than link speed=%d\n",
2952                         tx_rate, link.link_speed);
2953                 return -EINVAL;
2954         }
2955
2956         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2957         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
2958                                                         queue_idx, tx_rate));
2959 }
2960
2961 int
2962 rte_eth_mirror_rule_set(uint16_t port_id,
2963                         struct rte_eth_mirror_conf *mirror_conf,
2964                         uint8_t rule_id, uint8_t on)
2965 {
2966         struct rte_eth_dev *dev;
2967
2968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2969         if (mirror_conf->rule_type == 0) {
2970                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2971                 return -EINVAL;
2972         }
2973
2974         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2975                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2976                                 ETH_64_POOLS - 1);
2977                 return -EINVAL;
2978         }
2979
2980         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2981              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2982             (mirror_conf->pool_mask == 0)) {
2983                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2984                 return -EINVAL;
2985         }
2986
2987         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2988             mirror_conf->vlan.vlan_mask == 0) {
2989                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2990                 return -EINVAL;
2991         }
2992
2993         dev = &rte_eth_devices[port_id];
2994         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2995
2996         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
2997                                                 mirror_conf, rule_id, on));
2998 }
2999
3000 int
3001 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3002 {
3003         struct rte_eth_dev *dev;
3004
3005         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3006
3007         dev = &rte_eth_devices[port_id];
3008         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3009
3010         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3011                                                                    rule_id));
3012 }
3013
3014 RTE_INIT(eth_dev_init_cb_lists)
3015 {
3016         int i;
3017
3018         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3019                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3020 }
3021
3022 int
3023 rte_eth_dev_callback_register(uint16_t port_id,
3024                         enum rte_eth_event_type event,
3025                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3026 {
3027         struct rte_eth_dev *dev;
3028         struct rte_eth_dev_callback *user_cb;
3029         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3030         uint16_t last_port;
3031
3032         if (!cb_fn)
3033                 return -EINVAL;
3034
3035         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3036                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3037                 return -EINVAL;
3038         }
3039
3040         if (port_id == RTE_ETH_ALL) {
3041                 next_port = 0;
3042                 last_port = RTE_MAX_ETHPORTS - 1;
3043         } else {
3044                 next_port = last_port = port_id;
3045         }
3046
3047         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3048
3049         do {
3050                 dev = &rte_eth_devices[next_port];
3051
3052                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3053                         if (user_cb->cb_fn == cb_fn &&
3054                                 user_cb->cb_arg == cb_arg &&
3055                                 user_cb->event == event) {
3056                                 break;
3057                         }
3058                 }
3059
3060                 /* create a new callback. */
3061                 if (user_cb == NULL) {
3062                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3063                                 sizeof(struct rte_eth_dev_callback), 0);
3064                         if (user_cb != NULL) {
3065                                 user_cb->cb_fn = cb_fn;
3066                                 user_cb->cb_arg = cb_arg;
3067                                 user_cb->event = event;
3068                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3069                                                   user_cb, next);
3070                         } else {
3071                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3072                                 rte_eth_dev_callback_unregister(port_id, event,
3073                                                                 cb_fn, cb_arg);
3074                                 return -ENOMEM;
3075                         }
3076
3077                 }
3078         } while (++next_port <= last_port);
3079
3080         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3081         return 0;
3082 }
3083
3084 int
3085 rte_eth_dev_callback_unregister(uint16_t port_id,
3086                         enum rte_eth_event_type event,
3087                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3088 {
3089         int ret;
3090         struct rte_eth_dev *dev;
3091         struct rte_eth_dev_callback *cb, *next;
3092         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3093         uint16_t last_port;
3094
3095         if (!cb_fn)
3096                 return -EINVAL;
3097
3098         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3099                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3100                 return -EINVAL;
3101         }
3102
3103         if (port_id == RTE_ETH_ALL) {
3104                 next_port = 0;
3105                 last_port = RTE_MAX_ETHPORTS - 1;
3106         } else {
3107                 next_port = last_port = port_id;
3108         }
3109
3110         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3111
3112         do {
3113                 dev = &rte_eth_devices[next_port];
3114                 ret = 0;
3115                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3116                      cb = next) {
3117
3118                         next = TAILQ_NEXT(cb, next);
3119
3120                         if (cb->cb_fn != cb_fn || cb->event != event ||
3121                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3122                                 continue;
3123
3124                         /*
3125                          * if this callback is not executing right now,
3126                          * then remove it.
3127                          */
3128                         if (cb->active == 0) {
3129                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3130                                 rte_free(cb);
3131                         } else {
3132                                 ret = -EAGAIN;
3133                         }
3134                 }
3135         } while (++next_port <= last_port);
3136
3137         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3138         return ret;
3139 }
3140
3141 int
3142 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3143         enum rte_eth_event_type event, void *ret_param)
3144 {
3145         struct rte_eth_dev_callback *cb_lst;
3146         struct rte_eth_dev_callback dev_cb;
3147         int rc = 0;
3148
3149         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3150         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3151                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3152                         continue;
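                /*
                 * Work on a local copy and mark the list entry active so
                 * the lock can be dropped while the user callback runs;
                 * unregistering an active callback returns -EAGAIN.
                 */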
3153                 dev_cb = *cb_lst;
3154                 cb_lst->active = 1;
3155                 if (ret_param != NULL)
3156                         dev_cb.ret_param = ret_param;
3157
3158                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3159                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3160                                 dev_cb.cb_arg, dev_cb.ret_param);
3161                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3162                 cb_lst->active = 0;
3163         }
3164         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3165         return rc;
3166 }
3167
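/*
 * Apply an epoll control operation to the Rx interrupt vector of every
 * Rx queue of the port. Per-queue failures are logged (except -EEXIST)
 * but do not abort the loop.
 */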
3168 int
3169 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3170 {
3171         uint32_t vec;
3172         struct rte_eth_dev *dev;
3173         struct rte_intr_handle *intr_handle;
3174         uint16_t qid;
3175         int rc;
3176
3177         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3178
3179         dev = &rte_eth_devices[port_id];
3180
3181         if (!dev->intr_handle) {
3182                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3183                 return -ENOTSUP;
3184         }
3185
3186         intr_handle = dev->intr_handle;
3187         if (!intr_handle->intr_vec) {
3188                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3189                 return -EPERM;
3190         }
3191
3192         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3193                 vec = intr_handle->intr_vec[qid];
3194                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3195                 if (rc && rc != -EEXIST) {
3196                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3197                                         " op %d epfd %d vec %u\n",
3198                                         port_id, qid, op, epfd, vec);
3199                 }
3200         }
3201
3202         return 0;
3203 }
3204
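/*
 * Reserve the memzone backing a queue's HW ring, or return the existing
 * one if a zone with the same name was already reserved, so that queue
 * reconfiguration reuses the original DMA memory.
 */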
3205 const struct rte_memzone *
3206 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3207                          uint16_t queue_id, size_t size, unsigned align,
3208                          int socket_id)
3209 {
3210         char z_name[RTE_MEMZONE_NAMESIZE];
3211         const struct rte_memzone *mz;
3212
3213         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3214                  dev->device->driver->name, ring_name,
3215                  dev->data->port_id, queue_id);
3216
3217         mz = rte_memzone_lookup(z_name);
3218         if (mz)
3219                 return mz;
3220
3221         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3222 }
3223
3224 int
3225 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3226                           int epfd, int op, void *data)
3227 {
3228         uint32_t vec;
3229         struct rte_eth_dev *dev;
3230         struct rte_intr_handle *intr_handle;
3231         int rc;
3232
3233         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3234
3235         dev = &rte_eth_devices[port_id];
3236         if (queue_id >= dev->data->nb_rx_queues) {
3237                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3238                 return -EINVAL;
3239         }
3240
3241         if (!dev->intr_handle) {
3242                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3243                 return -ENOTSUP;
3244         }
3245
3246         intr_handle = dev->intr_handle;
3247         if (!intr_handle->intr_vec) {
3248                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3249                 return -EPERM;
3250         }
3251
3252         vec = intr_handle->intr_vec[queue_id];
3253         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3254         if (rc && rc != -EEXIST) {
3255                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3256                                 " op %d epfd %d vec %u\n",
3257                                 port_id, queue_id, op, epfd, vec);
3258                 return rc;
3259         }
3260
3261         return 0;
3262 }
3263
3264 int
3265 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3266                            uint16_t queue_id)
3267 {
3268         struct rte_eth_dev *dev;
3269
3270         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3271
3272         dev = &rte_eth_devices[port_id];
3273
3274         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3275         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3276                                                                 queue_id));
3277 }
3278
3279 int
3280 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3281                             uint16_t queue_id)
3282 {
3283         struct rte_eth_dev *dev;
3284
3285         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3286
3287         dev = &rte_eth_devices[port_id];
3288
3289         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3290         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3291                                                                 queue_id));
3292 }
3293
3294
3295 int
3296 rte_eth_dev_filter_supported(uint16_t port_id,
3297                              enum rte_filter_type filter_type)
3298 {
3299         struct rte_eth_dev *dev;
3300
3301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3302
3303         dev = &rte_eth_devices[port_id];
3304         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3305         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3306                                 RTE_ETH_FILTER_NOP, NULL);
3307 }
3308
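/*
 * Compatibility wrapper kept for the DPDK 2.2 ABI: translate between the
 * old fdir/hash info layouts (32-bit masks) and the current structures
 * around the PMD filter_ctrl callback.
 */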
3309 int
3310 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3311                             enum rte_filter_type filter_type,
3312                             enum rte_filter_op filter_op, void *arg);
3313
3314 int
3315 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3316                             enum rte_filter_type filter_type,
3317                             enum rte_filter_op filter_op, void *arg)
3318 {
3319         struct rte_eth_fdir_info_v22 {
3320                 enum rte_fdir_mode mode;
3321                 struct rte_eth_fdir_masks mask;
3322                 struct rte_eth_fdir_flex_conf flex_conf;
3323                 uint32_t guarant_spc;
3324                 uint32_t best_spc;
3325                 uint32_t flow_types_mask[1];
3326                 uint32_t max_flexpayload;
3327                 uint32_t flex_payload_unit;
3328                 uint32_t max_flex_payload_segment_num;
3329                 uint16_t flex_payload_limit;
3330                 uint32_t flex_bitmask_unit;
3331                 uint32_t max_flex_bitmask_num;
3332         };
3333
3334         struct rte_eth_hash_global_conf_v22 {
3335                 enum rte_eth_hash_function hash_func;
3336                 uint32_t sym_hash_enable_mask[1];
3337                 uint32_t valid_bit_mask[1];
3338         };
3339
3340         struct rte_eth_hash_filter_info_v22 {
3341                 enum rte_eth_hash_filter_info_type info_type;
3342                 union {
3343                         uint8_t enable;
3344                         struct rte_eth_hash_global_conf_v22 global_conf;
3345                         struct rte_eth_input_set_conf input_set_conf;
3346                 } info;
3347         };
3348
3349         struct rte_eth_dev *dev;
3350
3351         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3352
3353         dev = &rte_eth_devices[port_id];
3354         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3355         if (filter_op == RTE_ETH_FILTER_INFO) {
3356                 int retval;
3357                 struct rte_eth_fdir_info_v22 *fdir_info_v22;
3358                 struct rte_eth_fdir_info fdir_info;
3359
3360                 fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
3361
3362                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3363                           filter_op, (void *)&fdir_info);
3364                 fdir_info_v22->mode = fdir_info.mode;
3365                 fdir_info_v22->mask = fdir_info.mask;
3366                 fdir_info_v22->flex_conf = fdir_info.flex_conf;
3367                 fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
3368                 fdir_info_v22->best_spc = fdir_info.best_spc;
3369                 fdir_info_v22->flow_types_mask[0] =
3370                         (uint32_t)fdir_info.flow_types_mask[0];
3371                 fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
3372                 fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
3373                 fdir_info_v22->max_flex_payload_segment_num =
3374                         fdir_info.max_flex_payload_segment_num;
3375                 fdir_info_v22->flex_payload_limit =
3376                         fdir_info.flex_payload_limit;
3377                 fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
3378                 fdir_info_v22->max_flex_bitmask_num =
3379                         fdir_info.max_flex_bitmask_num;
3380                 return retval;
3381         } else if (filter_op == RTE_ETH_FILTER_GET) {
3382                 int retval;
3383                 struct rte_eth_hash_filter_info f_info;
3384                 struct rte_eth_hash_filter_info_v22 *f_info_v22 =
3385                         (struct rte_eth_hash_filter_info_v22 *)arg;
3386
3387                 f_info.info_type = f_info_v22->info_type;
3388                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3389                           filter_op, (void *)&f_info);
3390
3391                 switch (f_info_v22->info_type) {
3392                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3393                         f_info_v22->info.enable = f_info.info.enable;
3394                         break;
3395                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3396                         f_info_v22->info.global_conf.hash_func =
3397                                 f_info.info.global_conf.hash_func;
3398                         f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
3399                                 (uint32_t)
3400                                 f_info.info.global_conf.sym_hash_enable_mask[0];
3401                         f_info_v22->info.global_conf.valid_bit_mask[0] =
3402                                 (uint32_t)
3403                                 f_info.info.global_conf.valid_bit_mask[0];
3404                         break;
3405                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3406                         f_info_v22->info.input_set_conf =
3407                                 f_info.info.input_set_conf;
3408                         break;
3409                 default:
3410                         break;
3411                 }
3412                 return retval;
3413         } else if (filter_op == RTE_ETH_FILTER_SET) {
3414                 struct rte_eth_hash_filter_info f_info;
3415                 struct rte_eth_hash_filter_info_v22 *f_v22 =
3416                         (struct rte_eth_hash_filter_info_v22 *)arg;
3417
3418                 f_info.info_type = f_v22->info_type;
3419                 switch (f_v22->info_type) {
3420                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3421                         f_info.info.enable = f_v22->info.enable;
3422                         break;
3423                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3424                         f_info.info.global_conf.hash_func =
3425                                 f_v22->info.global_conf.hash_func;
3426                         f_info.info.global_conf.sym_hash_enable_mask[0] =
3427                                 (uint32_t)
3428                                 f_v22->info.global_conf.sym_hash_enable_mask[0];
3429                         f_info.info.global_conf.valid_bit_mask[0] =
3430                                 (uint32_t)
3431                                 f_v22->info.global_conf.valid_bit_mask[0];
3432                         break;
3433                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3434                         f_info.info.input_set_conf =
3435                                 f_v22->info.input_set_conf;
3436                         break;
3437                 default:
3438                         break;
3439                 }
3440                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3441                                                     (void *)&f_info);
3442         } else
3443                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3444                                                     arg);
3445 }
3446 VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
3447
3448 int
3449 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3450                               enum rte_filter_type filter_type,
3451                               enum rte_filter_op filter_op, void *arg);
3452
3453 int
3454 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3455                               enum rte_filter_type filter_type,
3456                               enum rte_filter_op filter_op, void *arg)
3457 {
3458         struct rte_eth_dev *dev;
3459
3460         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3461
3462         dev = &rte_eth_devices[port_id];
3463         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3464         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3465                                                              filter_op, arg));
3466 }
3467 BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
3468 MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
3469                   enum rte_filter_type filter_type,
3470                   enum rte_filter_op filter_op, void *arg),
3471                   rte_eth_dev_filter_ctrl_v1802);
3472
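/*
 * Illustrative usage sketch (application code, not part of this file):
 * a post-Rx callback that counts received packets, with a hypothetical
 * per-queue counter passed as user_param.
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, &rx_count);
 */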
3473 void *
3474 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3475                 rte_rx_callback_fn fn, void *user_param)
3476 {
3477 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3478         rte_errno = ENOTSUP;
3479         return NULL;
3480 #endif
3481         /* check input parameters */
3482         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3483                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3484                 rte_errno = EINVAL;
3485                 return NULL;
3486         }
3487         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3488
3489         if (cb == NULL) {
3490                 rte_errno = ENOMEM;
3491                 return NULL;
3492         }
3493
3494         cb->fn.rx = fn;
3495         cb->param = user_param;
3496
3497         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3498         /* Add the callbacks in FIFO order. */
3499         struct rte_eth_rxtx_callback *tail =
3500                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3501
3502         if (!tail) {
3503                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3504
3505         } else {
3506                 while (tail->next)
3507                         tail = tail->next;
3508                 tail->next = cb;
3509         }
3510         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3511
3512         return cb;
3513 }
3514
3515 void *
3516 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3517                 rte_rx_callback_fn fn, void *user_param)
3518 {
3519 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3520         rte_errno = ENOTSUP;
3521         return NULL;
3522 #endif
3523         /* check input parameters */
3524         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3525                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3526                 rte_errno = EINVAL;
3527                 return NULL;
3528         }
3529
3530         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3531
3532         if (cb == NULL) {
3533                 rte_errno = ENOMEM;
3534                 return NULL;
3535         }
3536
3537         cb->fn.rx = fn;
3538         cb->param = user_param;
3539
3540         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3541         /* Add the callback at the first position. */
3542         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
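        /*
         * Make sure the callback contents and its next pointer are
         * globally visible before publishing it to the data path,
         * which reads the list without taking the lock.
         */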
3543         rte_smp_wmb();
3544         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3545         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3546
3547         return cb;
3548 }
3549
3550 void *
3551 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3552                 rte_tx_callback_fn fn, void *user_param)
3553 {
3554 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3555         rte_errno = ENOTSUP;
3556         return NULL;
3557 #endif
3558         /* check input parameters */
3559         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3560                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3561                 rte_errno = EINVAL;
3562                 return NULL;
3563         }
3564
3565         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3566
3567         if (cb == NULL) {
3568                 rte_errno = ENOMEM;
3569                 return NULL;
3570         }
3571
3572         cb->fn.tx = fn;
3573         cb->param = user_param;
3574
3575         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3576         /* Add the callbacks in FIFO order. */
3577         struct rte_eth_rxtx_callback *tail =
3578                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3579
3580         if (!tail) {
3581                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3582
3583         } else {
3584                 while (tail->next)
3585                         tail = tail->next;
3586                 tail->next = cb;
3587         }
3588         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3589
3590         return cb;
3591 }
3592
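/*
 * Unlink a callback from a queue's Rx callback list. The callback memory
 * is not freed here; the caller is expected to make sure no data path
 * thread still references it before releasing it.
 */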
3593 int
3594 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3595                 struct rte_eth_rxtx_callback *user_cb)
3596 {
3597 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3598         return -ENOTSUP;
3599 #endif
3600         /* Check input parameters. */
3601         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3602         if (user_cb == NULL ||
3603                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3604                 return -EINVAL;
3605
3606         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3607         struct rte_eth_rxtx_callback *cb;
3608         struct rte_eth_rxtx_callback **prev_cb;
3609         int ret = -EINVAL;
3610
3611         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3612         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3613         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3614                 cb = *prev_cb;
3615                 if (cb == user_cb) {
3616                         /* Remove the user cb from the callback list. */
3617                         *prev_cb = cb->next;
3618                         ret = 0;
3619                         break;
3620                 }
3621         }
3622         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3623
3624         return ret;
3625 }
3626
3627 int
3628 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3629                 struct rte_eth_rxtx_callback *user_cb)
3630 {
3631 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3632         return -ENOTSUP;
3633 #endif
3634         /* Check input parameters. */
3635         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3636         if (user_cb == NULL ||
3637                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3638                 return -EINVAL;
3639
3640         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3641         int ret = -EINVAL;
3642         struct rte_eth_rxtx_callback *cb;
3643         struct rte_eth_rxtx_callback **prev_cb;
3644
3645         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3646         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3647         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3648                 cb = *prev_cb;
3649                 if (cb == user_cb) {
3650                         /* Remove the user cb from the callback list. */
3651                         *prev_cb = cb->next;
3652                         ret = 0;
3653                         break;
3654                 }
3655         }
3656         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3657
3658         return ret;
3659 }
3660
3661 int
3662 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3663         struct rte_eth_rxq_info *qinfo)
3664 {
3665         struct rte_eth_dev *dev;
3666
3667         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3668
3669         if (qinfo == NULL)
3670                 return -EINVAL;
3671
3672         dev = &rte_eth_devices[port_id];
3673         if (queue_id >= dev->data->nb_rx_queues) {
3674                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3675                 return -EINVAL;
3676         }
3677
3678         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3679
3680         memset(qinfo, 0, sizeof(*qinfo));
3681         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3682         return 0;
3683 }
3684
3685 int
3686 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3687         struct rte_eth_txq_info *qinfo)
3688 {
3689         struct rte_eth_dev *dev;
3690
3691         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3692
3693         if (qinfo == NULL)
3694                 return -EINVAL;
3695
3696         dev = &rte_eth_devices[port_id];
3697         if (queue_id >= dev->data->nb_tx_queues) {
3698                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3699                 return -EINVAL;
3700         }
3701
3702         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3703
3704         memset(qinfo, 0, sizeof(*qinfo));
3705         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3706         return 0;
3707 }
3708
3709 int
3710 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3711                              struct ether_addr *mc_addr_set,
3712                              uint32_t nb_mc_addr)
3713 {
3714         struct rte_eth_dev *dev;
3715
3716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3717
3718         dev = &rte_eth_devices[port_id];
3719         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3720         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3721                                                 mc_addr_set, nb_mc_addr));
3722 }
3723
3724 int
3725 rte_eth_timesync_enable(uint16_t port_id)
3726 {
3727         struct rte_eth_dev *dev;
3728
3729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3730         dev = &rte_eth_devices[port_id];
3731
3732         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3733         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3734 }
3735
3736 int
3737 rte_eth_timesync_disable(uint16_t port_id)
3738 {
3739         struct rte_eth_dev *dev;
3740
3741         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3742         dev = &rte_eth_devices[port_id];
3743
3744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3745         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3746 }
3747
3748 int
3749 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3750                                    uint32_t flags)
3751 {
3752         struct rte_eth_dev *dev;
3753
3754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3755         dev = &rte_eth_devices[port_id];
3756
3757         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3758         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3759                                 (dev, timestamp, flags));
3760 }
3761
3762 int
3763 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3764                                    struct timespec *timestamp)
3765 {
3766         struct rte_eth_dev *dev;
3767
3768         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3769         dev = &rte_eth_devices[port_id];
3770
3771         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3772         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3773                                 (dev, timestamp));
3774 }
3775
3776 int
3777 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3778 {
3779         struct rte_eth_dev *dev;
3780
3781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3782         dev = &rte_eth_devices[port_id];
3783
3784         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3785         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3786                                                                       delta));
3787 }
3788
3789 int
3790 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3791 {
3792         struct rte_eth_dev *dev;
3793
3794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3795         dev = &rte_eth_devices[port_id];
3796
3797         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3798         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3799                                                                 timestamp));
3800 }
3801
3802 int
3803 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3804 {
3805         struct rte_eth_dev *dev;
3806
3807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3808         dev = &rte_eth_devices[port_id];
3809
3810         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3811         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3812                                                                 timestamp));
3813 }
3814
3815 int
3816 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3817 {
3818         struct rte_eth_dev *dev;
3819
3820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3821
3822         dev = &rte_eth_devices[port_id];
3823         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3824         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3825 }
3826
3827 int
3828 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3829 {
3830         struct rte_eth_dev *dev;
3831
3832         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3833
3834         dev = &rte_eth_devices[port_id];
3835         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3836         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3837 }
3838
3839 int
3840 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3841 {
3842         struct rte_eth_dev *dev;
3843
3844         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3845
3846         dev = &rte_eth_devices[port_id];
3847         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3848         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3849 }
3850
3851 int
3852 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3853 {
3854         struct rte_eth_dev *dev;
3855
3856         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3857
3858         dev = &rte_eth_devices[port_id];
3859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3860         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3861 }
3862
3863 int
3864 rte_eth_dev_get_dcb_info(uint16_t port_id,
3865                              struct rte_eth_dcb_info *dcb_info)
3866 {
3867         struct rte_eth_dev *dev;
3868
3869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3870
3871         dev = &rte_eth_devices[port_id];
3872         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3873
3874         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3875         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
3876 }
3877
3878 int
3879 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3880                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3881 {
3882         struct rte_eth_dev *dev;
3883
3884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3885         if (l2_tunnel == NULL) {
3886                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3887                 return -EINVAL;
3888         }
3889
3890         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3891                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3892                 return -EINVAL;
3893         }
3894
3895         dev = &rte_eth_devices[port_id];
3896         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3897                                 -ENOTSUP);
3898         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
3899                                                                 l2_tunnel));
3900 }
3901
3902 int
3903 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3904                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3905                                   uint32_t mask,
3906                                   uint8_t en)
3907 {
3908         struct rte_eth_dev *dev;
3909
3910         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3911
3912         if (l2_tunnel == NULL) {
3913                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3914                 return -EINVAL;
3915         }
3916
3917         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3918                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3919                 return -EINVAL;
3920         }
3921
3922         if (mask == 0) {
3923                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3924                 return -EINVAL;
3925         }
3926
3927         dev = &rte_eth_devices[port_id];
3928         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3929                                 -ENOTSUP);
3930         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
3931                                                         l2_tunnel, mask, en));
3932 }
3933
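/*
 * Clamp a requested descriptor count to the PMD limits: round up to the
 * required alignment, cap at the maximum, then enforce the minimum.
 */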
3934 static void
3935 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3936                            const struct rte_eth_desc_lim *desc_lim)
3937 {
3938         if (desc_lim->nb_align != 0)
3939                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3940
3941         if (desc_lim->nb_max != 0)
3942                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3943
3944         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3945 }
3946
3947 int
3948 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3949                                  uint16_t *nb_rx_desc,
3950                                  uint16_t *nb_tx_desc)
3951 {
3952         struct rte_eth_dev *dev;
3953         struct rte_eth_dev_info dev_info;
3954
3955         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3956
3957         dev = &rte_eth_devices[port_id];
3958         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3959
3960         rte_eth_dev_info_get(port_id, &dev_info);
3961
3962         if (nb_rx_desc != NULL)
3963                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3964
3965         if (nb_tx_desc != NULL)
3966                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3967
3968         return 0;
3969 }
3970
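/*
 * Ask the PMD whether it can work with the given mempool ops name. PMDs
 * that do not implement the hook are assumed to support any mempool,
 * hence the return value of 1 in that case.
 */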
3971 int
3972 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3973 {
3974         struct rte_eth_dev *dev;
3975
3976         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3977
3978         if (pool == NULL)
3979                 return -EINVAL;
3980
3981         dev = &rte_eth_devices[port_id];
3982
3983         if (*dev->dev_ops->pool_ops_supported == NULL)
3984                 return 1; /* all pools are supported */
3985
3986         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3987 }