[dpdk.git] lib/librte_ether/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_compat.h>
38
39 #include "rte_ether.h"
40 #include "rte_ethdev.h"
41 #include "rte_ethdev_driver.h"
42 #include "ethdev_profile.h"
43
44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
46 static struct rte_eth_dev_data *rte_eth_dev_data;
47 static uint8_t eth_dev_last_created_port;
48
49 /* spinlock for eth device callbacks */
50 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
51
52 /* spinlock for add/remove rx callbacks */
53 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
54
55 /* spinlock for add/remove tx callbacks */
56 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
57
58 /* store statistics names and their offsets in the stats structure */
59 struct rte_eth_xstats_name_off {
60         char name[RTE_ETH_XSTATS_NAME_SIZE];
61         unsigned offset;
62 };
63
64 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
65         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
66         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
67         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
68         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
69         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
70         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
71         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
72         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
73                 rx_nombuf)},
74 };
75
76 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
77
78 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
79         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
80         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
81         {"errors", offsetof(struct rte_eth_stats, q_errors)},
82 };
83
84 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
85                 sizeof(rte_rxq_stats_strings[0]))
86
87 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
88         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
89         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
90 };
91 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
92                 sizeof(rte_txq_stats_strings[0]))
93
94 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
95         { DEV_RX_OFFLOAD_##_name, #_name }
96
97 static const struct {
98         uint64_t offload;
99         const char *name;
100 } rte_rx_offload_names[] = {
101         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
102         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
103         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
104         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
105         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
106         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
107         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
108         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
109         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
111         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
112         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
113         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
114         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
115         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
116         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
117 };
118
119 #undef RTE_RX_OFFLOAD_BIT2STR
120
121 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
122         { DEV_TX_OFFLOAD_##_name, #_name }
123
124 static const struct {
125         uint64_t offload;
126         const char *name;
127 } rte_tx_offload_names[] = {
128         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
129         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
130         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
131         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
132         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
133         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
134         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
135         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
136         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
137         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
138         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
139         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
140         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
141         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
142         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
143         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
144         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
145         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
146 };
147
148 #undef RTE_TX_OFFLOAD_BIT2STR
149
150 /**
151  * The user application callback description.
152  *
153  * It contains the callback address registered by the user application,
154  * a pointer to the callback parameters, and the event type.
155  */
156 struct rte_eth_dev_callback {
157         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
158         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
159         void *cb_arg;                           /**< Parameter for callback */
160         void *ret_param;                        /**< Return parameter */
161         enum rte_eth_event_type event;          /**< Interrupt event type */
162         uint32_t active;                        /**< Callback is executing */
163 };
164
165 enum {
166         STAT_QMAP_TX = 0,
167         STAT_QMAP_RX
168 };
169
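/* Return the lowest valid port id that is >= port_id (i.e. a port whose
 * device is attached or flagged as removed), or RTE_MAX_ETHPORTS if none.
 */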
170 uint16_t
171 rte_eth_find_next(uint16_t port_id)
172 {
173         while (port_id < RTE_MAX_ETHPORTS &&
174                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
175                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
176                 port_id++;
177
178         if (port_id >= RTE_MAX_ETHPORTS)
179                 return RTE_MAX_ETHPORTS;
180
181         return port_id;
182 }
183
184 static void
185 rte_eth_dev_data_alloc(void)
186 {
187         const unsigned flags = 0;
188         const struct rte_memzone *mz;
189
190         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
191                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
192                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
193                                 rte_socket_id(), flags);
194         } else
195                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
196         if (mz == NULL)
197                 rte_panic("Cannot allocate memzone for ethernet port data\n");
198
199         rte_eth_dev_data = mz->addr;
200         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
201                 memset(rte_eth_dev_data, 0,
202                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
203 }
204
205 struct rte_eth_dev *
206 rte_eth_dev_allocated(const char *name)
207 {
208         unsigned i;
209
210         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
211                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
212                     strcmp(rte_eth_devices[i].data->name, name) == 0)
213                         return &rte_eth_devices[i];
214         }
215         return NULL;
216 }
217
218 static uint16_t
219 rte_eth_dev_find_free_port(void)
220 {
221         unsigned i;
222
223         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
224                 if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
225                         return i;
226         }
227         return RTE_MAX_ETHPORTS;
228 }
229
230 static struct rte_eth_dev *
231 eth_dev_get(uint16_t port_id)
232 {
233         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
234
235         eth_dev->data = &rte_eth_dev_data[port_id];
236         eth_dev->state = RTE_ETH_DEV_ATTACHED;
237
238         eth_dev_last_created_port = port_id;
239
240         return eth_dev;
241 }
242
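/* Allocate a new ethdev slot for the device named 'name': reserve the shared
 * port data memzone if needed, refuse duplicate names, initialise the
 * per-port data and notify RTE_ETH_EVENT_NEW callbacks.
 */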
243 struct rte_eth_dev *
244 rte_eth_dev_allocate(const char *name)
245 {
246         uint16_t port_id;
247         struct rte_eth_dev *eth_dev;
248
249         port_id = rte_eth_dev_find_free_port();
250         if (port_id == RTE_MAX_ETHPORTS) {
251                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
252                 return NULL;
253         }
254
255         if (rte_eth_dev_data == NULL)
256                 rte_eth_dev_data_alloc();
257
258         if (rte_eth_dev_allocated(name) != NULL) {
259                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
260                                 name);
261                 return NULL;
262         }
263
264         memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
265         eth_dev = eth_dev_get(port_id);
266         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
267         eth_dev->data->port_id = port_id;
268         eth_dev->data->mtu = ETHER_MTU;
269
270         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
271
272         return eth_dev;
273 }
274
275 /*
276  * Attach to a port already registered by the primary process, so that
277  * the same device gets the same port id in both the primary and the
278  * secondary process.
279  */
280 struct rte_eth_dev *
281 rte_eth_dev_attach_secondary(const char *name)
282 {
283         uint16_t i;
284         struct rte_eth_dev *eth_dev;
285
286         if (rte_eth_dev_data == NULL)
287                 rte_eth_dev_data_alloc();
288
289         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
290                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
291                         break;
292         }
293         if (i == RTE_MAX_ETHPORTS) {
294                 RTE_PMD_DEBUG_TRACE(
295                         "device %s is not driven by the primary process\n",
296                         name);
297                 return NULL;
298         }
299
300         eth_dev = eth_dev_get(i);
301         RTE_ASSERT(eth_dev->data->port_id == i);
302
303         return eth_dev;
304 }
305
306 int
307 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
308 {
309         if (eth_dev == NULL)
310                 return -EINVAL;
311
312         eth_dev->state = RTE_ETH_DEV_UNUSED;
313
314         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
315
316         return 0;
317 }
318
319 int
320 rte_eth_dev_is_valid_port(uint16_t port_id)
321 {
322         if (port_id >= RTE_MAX_ETHPORTS ||
323             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
324                 return 0;
325         else
326                 return 1;
327 }
328
329 int
330 rte_eth_dev_socket_id(uint16_t port_id)
331 {
332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
333         return rte_eth_devices[port_id].data->numa_node;
334 }
335
336 void *
338 rte_eth_dev_get_sec_ctx(uint16_t port_id)
338 {
339         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
340         return rte_eth_devices[port_id].security_ctx;
341 }
342
343 uint16_t
344 rte_eth_dev_count(void)
345 {
346         uint16_t p;
347         uint16_t count;
348
349         count = 0;
350
351         RTE_ETH_FOREACH_DEV(p)
352                 count++;
353
354         return count;
355 }
356
357 int
358 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
359 {
360         char *tmp;
361
362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
363
364         if (name == NULL) {
365                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
366                 return -EINVAL;
367         }
368
369         /* shouldn't check 'rte_eth_devices[port_id].data',
370          * because it might be overwritten by VDEV PMD */
371         tmp = rte_eth_dev_data[port_id].name;
372         strcpy(name, tmp);
373         return 0;
374 }
375
376 int
377 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
378 {
379         int i;
380
381         if (name == NULL) {
382                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
383                 return -EINVAL;
384         }
385
386         RTE_ETH_FOREACH_DEV(i) {
387                 if (!strcmp(name, rte_eth_dev_data[i].name)) {
389
390                         *port_id = i;
391
392                         return 0;
393                 }
394         }
395         return -ENODEV;
396 }
397
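/* Map a PMD return value: report -EIO if the device has been physically
 * removed, otherwise pass the PMD's error code through unchanged.
 */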
398 static int
399 eth_err(uint16_t port_id, int ret)
400 {
401         if (ret == 0)
402                 return 0;
403         if (rte_eth_dev_is_removed(port_id))
404                 return -EIO;
405         return ret;
406 }
407
408 /* attach the new device, then store port_id of the device */
409 int
410 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
411 {
412         int ret = -1;
413         int current = rte_eth_dev_count();
414         char *name = NULL;
415         char *args = NULL;
416
417         if ((devargs == NULL) || (port_id == NULL)) {
418                 ret = -EINVAL;
419                 goto err;
420         }
421
422         /* parse devargs, then retrieve device name and args */
423         if (rte_eal_parse_devargs_str(devargs, &name, &args))
424                 goto err;
425
426         ret = rte_eal_dev_attach(name, args);
427         if (ret < 0)
428                 goto err;
429
430         /* no point looking at the port count if no port exists */
431         if (!rte_eth_dev_count()) {
432                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
433                 ret = -1;
434                 goto err;
435         }
436
437         /* if nothing happened, there is a bug here, since some driver told us
438          * it did attach a device, but did not create a port.
439          */
440         if (current == rte_eth_dev_count()) {
441                 ret = -1;
442                 goto err;
443         }
444
445         *port_id = eth_dev_last_created_port;
446         ret = 0;
447
448 err:
449         free(name);
450         free(args);
451         return ret;
452 }
453
454 /* detach the device, then store the name of the device */
455 int
456 rte_eth_dev_detach(uint16_t port_id, char *name)
457 {
458         uint32_t dev_flags;
459         int ret = -1;
460
461         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
462
463         if (name == NULL) {
464                 ret = -EINVAL;
465                 goto err;
466         }
467
468         dev_flags = rte_eth_devices[port_id].data->dev_flags;
469         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
470                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
471                         port_id);
472                 ret = -ENOTSUP;
473                 goto err;
474         }
475
476         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
477                  "%s", rte_eth_devices[port_id].data->name);
478
479         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
480         if (ret < 0)
481                 goto err;
482
483         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
484         return 0;
485
486 err:
487         return ret;
488 }
489
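/* Grow, shrink or free the dev->data->rx_queues pointer array so that it
 * holds nb_queues entries, releasing any queue beyond the new count.
 */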
490 static int
491 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
492 {
493         uint16_t old_nb_queues = dev->data->nb_rx_queues;
494         void **rxq;
495         unsigned i;
496
497         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
498                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
499                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
500                                 RTE_CACHE_LINE_SIZE);
501                 if (dev->data->rx_queues == NULL) {
502                         dev->data->nb_rx_queues = 0;
503                         return -(ENOMEM);
504                 }
505         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
506                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
507
508                 rxq = dev->data->rx_queues;
509
510                 for (i = nb_queues; i < old_nb_queues; i++)
511                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
512                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
513                                 RTE_CACHE_LINE_SIZE);
514                 if (rxq == NULL)
515                         return -(ENOMEM);
516                 if (nb_queues > old_nb_queues) {
517                         uint16_t new_qs = nb_queues - old_nb_queues;
518
519                         memset(rxq + old_nb_queues, 0,
520                                 sizeof(rxq[0]) * new_qs);
521                 }
522
523                 dev->data->rx_queues = rxq;
524
525         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
526                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
527
528                 rxq = dev->data->rx_queues;
529
530                 for (i = nb_queues; i < old_nb_queues; i++)
531                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
532
533                 rte_free(dev->data->rx_queues);
534                 dev->data->rx_queues = NULL;
535         }
536         dev->data->nb_rx_queues = nb_queues;
537         return 0;
538 }
539
540 int
541 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
542 {
543         struct rte_eth_dev *dev;
544
545         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
546
547         dev = &rte_eth_devices[port_id];
548         if (rx_queue_id >= dev->data->nb_rx_queues) {
549                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
550                 return -EINVAL;
551         }
552
553         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
554
555         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
556                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
557                         " already started\n",
558                         rx_queue_id, port_id);
559                 return 0;
560         }
561
562         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
563                                                              rx_queue_id));
564
565 }
566
567 int
568 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
569 {
570         struct rte_eth_dev *dev;
571
572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
573
574         dev = &rte_eth_devices[port_id];
575         if (rx_queue_id >= dev->data->nb_rx_queues) {
576                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
577                 return -EINVAL;
578         }
579
580         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
581
582         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
583                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
584                         " already stopped\n",
585                         rx_queue_id, port_id);
586                 return 0;
587         }
588
589         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
590
591 }
592
593 int
594 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
595 {
596         struct rte_eth_dev *dev;
597
598         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
599
600         dev = &rte_eth_devices[port_id];
601         if (tx_queue_id >= dev->data->nb_tx_queues) {
602                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
603                 return -EINVAL;
604         }
605
606         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
607
608         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
609                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
610                         " already started\n",
611                         tx_queue_id, port_id);
612                 return 0;
613         }
614
615         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
616                                                              tx_queue_id));
617
618 }
619
620 int
621 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
622 {
623         struct rte_eth_dev *dev;
624
625         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
626
627         dev = &rte_eth_devices[port_id];
628         if (tx_queue_id >= dev->data->nb_tx_queues) {
629                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
630                 return -EINVAL;
631         }
632
633         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
634
635         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
636                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu16
637                         " already stopped\n",
638                         tx_queue_id, port_id);
639                 return 0;
640         }
641
642         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
643
644 }
645
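/* Same as rte_eth_dev_rx_queue_config(), but for the Tx queue array. */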
646 static int
647 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
648 {
649         uint16_t old_nb_queues = dev->data->nb_tx_queues;
650         void **txq;
651         unsigned i;
652
653         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
654                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
655                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
656                                                    RTE_CACHE_LINE_SIZE);
657                 if (dev->data->tx_queues == NULL) {
658                         dev->data->nb_tx_queues = 0;
659                         return -(ENOMEM);
660                 }
661         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
662                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
663
664                 txq = dev->data->tx_queues;
665
666                 for (i = nb_queues; i < old_nb_queues; i++)
667                         (*dev->dev_ops->tx_queue_release)(txq[i]);
668                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
669                                   RTE_CACHE_LINE_SIZE);
670                 if (txq == NULL)
671                         return -ENOMEM;
672                 if (nb_queues > old_nb_queues) {
673                         uint16_t new_qs = nb_queues - old_nb_queues;
674
675                         memset(txq + old_nb_queues, 0,
676                                sizeof(txq[0]) * new_qs);
677                 }
678
679                 dev->data->tx_queues = txq;
680
681         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
682                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
683
684                 txq = dev->data->tx_queues;
685
686                 for (i = nb_queues; i < old_nb_queues; i++)
687                         (*dev->dev_ops->tx_queue_release)(txq[i]);
688
689                 rte_free(dev->data->tx_queues);
690                 dev->data->tx_queues = NULL;
691         }
692         dev->data->nb_tx_queues = nb_queues;
693         return 0;
694 }
695
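/* Translate a numeric link speed (in Mbps) and a duplex setting into the
 * corresponding ETH_LINK_SPEED_* capability bit; duplex only matters for
 * 10M and 100M links. Unknown speeds return 0.
 */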
696 uint32_t
697 rte_eth_speed_bitflag(uint32_t speed, int duplex)
698 {
699         switch (speed) {
700         case ETH_SPEED_NUM_10M:
701                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
702         case ETH_SPEED_NUM_100M:
703                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
704         case ETH_SPEED_NUM_1G:
705                 return ETH_LINK_SPEED_1G;
706         case ETH_SPEED_NUM_2_5G:
707                 return ETH_LINK_SPEED_2_5G;
708         case ETH_SPEED_NUM_5G:
709                 return ETH_LINK_SPEED_5G;
710         case ETH_SPEED_NUM_10G:
711                 return ETH_LINK_SPEED_10G;
712         case ETH_SPEED_NUM_20G:
713                 return ETH_LINK_SPEED_20G;
714         case ETH_SPEED_NUM_25G:
715                 return ETH_LINK_SPEED_25G;
716         case ETH_SPEED_NUM_40G:
717                 return ETH_LINK_SPEED_40G;
718         case ETH_SPEED_NUM_50G:
719                 return ETH_LINK_SPEED_50G;
720         case ETH_SPEED_NUM_56G:
721                 return ETH_LINK_SPEED_56G;
722         case ETH_SPEED_NUM_100G:
723                 return ETH_LINK_SPEED_100G;
724         default:
725                 return 0;
726         }
727 }
728
729 /**
730  * Convert the legacy rxmode bit-field flags into a DEV_RX_OFFLOAD_* mask.
731  */
732 static void
733 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
734                                     uint64_t *rx_offloads)
735 {
736         uint64_t offloads = 0;
737
738         if (rxmode->header_split == 1)
739                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
740         if (rxmode->hw_ip_checksum == 1)
741                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
742         if (rxmode->hw_vlan_filter == 1)
743                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
744         if (rxmode->hw_vlan_strip == 1)
745                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
746         if (rxmode->hw_vlan_extend == 1)
747                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
748         if (rxmode->jumbo_frame == 1)
749                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
750         if (rxmode->hw_strip_crc == 1)
751                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
752         if (rxmode->enable_scatter == 1)
753                 offloads |= DEV_RX_OFFLOAD_SCATTER;
754         if (rxmode->enable_lro == 1)
755                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
756         if (rxmode->hw_timestamp == 1)
757                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
758         if (rxmode->security == 1)
759                 offloads |= DEV_RX_OFFLOAD_SECURITY;
760
761         *rx_offloads = offloads;
762 }
763
764 /**
765  * Convert a DEV_RX_OFFLOAD_* mask back into the legacy rxmode bit-fields.
766  */
767 static void
768 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
769                             struct rte_eth_rxmode *rxmode)
770 {
771
772         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
773                 rxmode->header_split = 1;
774         else
775                 rxmode->header_split = 0;
776         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
777                 rxmode->hw_ip_checksum = 1;
778         else
779                 rxmode->hw_ip_checksum = 0;
780         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
781                 rxmode->hw_vlan_filter = 1;
782         else
783                 rxmode->hw_vlan_filter = 0;
784         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
785                 rxmode->hw_vlan_strip = 1;
786         else
787                 rxmode->hw_vlan_strip = 0;
788         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
789                 rxmode->hw_vlan_extend = 1;
790         else
791                 rxmode->hw_vlan_extend = 0;
792         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
793                 rxmode->jumbo_frame = 1;
794         else
795                 rxmode->jumbo_frame = 0;
796         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
797                 rxmode->hw_strip_crc = 1;
798         else
799                 rxmode->hw_strip_crc = 0;
800         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
801                 rxmode->enable_scatter = 1;
802         else
803                 rxmode->enable_scatter = 0;
804         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
805                 rxmode->enable_lro = 1;
806         else
807                 rxmode->enable_lro = 0;
808         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
809                 rxmode->hw_timestamp = 1;
810         else
811                 rxmode->hw_timestamp = 0;
812         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
813                 rxmode->security = 1;
814         else
815                 rxmode->security = 0;
816 }
817
818 const char * __rte_experimental
819 rte_eth_dev_rx_offload_name(uint64_t offload)
820 {
821         const char *name = "UNKNOWN";
822         unsigned int i;
823
824         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
825                 if (offload == rte_rx_offload_names[i].offload) {
826                         name = rte_rx_offload_names[i].name;
827                         break;
828                 }
829         }
830
831         return name;
832 }
833
834 const char * __rte_experimental
835 rte_eth_dev_tx_offload_name(uint64_t offload)
836 {
837         const char *name = "UNKNOWN";
838         unsigned int i;
839
840         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
841                 if (offload == rte_tx_offload_names[i].offload) {
842                         name = rte_tx_offload_names[i].name;
843                         break;
844                 }
845         }
846
847         return name;
848 }
849
850 int
851 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
852                       const struct rte_eth_conf *dev_conf)
853 {
854         struct rte_eth_dev *dev;
855         struct rte_eth_dev_info dev_info;
856         struct rte_eth_conf local_conf = *dev_conf;
857         int diag;
858
859         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
860
861         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
862                 RTE_PMD_DEBUG_TRACE(
863                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
864                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
865                 return -EINVAL;
866         }
867
868         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
869                 RTE_PMD_DEBUG_TRACE(
870                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
871                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
872                 return -EINVAL;
873         }
874
875         dev = &rte_eth_devices[port_id];
876
877         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
878         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
879
880         if (dev->data->dev_started) {
881                 RTE_PMD_DEBUG_TRACE(
882                     "port %d must be stopped to allow configuration\n", port_id);
883                 return -EBUSY;
884         }
885
886         /*
887          * Convert between the bit-field and the offload-flag forms of the
888          * rxmode API so that PMDs only need to support one of them.
889          */
890         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
891                 rte_eth_convert_rx_offload_bitfield(
892                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
893         } else {
894                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
895                                             &local_conf.rxmode);
896         }
897
898         /* Copy the dev_conf parameter into the dev structure */
899         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
900
901         /*
902          * Check that the numbers of RX and TX queues are not greater
903          * than the maximum number of RX and TX queues supported by the
904          * configured device.
905          */
906         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
907
908         if (nb_rx_q == 0 && nb_tx_q == 0) {
909                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q and nb_tx_q cannot both be 0\n", port_id);
910                 return -EINVAL;
911         }
912
913         if (nb_rx_q > dev_info.max_rx_queues) {
914                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
915                                 port_id, nb_rx_q, dev_info.max_rx_queues);
916                 return -EINVAL;
917         }
918
919         if (nb_tx_q > dev_info.max_tx_queues) {
920                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
921                                 port_id, nb_tx_q, dev_info.max_tx_queues);
922                 return -EINVAL;
923         }
924
925         /* Check that the device supports requested interrupts */
926         if ((dev_conf->intr_conf.lsc == 1) &&
927                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
928                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
929                                         dev->device->driver->name);
930                         return -EINVAL;
931         }
932         if ((dev_conf->intr_conf.rmv == 1) &&
933             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
934                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
935                                     dev->device->driver->name);
936                 return -EINVAL;
937         }
938
939         /*
940          * If jumbo frames are enabled, check that the maximum RX packet
941          * length is supported by the configured device.
942          */
943         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
944                 if (dev_conf->rxmode.max_rx_pkt_len >
945                     dev_info.max_rx_pktlen) {
946                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
947                                 " > max valid value %u\n",
948                                 port_id,
949                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
950                                 (unsigned)dev_info.max_rx_pktlen);
951                         return -EINVAL;
952                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
953                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
954                                 " < min valid value %u\n",
955                                 port_id,
956                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
957                                 (unsigned)ETHER_MIN_LEN);
958                         return -EINVAL;
959                 }
960         } else {
961                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
962                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
963                         /* Use default value */
964                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
965                                                         ETHER_MAX_LEN;
966         }
967
968         /*
969          * Setup new number of RX/TX queues and reconfigure device.
970          */
971         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
972         if (diag != 0) {
973                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
974                                 port_id, diag);
975                 return diag;
976         }
977
978         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
979         if (diag != 0) {
980                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
981                                 port_id, diag);
982                 rte_eth_dev_rx_queue_config(dev, 0);
983                 return diag;
984         }
985
986         diag = (*dev->dev_ops->dev_configure)(dev);
987         if (diag != 0) {
988                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
989                                 port_id, diag);
990                 rte_eth_dev_rx_queue_config(dev, 0);
991                 rte_eth_dev_tx_queue_config(dev, 0);
992                 return eth_err(port_id, diag);
993         }
994
995         /* Initialize Rx profiling if enabled at compilation time. */
996         diag = __rte_eth_profile_rx_init(port_id, dev);
997         if (diag != 0) {
998                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
999                                 port_id, diag);
1000                 rte_eth_dev_rx_queue_config(dev, 0);
1001                 rte_eth_dev_tx_queue_config(dev, 0);
1002                 return eth_err(port_id, diag);
1003         }
1004
1005         return 0;
1006 }
1007
1008 void
1009 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1010 {
1011         if (dev->data->dev_started) {
1012                 RTE_PMD_DEBUG_TRACE(
1013                         "port %d must be stopped to allow reset\n",
1014                         dev->data->port_id);
1015                 return;
1016         }
1017
1018         rte_eth_dev_rx_queue_config(dev, 0);
1019         rte_eth_dev_tx_queue_config(dev, 0);
1020
1021         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1022 }
1023
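/* Replay the configuration stored in dev->data (MAC addresses, promiscuous
 * and all-multicast modes) into the hardware after the port has been started.
 */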
1024 static void
1025 rte_eth_dev_config_restore(uint16_t port_id)
1026 {
1027         struct rte_eth_dev *dev;
1028         struct rte_eth_dev_info dev_info;
1029         struct ether_addr *addr;
1030         uint16_t i;
1031         uint32_t pool = 0;
1032         uint64_t pool_mask;
1033
1034         dev = &rte_eth_devices[port_id];
1035
1036         rte_eth_dev_info_get(port_id, &dev_info);
1037
1038         /* replay MAC address configuration including default MAC */
1039         addr = &dev->data->mac_addrs[0];
1040         if (*dev->dev_ops->mac_addr_set != NULL)
1041                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1042         else if (*dev->dev_ops->mac_addr_add != NULL)
1043                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1044
1045         if (*dev->dev_ops->mac_addr_add != NULL) {
1046                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1047                         addr = &dev->data->mac_addrs[i];
1048
1049                         /* skip zero address */
1050                         if (is_zero_ether_addr(addr))
1051                                 continue;
1052
1053                         pool = 0;
1054                         pool_mask = dev->data->mac_pool_sel[i];
1055
1056                         do {
1057                                 if (pool_mask & 1ULL)
1058                                         (*dev->dev_ops->mac_addr_add)(dev,
1059                                                 addr, i, pool);
1060                                 pool_mask >>= 1;
1061                                 pool++;
1062                         } while (pool_mask);
1063                 }
1064         }
1065
1066         /* replay promiscuous configuration */
1067         if (rte_eth_promiscuous_get(port_id) == 1)
1068                 rte_eth_promiscuous_enable(port_id);
1069         else if (rte_eth_promiscuous_get(port_id) == 0)
1070                 rte_eth_promiscuous_disable(port_id);
1071
1072         /* replay all multicast configuration */
1073         if (rte_eth_allmulticast_get(port_id) == 1)
1074                 rte_eth_allmulticast_enable(port_id);
1075         else if (rte_eth_allmulticast_get(port_id) == 0)
1076                 rte_eth_allmulticast_disable(port_id);
1077 }
1078
1079 int
1080 rte_eth_dev_start(uint16_t port_id)
1081 {
1082         struct rte_eth_dev *dev;
1083         int diag;
1084
1085         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1086
1087         dev = &rte_eth_devices[port_id];
1088
1089         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1090
1091         if (dev->data->dev_started != 0) {
1092                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1093                         " already started\n",
1094                         port_id);
1095                 return 0;
1096         }
1097
1098         diag = (*dev->dev_ops->dev_start)(dev);
1099         if (diag == 0)
1100                 dev->data->dev_started = 1;
1101         else
1102                 return eth_err(port_id, diag);
1103
1104         rte_eth_dev_config_restore(port_id);
1105
1106         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1107                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1108                 (*dev->dev_ops->link_update)(dev, 0);
1109         }
1110         return 0;
1111 }
1112
1113 void
1114 rte_eth_dev_stop(uint16_t port_id)
1115 {
1116         struct rte_eth_dev *dev;
1117
1118         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1119         dev = &rte_eth_devices[port_id];
1120
1121         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1122
1123         if (dev->data->dev_started == 0) {
1124                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1125                         " already stopped\n",
1126                         port_id);
1127                 return;
1128         }
1129
1130         dev->data->dev_started = 0;
1131         (*dev->dev_ops->dev_stop)(dev);
1132 }
1133
1134 int
1135 rte_eth_dev_set_link_up(uint16_t port_id)
1136 {
1137         struct rte_eth_dev *dev;
1138
1139         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1140
1141         dev = &rte_eth_devices[port_id];
1142
1143         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1144         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1145 }
1146
1147 int
1148 rte_eth_dev_set_link_down(uint16_t port_id)
1149 {
1150         struct rte_eth_dev *dev;
1151
1152         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1153
1154         dev = &rte_eth_devices[port_id];
1155
1156         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1157         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1158 }
1159
1160 void
1161 rte_eth_dev_close(uint16_t port_id)
1162 {
1163         struct rte_eth_dev *dev;
1164
1165         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1166         dev = &rte_eth_devices[port_id];
1167
1168         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1169         dev->data->dev_started = 0;
1170         (*dev->dev_ops->dev_close)(dev);
1171
1172         dev->data->nb_rx_queues = 0;
1173         rte_free(dev->data->rx_queues);
1174         dev->data->rx_queues = NULL;
1175         dev->data->nb_tx_queues = 0;
1176         rte_free(dev->data->tx_queues);
1177         dev->data->tx_queues = NULL;
1178 }
1179
1180 int
1181 rte_eth_dev_reset(uint16_t port_id)
1182 {
1183         struct rte_eth_dev *dev;
1184         int ret;
1185
1186         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1187         dev = &rte_eth_devices[port_id];
1188
1189         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1190
1191         rte_eth_dev_stop(port_id);
1192         ret = dev->dev_ops->dev_reset(dev);
1193
1194         return eth_err(port_id, ret);
1195 }
1196
1197 int __rte_experimental
1198 rte_eth_dev_is_removed(uint16_t port_id)
1199 {
1200         struct rte_eth_dev *dev;
1201         int ret;
1202
1203         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1204
1205         dev = &rte_eth_devices[port_id];
1206
1207         if (dev->state == RTE_ETH_DEV_REMOVED)
1208                 return 1;
1209
1210         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1211
1212         ret = dev->dev_ops->is_removed(dev);
1213         if (ret != 0)
1214                 /* Device is physically removed. */
1215                 dev->state = RTE_ETH_DEV_REMOVED;
1216
1217         return ret;
1218 }
1219
1220 int
1221 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1222                        uint16_t nb_rx_desc, unsigned int socket_id,
1223                        const struct rte_eth_rxconf *rx_conf,
1224                        struct rte_mempool *mp)
1225 {
1226         int ret;
1227         uint32_t mbp_buf_size;
1228         struct rte_eth_dev *dev;
1229         struct rte_eth_dev_info dev_info;
1230         struct rte_eth_rxconf local_conf;
1231         void **rxq;
1232
1233         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1234
1235         dev = &rte_eth_devices[port_id];
1236         if (rx_queue_id >= dev->data->nb_rx_queues) {
1237                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1238                 return -EINVAL;
1239         }
1240
1241         if (dev->data->dev_started) {
1242                 RTE_PMD_DEBUG_TRACE(
1243                     "port %d must be stopped to allow configuration\n", port_id);
1244                 return -EBUSY;
1245         }
1246
1247         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1248         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1249
1250         /*
1251          * Check the size of the mbuf data buffer.
1252          * This value must be provided in the private data of the memory pool.
1253          * First check that the memory pool has valid private data.
1254          */
1255         rte_eth_dev_info_get(port_id, &dev_info);
1256         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1257                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1258                                 mp->name, (int) mp->private_data_size,
1259                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1260                 return -ENOSPC;
1261         }
1262         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1263
1264         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1265                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1266                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1267                                 "=%d)\n",
1268                                 mp->name,
1269                                 (int)mbp_buf_size,
1270                                 (int)(RTE_PKTMBUF_HEADROOM +
1271                                       dev_info.min_rx_bufsize),
1272                                 (int)RTE_PKTMBUF_HEADROOM,
1273                                 (int)dev_info.min_rx_bufsize);
1274                 return -EINVAL;
1275         }
1276
1277         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1278                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1279                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1280
1281                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1282                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1283                         nb_rx_desc,
1284                         dev_info.rx_desc_lim.nb_max,
1285                         dev_info.rx_desc_lim.nb_min,
1286                         dev_info.rx_desc_lim.nb_align);
1287                 return -EINVAL;
1288         }
1289
1290         rxq = dev->data->rx_queues;
1291         if (rxq[rx_queue_id]) {
1292                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1293                                         -ENOTSUP);
1294                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1295                 rxq[rx_queue_id] = NULL;
1296         }
1297
1298         if (rx_conf == NULL)
1299                 rx_conf = &dev_info.default_rxconf;
1300
1301         local_conf = *rx_conf;
1302         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1303                 /*
1304                  * Reflect the port offloads into the queue offloads so that
1305                  * port-level offloads are not discarded.
1306                  */
1307                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1308                                                     &local_conf.offloads);
1309         }
1310
1311         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1312                                               socket_id, &local_conf, mp);
1313         if (!ret) {
1314                 if (!dev->data->min_rx_buf_size ||
1315                     dev->data->min_rx_buf_size > mbp_buf_size)
1316                         dev->data->min_rx_buf_size = mbp_buf_size;
1317         }
1318
1319         return eth_err(port_id, ret);
1320 }
1321
1322 /**
1323  * Convert the legacy ETH_TXQ_FLAGS_* flags into a DEV_TX_OFFLOAD_* mask.
1324  */
1325 static void
1326 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1327 {
1328         uint64_t offloads = 0;
1329
1330         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1331                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1332         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1333                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1334         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1335                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1336         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1337                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1338         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1339                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1340         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1341             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1342                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1343
1344         *tx_offloads = offloads;
1345 }
1346
1347 /**
1348  * Convert a DEV_TX_OFFLOAD_* mask back into the legacy ETH_TXQ_FLAGS_* flags.
1349  */
1350 static void
1351 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1352 {
1353         uint32_t flags = 0;
1354
1355         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1356                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1357         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1358                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1359         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1360                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1361         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1362                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1363         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1364                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1365         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1366                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1367
1368         *txq_flags = flags;
1369 }
1370
1371 int
1372 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1373                        uint16_t nb_tx_desc, unsigned int socket_id,
1374                        const struct rte_eth_txconf *tx_conf)
1375 {
1376         struct rte_eth_dev *dev;
1377         struct rte_eth_dev_info dev_info;
1378         struct rte_eth_txconf local_conf;
1379         void **txq;
1380
1381         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1382
1383         dev = &rte_eth_devices[port_id];
1384         if (tx_queue_id >= dev->data->nb_tx_queues) {
1385                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1386                 return -EINVAL;
1387         }
1388
1389         if (dev->data->dev_started) {
1390                 RTE_PMD_DEBUG_TRACE(
1391                     "port %d must be stopped to allow configuration\n", port_id);
1392                 return -EBUSY;
1393         }
1394
1395         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1397
1398         rte_eth_dev_info_get(port_id, &dev_info);
1399
1400         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1401             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1402             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1403                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1404                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1405                                 nb_tx_desc,
1406                                 dev_info.tx_desc_lim.nb_max,
1407                                 dev_info.tx_desc_lim.nb_min,
1408                                 dev_info.tx_desc_lim.nb_align);
1409                 return -EINVAL;
1410         }
1411
1412         txq = dev->data->tx_queues;
1413         if (txq[tx_queue_id]) {
1414                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1415                                         -ENOTSUP);
1416                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1417                 txq[tx_queue_id] = NULL;
1418         }
1419
1420         if (tx_conf == NULL)
1421                 tx_conf = &dev_info.default_txconf;
1422
1423         /*
1424          * Convert between the txq_flags and the offload-flag forms of the
1425          * Tx queue API so that PMDs only need to support one of them.
1426          */
1427         local_conf = *tx_conf;
1428         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1429                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1430                                              &local_conf.txq_flags);
1431                 /* Keep the ignore flag. */
1432                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1433         } else {
1434                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1435                                           &local_conf.offloads);
1436         }
1437
1438         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1439                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1440 }
1441
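/* Default error callback installed by rte_eth_tx_buffer_init() when the
 * application has not registered its own: simply free the unsent packets.
 */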
1442 void
1443 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1444                 void *userdata __rte_unused)
1445 {
1446         unsigned i;
1447
1448         for (i = 0; i < unsent; i++)
1449                 rte_pktmbuf_free(pkts[i]);
1450 }
1451
1452 void
1453 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1454                 void *userdata)
1455 {
1456         uint64_t *count = userdata;
1457         unsigned i;
1458
1459         for (i = 0; i < unsent; i++)
1460                 rte_pktmbuf_free(pkts[i]);
1461
1462         *count += unsent;
1463 }
1464
1465 int
1466 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1467                 buffer_tx_error_fn cbfn, void *userdata)
1468 {
1469         buffer->error_callback = cbfn;
1470         buffer->error_userdata = userdata;
1471         return 0;
1472 }
1473
1474 int
1475 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1476 {
1477         int ret = 0;
1478
1479         if (buffer == NULL)
1480                 return -EINVAL;
1481
1482         buffer->size = size;
1483         if (buffer->error_callback == NULL) {
1484                 ret = rte_eth_tx_buffer_set_err_callback(
1485                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1486         }
1487
1488         return ret;
1489 }
1490
1491 int
1492 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1493 {
1494         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1495         int ret;
1496
1497         /* Validate Input Data. Bail if not valid or not supported. */
1498         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1499         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1500
1501         /* Call driver to free pending mbufs. */
1502         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1503                                                free_cnt);
1504         return eth_err(port_id, ret);
1505 }
1506
1507 void
1508 rte_eth_promiscuous_enable(uint16_t port_id)
1509 {
1510         struct rte_eth_dev *dev;
1511
1512         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1513         dev = &rte_eth_devices[port_id];
1514
1515         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1516         (*dev->dev_ops->promiscuous_enable)(dev);
1517         dev->data->promiscuous = 1;
1518 }
1519
1520 void
1521 rte_eth_promiscuous_disable(uint16_t port_id)
1522 {
1523         struct rte_eth_dev *dev;
1524
1525         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1526         dev = &rte_eth_devices[port_id];
1527
1528         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1529         dev->data->promiscuous = 0;
1530         (*dev->dev_ops->promiscuous_disable)(dev);
1531 }
1532
1533 int
1534 rte_eth_promiscuous_get(uint16_t port_id)
1535 {
1536         struct rte_eth_dev *dev;
1537
1538         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1539
1540         dev = &rte_eth_devices[port_id];
1541         return dev->data->promiscuous;
1542 }
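
/*
 * Editor's note (illustrative only): enabling promiscuous mode and checking
 * that the PMD actually applied it, for a valid, started port_id.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	if (rte_eth_promiscuous_get(port_id) != 1)
 *		printf("port %u: promiscuous mode not enabled\n", port_id);
 */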
1543
1544 void
1545 rte_eth_allmulticast_enable(uint16_t port_id)
1546 {
1547         struct rte_eth_dev *dev;
1548
1549         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1550         dev = &rte_eth_devices[port_id];
1551
1552         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1553         (*dev->dev_ops->allmulticast_enable)(dev);
1554         dev->data->all_multicast = 1;
1555 }
1556
1557 void
1558 rte_eth_allmulticast_disable(uint16_t port_id)
1559 {
1560         struct rte_eth_dev *dev;
1561
1562         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1563         dev = &rte_eth_devices[port_id];
1564
1565         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1566         dev->data->all_multicast = 0;
1567         (*dev->dev_ops->allmulticast_disable)(dev);
1568 }
1569
1570 int
1571 rte_eth_allmulticast_get(uint16_t port_id)
1572 {
1573         struct rte_eth_dev *dev;
1574
1575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1576
1577         dev = &rte_eth_devices[port_id];
1578         return dev->data->all_multicast;
1579 }
1580
1581 static inline int
1582 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1583                                 struct rte_eth_link *link)
1584 {
1585         struct rte_eth_link *dst = link;
1586         struct rte_eth_link *src = &(dev->data->dev_link);
1587
1588         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1589                                         *(uint64_t *)src) == 0)
1590                 return -1;
1591
1592         return 0;
1593 }
1594
1595 void
1596 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1597 {
1598         struct rte_eth_dev *dev;
1599
1600         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1601         dev = &rte_eth_devices[port_id];
1602
1603         if (dev->data->dev_conf.intr_conf.lsc != 0)
1604                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1605         else {
1606                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1607                 (*dev->dev_ops->link_update)(dev, 1);
1608                 *eth_link = dev->data->dev_link;
1609         }
1610 }
1611
1612 void
1613 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1614 {
1615         struct rte_eth_dev *dev;
1616
1617         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1618         dev = &rte_eth_devices[port_id];
1619
1620         if (dev->data->dev_conf.intr_conf.lsc != 0)
1621                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1622         else {
1623                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1624                 (*dev->dev_ops->link_update)(dev, 0);
1625                 *eth_link = dev->data->dev_link;
1626         }
1627 }
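
/*
 * Editor's note (illustrative only): a typical link-status poll after starting
 * a port when the application does not rely on LSC interrupts.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u: up at %u Mbps\n", port_id, link.link_speed);
 */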
1628
1629 int
1630 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1631 {
1632         struct rte_eth_dev *dev;
1633
1634         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1635
1636         dev = &rte_eth_devices[port_id];
1637         memset(stats, 0, sizeof(*stats));
1638
1639         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1640         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1641         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1642 }
1643
1644 int
1645 rte_eth_stats_reset(uint16_t port_id)
1646 {
1647         struct rte_eth_dev *dev;
1648
1649         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1650         dev = &rte_eth_devices[port_id];
1651
1652         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1653         (*dev->dev_ops->stats_reset)(dev);
1654         dev->data->rx_mbuf_alloc_failed = 0;
1655
1656         return 0;
1657 }
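
/*
 * Editor's note (illustrative only): reading and then clearing the basic
 * counters for a valid port_id.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *		       st.ipackets, st.opackets, st.imissed);
 *	rte_eth_stats_reset(port_id);
 */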
1658
1659 static inline int
1660 get_xstats_basic_count(struct rte_eth_dev *dev)
1661 {
1662         uint16_t nb_rxqs, nb_txqs;
1663         int count;
1664
1665         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1666         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1667
1668         count = RTE_NB_STATS;
1669         count += nb_rxqs * RTE_NB_RXQ_STATS;
1670         count += nb_txqs * RTE_NB_TXQ_STATS;
1671
1672         return count;
1673 }
1674
1675 static int
1676 get_xstats_count(uint16_t port_id)
1677 {
1678         struct rte_eth_dev *dev;
1679         int count;
1680
1681         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1682         dev = &rte_eth_devices[port_id];
1683         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1684                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1685                                 NULL, 0);
1686                 if (count < 0)
1687                         return eth_err(port_id, count);
1688         }
1689         if (dev->dev_ops->xstats_get_names != NULL) {
1690                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1691                 if (count < 0)
1692                         return eth_err(port_id, count);
1693         } else
1694                 count = 0;
1695
1696
1697         count += get_xstats_basic_count(dev);
1698
1699         return count;
1700 }
1701
1702 int
1703 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1704                 uint64_t *id)
1705 {
1706         int cnt_xstats, idx_xstat;
1707
1708         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1709
1710         if (!id) {
1711                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1712                 return -ENOMEM;
1713         }
1714
1715         if (!xstat_name) {
1716                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1717                 return -ENOMEM;
1718         }
1719
1720         /* Get count */
1721         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1722         if (cnt_xstats  < 0) {
1723                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1724                 return -ENODEV;
1725         }
1726
1727         /* Get id-name lookup table */
1728         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1729
1730         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1731                         port_id, xstats_names, cnt_xstats, NULL)) {
1732                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1733                 return -1;
1734         }
1735
1736         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1737                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1738                         *id = idx_xstat;
1739                         return 0;
1740                 }
1741         }
1742
1743         return -EINVAL;
1744 }
1745
1746 /* retrieve basic stats names */
1747 static int
1748 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1749         struct rte_eth_xstat_name *xstats_names)
1750 {
1751         int cnt_used_entries = 0;
1752         uint32_t idx, id_queue;
1753         uint16_t num_q;
1754
1755         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1756                 snprintf(xstats_names[cnt_used_entries].name,
1757                         sizeof(xstats_names[0].name),
1758                         "%s", rte_stats_strings[idx].name);
1759                 cnt_used_entries++;
1760         }
1761         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1762         for (id_queue = 0; id_queue < num_q; id_queue++) {
1763                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1764                         snprintf(xstats_names[cnt_used_entries].name,
1765                                 sizeof(xstats_names[0].name),
1766                                 "rx_q%u%s",
1767                                 id_queue, rte_rxq_stats_strings[idx].name);
1768                         cnt_used_entries++;
1769                 }
1770
1771         }
1772         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1773         for (id_queue = 0; id_queue < num_q; id_queue++) {
1774                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1775                         snprintf(xstats_names[cnt_used_entries].name,
1776                                 sizeof(xstats_names[0].name),
1777                                 "tx_q%u%s",
1778                                 id_queue, rte_txq_stats_strings[idx].name);
1779                         cnt_used_entries++;
1780                 }
1781         }
1782         return cnt_used_entries;
1783 }
1784
1785 /* retrieve ethdev extended statistics names */
1786 int
1787 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1788         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1789         uint64_t *ids)
1790 {
1791         struct rte_eth_xstat_name *xstats_names_copy;
1792         unsigned int no_basic_stat_requested = 1;
1793         unsigned int no_ext_stat_requested = 1;
1794         unsigned int expected_entries;
1795         unsigned int basic_count;
1796         struct rte_eth_dev *dev;
1797         unsigned int i;
1798         int ret;
1799
1800         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1801         dev = &rte_eth_devices[port_id];
1802
1803         basic_count = get_xstats_basic_count(dev);
1804         ret = get_xstats_count(port_id);
1805         if (ret < 0)
1806                 return ret;
1807         expected_entries = (unsigned int)ret;
1808
1809         /* Return max number of stats if no ids given */
1810         if (!ids) {
1811                 if (!xstats_names)
1812                         return expected_entries;
1813                 else if (size < expected_entries)
1814                         return expected_entries;
1815         }
1816
1817         if (ids && !xstats_names)
1818                 return -EINVAL;
1819
1820         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1821                 uint64_t ids_copy[size];
1822
1823                 for (i = 0; i < size; i++) {
1824                         if (ids[i] < basic_count) {
1825                                 no_basic_stat_requested = 0;
1826                                 break;
1827                         }
1828
1829                         /*
1830                          * Convert ids to xstats ids that PMD knows.
1831                          * ids known by user are basic + extended stats.
1832                          */
1833                         ids_copy[i] = ids[i] - basic_count;
1834                 }
1835
1836                 if (no_basic_stat_requested)
1837                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1838                                         xstats_names, ids_copy, size);
1839         }
1840
1841         /* Retrieve all stats */
1842         if (!ids) {
1843                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1844                                 expected_entries);
1845                 if (num_stats < 0 || num_stats > (int)expected_entries)
1846                         return num_stats;
1847                 else
1848                         return expected_entries;
1849         }
1850
1851         xstats_names_copy = calloc(expected_entries,
1852                 sizeof(struct rte_eth_xstat_name));
1853
1854         if (!xstats_names_copy) {
1855                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
1856                 return -ENOMEM;
1857         }
1858
1859         if (ids) {
1860                 for (i = 0; i < size; i++) {
1861                         if (ids[i] >= basic_count) {
1862                                 no_ext_stat_requested = 0;
1863                                 break;
1864                         }
1865                 }
1866         }
1867
1868         /* Fill xstats_names_copy structure */
1869         if (ids && no_ext_stat_requested) {
1870                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
1871         } else {
1872                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
1873                         expected_entries);
1874                 if (ret < 0) {
1875                         free(xstats_names_copy);
1876                         return ret;
1877                 }
1878         }
1879
1880         /* Filter stats */
1881         for (i = 0; i < size; i++) {
1882                 if (ids[i] >= expected_entries) {
1883                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1884                         free(xstats_names_copy);
1885                         return -1;
1886                 }
1887                 xstats_names[i] = xstats_names_copy[ids[i]];
1888         }
1889
1890         free(xstats_names_copy);
1891         return size;
1892 }
1893
1894 int
1895 rte_eth_xstats_get_names(uint16_t port_id,
1896         struct rte_eth_xstat_name *xstats_names,
1897         unsigned int size)
1898 {
1899         struct rte_eth_dev *dev;
1900         int cnt_used_entries;
1901         int cnt_expected_entries;
1902         int cnt_driver_entries;
1903
1904         cnt_expected_entries = get_xstats_count(port_id);
1905         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1906                         (int)size < cnt_expected_entries)
1907                 return cnt_expected_entries;
1908
1909         /* port_id checked in get_xstats_count() */
1910         dev = &rte_eth_devices[port_id];
1911
1912         cnt_used_entries = rte_eth_basic_stats_get_names(
1913                 dev, xstats_names);
1914
1915         if (dev->dev_ops->xstats_get_names != NULL) {
1916                 /* If there are any driver-specific xstats, append them
1917                  * to end of list.
1918                  */
1919                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1920                         dev,
1921                         xstats_names + cnt_used_entries,
1922                         size - cnt_used_entries);
1923                 if (cnt_driver_entries < 0)
1924                         return eth_err(port_id, cnt_driver_entries);
1925                 cnt_used_entries += cnt_driver_entries;
1926         }
1927
1928         return cnt_used_entries;
1929 }
1930
1931
1932 static int
1933 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
1934 {
1935         struct rte_eth_dev *dev;
1936         struct rte_eth_stats eth_stats;
1937         unsigned int count = 0, i, q;
1938         uint64_t val, *stats_ptr;
1939         uint16_t nb_rxqs, nb_txqs;
1940         int ret;
1941
1942         ret = rte_eth_stats_get(port_id, &eth_stats);
1943         if (ret < 0)
1944                 return ret;
1945
1946         dev = &rte_eth_devices[port_id];
1947
1948         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1949         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1950
1951         /* global stats */
1952         for (i = 0; i < RTE_NB_STATS; i++) {
1953                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1954                                         rte_stats_strings[i].offset);
1955                 val = *stats_ptr;
1956                 xstats[count++].value = val;
1957         }
1958
1959         /* per-rxq stats */
1960         for (q = 0; q < nb_rxqs; q++) {
1961                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1962                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1963                                         rte_rxq_stats_strings[i].offset +
1964                                         q * sizeof(uint64_t));
1965                         val = *stats_ptr;
1966                         xstats[count++].value = val;
1967                 }
1968         }
1969
1970         /* per-txq stats */
1971         for (q = 0; q < nb_txqs; q++) {
1972                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1973                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1974                                         rte_txq_stats_strings[i].offset +
1975                                         q * sizeof(uint64_t));
1976                         val = *stats_ptr;
1977                         xstats[count++].value = val;
1978                 }
1979         }
1980         return count;
1981 }
1982
1983 /* retrieve ethdev extended statistics */
1984 int
1985 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1986                          uint64_t *values, unsigned int size)
1987 {
1988         unsigned int no_basic_stat_requested = 1;
1989         unsigned int no_ext_stat_requested = 1;
1990         unsigned int num_xstats_filled;
1991         unsigned int basic_count;
1992         uint16_t expected_entries;
1993         struct rte_eth_dev *dev;
1994         unsigned int i;
1995         int ret;
1996
1997         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1998         ret = get_xstats_count(port_id);
1999         if (ret < 0)
2000                 return ret;
2001         expected_entries = (uint16_t)ret;
2002         struct rte_eth_xstat xstats[expected_entries];
2003         dev = &rte_eth_devices[port_id];
2004         basic_count = get_xstats_basic_count(dev);
2005
2006         /* Return max number of stats if no ids given */
2007         if (!ids) {
2008                 if (!values)
2009                         return expected_entries;
2010                 else if (size < expected_entries)
2011                         return expected_entries;
2012         }
2013
2014         if (ids && !values)
2015                 return -EINVAL;
2016
2017         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2019                 uint64_t ids_copy[size];
2020
2021                 for (i = 0; i < size; i++) {
2022                         if (ids[i] < basic_count) {
2023                                 no_basic_stat_requested = 0;
2024                                 break;
2025                         }
2026
2027                         /*
2028                          * Convert ids to xstats ids that PMD knows.
2029                          * ids known by user are basic + extended stats.
2030                          */
2031                         ids_copy[i] = ids[i] - basic_count;
2032                 }
2033
2034                 if (no_basic_stat_requested)
2035                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2036                                         values, size);
2037         }
2038
2039         if (ids) {
2040                 for (i = 0; i < size; i++) {
2041                         if (ids[i] >= basic_count) {
2042                                 no_ext_stat_requested = 0;
2043                                 break;
2044                         }
2045                 }
2046         }
2047
2048         /* Fill the xstats structure */
2049         if (ids && no_ext_stat_requested)
2050                 ret = rte_eth_basic_stats_get(port_id, xstats);
2051         else
2052                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2053
2054         if (ret < 0)
2055                 return ret;
2056         num_xstats_filled = (unsigned int)ret;
2057
2058         /* Return all stats */
2059         if (!ids) {
2060                 for (i = 0; i < num_xstats_filled; i++)
2061                         values[i] = xstats[i].value;
2062                 return expected_entries;
2063         }
2064
2065         /* Filter stats */
2066         for (i = 0; i < size; i++) {
2067                 if (ids[i] >= expected_entries) {
2068                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2069                         return -1;
2070                 }
2071                 values[i] = xstats[ids[i]].value;
2072         }
2073         return size;
2074 }
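
/*
 * Editor's note (illustrative only): looking a single counter up by name and
 * then reading it by id; "rx_good_packets" is one of the basic stats names
 * declared at the top of this file.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */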
2075
2076 int
2077 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2078         unsigned int n)
2079 {
2080         struct rte_eth_dev *dev;
2081         unsigned int count = 0, i;
2082         signed int xcount = 0;
2083         uint16_t nb_rxqs, nb_txqs;
2084         int ret;
2085
2086         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2087
2088         dev = &rte_eth_devices[port_id];
2089
2090         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2091         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2092
2093         /* Return generic statistics */
2094         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2095                 (nb_txqs * RTE_NB_TXQ_STATS);
2096
2097         /* implemented by the driver */
2098         if (dev->dev_ops->xstats_get != NULL) {
2099                 /* Retrieve the xstats from the driver at the end of the
2100                  * xstats struct.
2101                  */
2102                 xcount = (*dev->dev_ops->xstats_get)(dev,
2103                                      xstats ? xstats + count : NULL,
2104                                      (n > count) ? n - count : 0);
2105
2106                 if (xcount < 0)
2107                         return eth_err(port_id, xcount);
2108         }
2109
2110         if (n < count + xcount || xstats == NULL)
2111                 return count + xcount;
2112
2113         /* now fill the xstats structure */
2114         ret = rte_eth_basic_stats_get(port_id, xstats);
2115         if (ret < 0)
2116                 return ret;
2117         count = ret;
2118
2119         for (i = 0; i < count; i++)
2120                 xstats[i].id = i;
2121         /* add an offset to driver-specific stats */
2122         for ( ; i < count + xcount; i++)
2123                 xstats[i].id += count;
2124
2125         return count + xcount;
2126 }
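
/*
 * Editor's note (illustrative only): the usual two-step pattern for dumping
 * all extended statistics; the first call with a NULL array just returns the
 * number of entries to allocate. Error handling is omitted for brevity.
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	if (rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, vals, n) == n) {
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
 *	}
 *	free(vals);
 *	free(names);
 */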
2127
2128 /* reset ethdev extended statistics */
2129 void
2130 rte_eth_xstats_reset(uint16_t port_id)
2131 {
2132         struct rte_eth_dev *dev;
2133
2134         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2135         dev = &rte_eth_devices[port_id];
2136
2137         /* implemented by the driver */
2138         if (dev->dev_ops->xstats_reset != NULL) {
2139                 (*dev->dev_ops->xstats_reset)(dev);
2140                 return;
2141         }
2142
2143         /* fallback to default */
2144         rte_eth_stats_reset(port_id);
2145 }
2146
2147 static int
2148 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2149                 uint8_t is_rx)
2150 {
2151         struct rte_eth_dev *dev;
2152
2153         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2154
2155         dev = &rte_eth_devices[port_id];
2156
2157         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2158         return (*dev->dev_ops->queue_stats_mapping_set)
2159                         (dev, queue_id, stat_idx, is_rx);
2160 }
2161
2162
2163 int
2164 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2165                 uint8_t stat_idx)
2166 {
2167         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2168                                                 stat_idx, STAT_QMAP_TX));
2169 }
2170
2171
2172 int
2173 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2174                 uint8_t stat_idx)
2175 {
2176         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2177                                                 stat_idx, STAT_QMAP_RX));
2178 }
2179
2180 int
2181 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2182 {
2183         struct rte_eth_dev *dev;
2184
2185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2186         dev = &rte_eth_devices[port_id];
2187
2188         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2189         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2190                                                         fw_version, fw_size));
2191 }
2192
2193 void
2194 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2195 {
2196         struct rte_eth_dev *dev;
2197         const struct rte_eth_desc_lim lim = {
2198                 .nb_max = UINT16_MAX,
2199                 .nb_min = 0,
2200                 .nb_align = 1,
2201         };
2202
2203         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2204         dev = &rte_eth_devices[port_id];
2205
2206         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2207         dev_info->rx_desc_lim = lim;
2208         dev_info->tx_desc_lim = lim;
2209
2210         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2211         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2212         dev_info->driver_name = dev->device->driver->name;
2213         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2214         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2215 }
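
/*
 * Editor's note (illustrative only): querying device limits before queue setup
 * so the requested ring sizes never exceed what the PMD reports; nb_rxd and
 * nb_txd are application-chosen descriptor counts.
 *
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	nb_rxd = RTE_MIN(nb_rxd, info.rx_desc_lim.nb_max);
 *	nb_txd = RTE_MIN(nb_txd, info.tx_desc_lim.nb_max);
 */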
2216
2217 int
2218 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2219                                  uint32_t *ptypes, int num)
2220 {
2221         int i, j;
2222         struct rte_eth_dev *dev;
2223         const uint32_t *all_ptypes;
2224
2225         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2226         dev = &rte_eth_devices[port_id];
2227         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2228         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2229
2230         if (!all_ptypes)
2231                 return 0;
2232
2233         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2234                 if (all_ptypes[i] & ptype_mask) {
2235                         if (j < num)
2236                                 ptypes[j] = all_ptypes[i];
2237                         j++;
2238                 }
2239
2240         return j;
2241 }
2242
2243 void
2244 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2245 {
2246         struct rte_eth_dev *dev;
2247
2248         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2249         dev = &rte_eth_devices[port_id];
2250         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2251 }
2252
2253
2254 int
2255 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2256 {
2257         struct rte_eth_dev *dev;
2258
2259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260
2261         dev = &rte_eth_devices[port_id];
2262         *mtu = dev->data->mtu;
2263         return 0;
2264 }
2265
2266 int
2267 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2268 {
2269         int ret;
2270         struct rte_eth_dev *dev;
2271
2272         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2273         dev = &rte_eth_devices[port_id];
2274         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2275
2276         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2277         if (!ret)
2278                 dev->data->mtu = mtu;
2279
2280         return eth_err(port_id, ret);
2281 }
2282
2283 int
2284 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2285 {
2286         struct rte_eth_dev *dev;
2287         int ret;
2288
2289         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2290         dev = &rte_eth_devices[port_id];
2291         if (!(dev->data->dev_conf.rxmode.offloads &
2292               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2293                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2294                 return -ENOSYS;
2295         }
2296
2297         if (vlan_id > 4095) {
2298                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2299                                 port_id, (unsigned) vlan_id);
2300                 return -EINVAL;
2301         }
2302         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2303
2304         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2305         if (ret == 0) {
2306                 struct rte_vlan_filter_conf *vfc;
2307                 int vidx;
2308                 int vbit;
2309
2310                 vfc = &dev->data->vlan_filter_conf;
2311                 vidx = vlan_id / 64;
2312                 vbit = vlan_id % 64;
2313
2314                 if (on)
2315                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2316                 else
2317                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2318         }
2319
2320         return eth_err(port_id, ret);
2321 }
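
/*
 * Editor's note (illustrative only): accepting a single VLAN in hardware,
 * assuming DEV_RX_OFFLOAD_VLAN_FILTER was requested when the port was
 * configured (otherwise the call above returns -ENOSYS).
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 *	...
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);
 */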
2322
2323 int
2324 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2325                                     int on)
2326 {
2327         struct rte_eth_dev *dev;
2328
2329         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2330         dev = &rte_eth_devices[port_id];
2331         if (rx_queue_id >= dev->data->nb_rx_queues) {
2332                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2333                 return -EINVAL;
2334         }
2335
2336         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2337         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2338
2339         return 0;
2340 }
2341
2342 int
2343 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2344                                 enum rte_vlan_type vlan_type,
2345                                 uint16_t tpid)
2346 {
2347         struct rte_eth_dev *dev;
2348
2349         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2350         dev = &rte_eth_devices[port_id];
2351         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2352
2353         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2354                                                                tpid));
2355 }
2356
2357 int
2358 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2359 {
2360         struct rte_eth_dev *dev;
2361         int ret = 0;
2362         int mask = 0;
2363         int cur, org = 0;
2364         uint64_t orig_offloads;
2365
2366         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2367         dev = &rte_eth_devices[port_id];
2368
2369         /* save original values in case of failure */
2370         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2371
2372         /*check which option changed by application*/
2373         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2374         org = !!(dev->data->dev_conf.rxmode.offloads &
2375                  DEV_RX_OFFLOAD_VLAN_STRIP);
2376         if (cur != org) {
2377                 if (cur)
2378                         dev->data->dev_conf.rxmode.offloads |=
2379                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2380                 else
2381                         dev->data->dev_conf.rxmode.offloads &=
2382                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2383                 mask |= ETH_VLAN_STRIP_MASK;
2384         }
2385
2386         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2387         org = !!(dev->data->dev_conf.rxmode.offloads &
2388                  DEV_RX_OFFLOAD_VLAN_FILTER);
2389         if (cur != org) {
2390                 if (cur)
2391                         dev->data->dev_conf.rxmode.offloads |=
2392                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2393                 else
2394                         dev->data->dev_conf.rxmode.offloads &=
2395                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2396                 mask |= ETH_VLAN_FILTER_MASK;
2397         }
2398
2399         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2400         org = !!(dev->data->dev_conf.rxmode.offloads &
2401                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2402         if (cur != org) {
2403                 if (cur)
2404                         dev->data->dev_conf.rxmode.offloads |=
2405                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2406                 else
2407                         dev->data->dev_conf.rxmode.offloads &=
2408                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2409                 mask |= ETH_VLAN_EXTEND_MASK;
2410         }
2411
2412         /*no change*/
2413         if (mask == 0)
2414                 return ret;
2415
2416         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2417
2418         /*
2419          * Convert to the offload bitfield API just in case the underlying PMD
2420          * still relies on it.
2421          */
2422         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2423                                     &dev->data->dev_conf.rxmode);
2424         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2425         if (ret) {
2426                 /* hit an error, restore original values */
2427                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2428                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2429                                             &dev->data->dev_conf.rxmode);
2430         }
2431
2432         return eth_err(port_id, ret);
2433 }
2434
2435 int
2436 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2437 {
2438         struct rte_eth_dev *dev;
2439         int ret = 0;
2440
2441         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2442         dev = &rte_eth_devices[port_id];
2443
2444         if (dev->data->dev_conf.rxmode.offloads &
2445             DEV_RX_OFFLOAD_VLAN_STRIP)
2446                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2447
2448         if (dev->data->dev_conf.rxmode.offloads &
2449             DEV_RX_OFFLOAD_VLAN_FILTER)
2450                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2451
2452         if (dev->data->dev_conf.rxmode.offloads &
2453             DEV_RX_OFFLOAD_VLAN_EXTEND)
2454                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2455
2456         return ret;
2457 }
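
/*
 * Editor's note (illustrative only): turning VLAN stripping on at run time
 * while leaving the other VLAN offload flags untouched.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */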
2458
2459 int
2460 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2461 {
2462         struct rte_eth_dev *dev;
2463
2464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2465         dev = &rte_eth_devices[port_id];
2466         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2467
2468         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2469 }
2470
2471 int
2472 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2473 {
2474         struct rte_eth_dev *dev;
2475
2476         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2477         dev = &rte_eth_devices[port_id];
2478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2479         memset(fc_conf, 0, sizeof(*fc_conf));
2480         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2481 }
2482
2483 int
2484 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2485 {
2486         struct rte_eth_dev *dev;
2487
2488         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2489         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2490                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2491                 return -EINVAL;
2492         }
2493
2494         dev = &rte_eth_devices[port_id];
2495         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2496         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2497 }
2498
2499 int
2500 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2501                                    struct rte_eth_pfc_conf *pfc_conf)
2502 {
2503         struct rte_eth_dev *dev;
2504
2505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2506         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2507                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2508                 return -EINVAL;
2509         }
2510
2511         dev = &rte_eth_devices[port_id];
2512         /* High water / low water validation is device-specific */
2513         if  (*dev->dev_ops->priority_flow_ctrl_set)
2514                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2515                                         (dev, pfc_conf));
2516         return -ENOTSUP;
2517 }
2518
2519 static int
2520 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2521                         uint16_t reta_size)
2522 {
2523         uint16_t i, num;
2524
2525         if (!reta_conf)
2526                 return -EINVAL;
2527
2528         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2529         for (i = 0; i < num; i++) {
2530                 if (reta_conf[i].mask)
2531                         return 0;
2532         }
2533
2534         return -EINVAL;
2535 }
2536
2537 static int
2538 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2539                          uint16_t reta_size,
2540                          uint16_t max_rxq)
2541 {
2542         uint16_t i, idx, shift;
2543
2544         if (!reta_conf)
2545                 return -EINVAL;
2546
2547         if (max_rxq == 0) {
2548                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2549                 return -EINVAL;
2550         }
2551
2552         for (i = 0; i < reta_size; i++) {
2553                 idx = i / RTE_RETA_GROUP_SIZE;
2554                 shift = i % RTE_RETA_GROUP_SIZE;
2555                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2556                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2557                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2558                                 "the maximum rxq index: %u\n", idx, shift,
2559                                 reta_conf[idx].reta[shift], max_rxq);
2560                         return -EINVAL;
2561                 }
2562         }
2563
2564         return 0;
2565 }
2566
2567 int
2568 rte_eth_dev_rss_reta_update(uint16_t port_id,
2569                             struct rte_eth_rss_reta_entry64 *reta_conf,
2570                             uint16_t reta_size)
2571 {
2572         struct rte_eth_dev *dev;
2573         int ret;
2574
2575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2576         /* Check mask bits */
2577         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2578         if (ret < 0)
2579                 return ret;
2580
2581         dev = &rte_eth_devices[port_id];
2582
2583         /* Check entry value */
2584         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2585                                 dev->data->nb_rx_queues);
2586         if (ret < 0)
2587                 return ret;
2588
2589         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2590         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2591                                                              reta_size));
2592 }
2593
2594 int
2595 rte_eth_dev_rss_reta_query(uint16_t port_id,
2596                            struct rte_eth_rss_reta_entry64 *reta_conf,
2597                            uint16_t reta_size)
2598 {
2599         struct rte_eth_dev *dev;
2600         int ret;
2601
2602         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2603
2604         /* Check mask bits */
2605         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2606         if (ret < 0)
2607                 return ret;
2608
2609         dev = &rte_eth_devices[port_id];
2610         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2611         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2612                                                             reta_size));
2613 }
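
/*
 * Editor's note (illustrative only): programming a round-robin redirection
 * table, assuming dev_info.reta_size is a multiple of RTE_RETA_GROUP_SIZE and
 * nb_q is the number of configured Rx queues.
 *
 *	struct rte_eth_rss_reta_entry64 reta[dev_info.reta_size /
 *					     RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_q;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, dev_info.reta_size);
 */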
2614
2615 int
2616 rte_eth_dev_rss_hash_update(uint16_t port_id,
2617                             struct rte_eth_rss_conf *rss_conf)
2618 {
2619         struct rte_eth_dev *dev;
2620
2621         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2622         dev = &rte_eth_devices[port_id];
2623         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2624         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2625                                                                  rss_conf));
2626 }
2627
2628 int
2629 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2630                               struct rte_eth_rss_conf *rss_conf)
2631 {
2632         struct rte_eth_dev *dev;
2633
2634         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2635         dev = &rte_eth_devices[port_id];
2636         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2637         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2638                                                                    rss_conf));
2639 }
2640
2641 int
2642 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2643                                 struct rte_eth_udp_tunnel *udp_tunnel)
2644 {
2645         struct rte_eth_dev *dev;
2646
2647         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2648         if (udp_tunnel == NULL) {
2649                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2650                 return -EINVAL;
2651         }
2652
2653         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2654                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2655                 return -EINVAL;
2656         }
2657
2658         dev = &rte_eth_devices[port_id];
2659         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2660         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2661                                                                 udp_tunnel));
2662 }
2663
2664 int
2665 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2666                                    struct rte_eth_udp_tunnel *udp_tunnel)
2667 {
2668         struct rte_eth_dev *dev;
2669
2670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2671         dev = &rte_eth_devices[port_id];
2672
2673         if (udp_tunnel == NULL) {
2674                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2675                 return -EINVAL;
2676         }
2677
2678         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2679                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2680                 return -EINVAL;
2681         }
2682
2683         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2684         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2685                                                                 udp_tunnel));
2686 }
2687
2688 int
2689 rte_eth_led_on(uint16_t port_id)
2690 {
2691         struct rte_eth_dev *dev;
2692
2693         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2694         dev = &rte_eth_devices[port_id];
2695         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2696         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2697 }
2698
2699 int
2700 rte_eth_led_off(uint16_t port_id)
2701 {
2702         struct rte_eth_dev *dev;
2703
2704         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2705         dev = &rte_eth_devices[port_id];
2706         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2707         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2708 }
2709
2710 /*
2711  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2712  * an empty spot.
2713  */
2714 static int
2715 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2716 {
2717         struct rte_eth_dev_info dev_info;
2718         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2719         unsigned i;
2720
2721         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2722         rte_eth_dev_info_get(port_id, &dev_info);
2723
2724         for (i = 0; i < dev_info.max_mac_addrs; i++)
2725                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2726                         return i;
2727
2728         return -1;
2729 }
2730
2731 static const struct ether_addr null_mac_addr;
2732
2733 int
2734 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2735                         uint32_t pool)
2736 {
2737         struct rte_eth_dev *dev;
2738         int index;
2739         uint64_t pool_mask;
2740         int ret;
2741
2742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2743         dev = &rte_eth_devices[port_id];
2744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2745
2746         if (is_zero_ether_addr(addr)) {
2747                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2748                         port_id);
2749                 return -EINVAL;
2750         }
2751         if (pool >= ETH_64_POOLS) {
2752                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2753                 return -EINVAL;
2754         }
2755
2756         index = get_mac_addr_index(port_id, addr);
2757         if (index < 0) {
2758                 index = get_mac_addr_index(port_id, &null_mac_addr);
2759                 if (index < 0) {
2760                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2761                                 port_id);
2762                         return -ENOSPC;
2763                 }
2764         } else {
2765                 pool_mask = dev->data->mac_pool_sel[index];
2766
2767                 /* Check if both MAC address and pool are already there; if so, do nothing */
2768                 if (pool_mask & (1ULL << pool))
2769                         return 0;
2770         }
2771
2772         /* Update NIC */
2773         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2774
2775         if (ret == 0) {
2776                 /* Update address in NIC data structure */
2777                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2778
2779                 /* Update pool bitmap in NIC data structure */
2780                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2781         }
2782
2783         return eth_err(port_id, ret);
2784 }
2785
2786 int
2787 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2788 {
2789         struct rte_eth_dev *dev;
2790         int index;
2791
2792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2793         dev = &rte_eth_devices[port_id];
2794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2795
2796         index = get_mac_addr_index(port_id, addr);
2797         if (index == 0) {
2798                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2799                 return -EADDRINUSE;
2800         } else if (index < 0)
2801                 return 0;  /* Do nothing if address wasn't found */
2802
2803         /* Update NIC */
2804         (*dev->dev_ops->mac_addr_remove)(dev, index);
2805
2806         /* Update address in NIC data structure */
2807         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2808
2809         /* reset pool bitmap */
2810         dev->data->mac_pool_sel[index] = 0;
2811
2812         return 0;
2813 }
2814
2815 int
2816 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2821
2822         if (!is_valid_assigned_ether_addr(addr))
2823                 return -EINVAL;
2824
2825         dev = &rte_eth_devices[port_id];
2826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2827
2828         /* Update default address in NIC data structure */
2829         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2830
2831         (*dev->dev_ops->mac_addr_set)(dev, addr);
2832
2833         return 0;
2834 }
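
/*
 * Editor's note (illustrative only): adding a secondary unicast address on
 * pool 0 and replacing the default address; extra_addr and new_default are
 * placeholder struct ether_addr values supplied by the application.
 *
 *	rte_eth_dev_mac_addr_add(port_id, &extra_addr, 0);
 *	rte_eth_dev_default_mac_addr_set(port_id, &new_default);
 */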
2835
2836
2837 /*
2838  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2839  * an empty spot.
2840  */
2841 static int
2842 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2843 {
2844         struct rte_eth_dev_info dev_info;
2845         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2846         unsigned i;
2847
2848         rte_eth_dev_info_get(port_id, &dev_info);
2849         if (!dev->data->hash_mac_addrs)
2850                 return -1;
2851
2852         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2853                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2854                         ETHER_ADDR_LEN) == 0)
2855                         return i;
2856
2857         return -1;
2858 }
2859
2860 int
2861 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2862                                 uint8_t on)
2863 {
2864         int index;
2865         int ret;
2866         struct rte_eth_dev *dev;
2867
2868         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2869
2870         dev = &rte_eth_devices[port_id];
2871         if (is_zero_ether_addr(addr)) {
2872                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2873                         port_id);
2874                 return -EINVAL;
2875         }
2876
2877         index = get_hash_mac_addr_index(port_id, addr);
2878         /* Check if it's already there, and do nothing */
2879         if ((index >= 0) && on)
2880                 return 0;
2881
2882         if (index < 0) {
2883                 if (!on) {
2884                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2885                                 "set in UTA\n", port_id);
2886                         return -EINVAL;
2887                 }
2888
2889                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2890                 if (index < 0) {
2891                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2892                                         port_id);
2893                         return -ENOSPC;
2894                 }
2895         }
2896
2897         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2898         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2899         if (ret == 0) {
2900                 /* Update address in NIC data structure */
2901                 if (on)
2902                         ether_addr_copy(addr,
2903                                         &dev->data->hash_mac_addrs[index]);
2904                 else
2905                         ether_addr_copy(&null_mac_addr,
2906                                         &dev->data->hash_mac_addrs[index]);
2907         }
2908
2909         return eth_err(port_id, ret);
2910 }
2911
2912 int
2913 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
2914 {
2915         struct rte_eth_dev *dev;
2916
2917         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2918
2919         dev = &rte_eth_devices[port_id];
2920
2921         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2922         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
2923                                                                        on));
2924 }
2925
2926 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2927                                         uint16_t tx_rate)
2928 {
2929         struct rte_eth_dev *dev;
2930         struct rte_eth_dev_info dev_info;
2931         struct rte_eth_link link;
2932
2933         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2934
2935         dev = &rte_eth_devices[port_id];
2936         rte_eth_dev_info_get(port_id, &dev_info);
2937         link = dev->data->dev_link;
2938
2939         if (queue_idx >= dev_info.max_tx_queues) {
2940                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2941                                 "invalid queue id=%d\n", port_id, queue_idx);
2942                 return -EINVAL;
2943         }
2944
2945         if (tx_rate > link.link_speed) {
2946                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2947                                 "bigger than link speed %d\n",
2948                         tx_rate, link.link_speed);
2949                 return -EINVAL;
2950         }
2951
2952         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2953         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
2954                                                         queue_idx, tx_rate));
2955 }
2956
2957 int
2958 rte_eth_mirror_rule_set(uint16_t port_id,
2959                         struct rte_eth_mirror_conf *mirror_conf,
2960                         uint8_t rule_id, uint8_t on)
2961 {
2962         struct rte_eth_dev *dev;
2963
2964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2965         if (mirror_conf->rule_type == 0) {
2966                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2967                 return -EINVAL;
2968         }
2969
2970         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2971                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2972                                 ETH_64_POOLS - 1);
2973                 return -EINVAL;
2974         }
2975
2976         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2977              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2978             (mirror_conf->pool_mask == 0)) {
2979                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2980                 return -EINVAL;
2981         }
2982
2983         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2984             mirror_conf->vlan.vlan_mask == 0) {
2985                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2986                 return -EINVAL;
2987         }
2988
2989         dev = &rte_eth_devices[port_id];
2990         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2991
2992         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
2993                                                 mirror_conf, rule_id, on));
2994 }
2995
2996 int
2997 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
2998 {
2999         struct rte_eth_dev *dev;
3000
3001         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3002
3003         dev = &rte_eth_devices[port_id];
3004         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3005
3006         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3007                                                                    rule_id));
3008 }
3009
3010 RTE_INIT(eth_dev_init_cb_lists)
3011 {
3012         int i;
3013
3014         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3015                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3016 }
3017
3018 int
3019 rte_eth_dev_callback_register(uint16_t port_id,
3020                         enum rte_eth_event_type event,
3021                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3022 {
3023         struct rte_eth_dev *dev;
3024         struct rte_eth_dev_callback *user_cb;
3025         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3026         uint16_t last_port;
3027
3028         if (!cb_fn)
3029                 return -EINVAL;
3030
3031         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3032                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3033                 return -EINVAL;
3034         }
3035
3036         if (port_id == RTE_ETH_ALL) {
3037                 next_port = 0;
3038                 last_port = RTE_MAX_ETHPORTS - 1;
3039         } else {
3040                 next_port = last_port = port_id;
3041         }
3042
3043         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3044
3045         do {
3046                 dev = &rte_eth_devices[next_port];
3047
3048                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3049                         if (user_cb->cb_fn == cb_fn &&
3050                                 user_cb->cb_arg == cb_arg &&
3051                                 user_cb->event == event) {
3052                                 break;
3053                         }
3054                 }
3055
3056                 /* create a new callback. */
3057                 if (user_cb == NULL) {
3058                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3059                                 sizeof(struct rte_eth_dev_callback), 0);
3060                         if (user_cb != NULL) {
3061                                 user_cb->cb_fn = cb_fn;
3062                                 user_cb->cb_arg = cb_arg;
3063                                 user_cb->event = event;
3064                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3065                                                   user_cb, next);
3066                         } else {
3067                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3068                                 rte_eth_dev_callback_unregister(port_id, event,
3069                                                                 cb_fn, cb_arg);
3070                                 return -ENOMEM;
3071                         }
3072
3073                 }
3074         } while (++next_port <= last_port);
3075
3076         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3077         return 0;
3078 }
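
/*
 * Minimal usage sketch (the example_* names below are illustrative and not
 * part of this library): an application registers one link-status-change
 * handler for every port by passing RTE_ETH_ALL, which the registration
 * loop above expands to all RTE_MAX_ETHPORTS slots.
 */
static int
example_lsc_handler(uint16_t port_id, enum rte_eth_event_type event,
		    void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	rte_eth_link_get_nowait(port_id, &link);
	printf("Port %u link is %s\n", port_id,
	       link.link_status ? "up" : "down");
	return 0;
}

static int
example_register_lsc(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_handler, NULL);
}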
3079
3080 int
3081 rte_eth_dev_callback_unregister(uint16_t port_id,
3082                         enum rte_eth_event_type event,
3083                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3084 {
3085         int ret;
3086         struct rte_eth_dev *dev;
3087         struct rte_eth_dev_callback *cb, *next;
3088         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3089         uint16_t last_port;
3090
3091         if (!cb_fn)
3092                 return -EINVAL;
3093
3094         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3095                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3096                 return -EINVAL;
3097         }
3098
3099         if (port_id == RTE_ETH_ALL) {
3100                 next_port = 0;
3101                 last_port = RTE_MAX_ETHPORTS - 1;
3102         } else {
3103                 next_port = last_port = port_id;
3104         }
3105
3106         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3107
3108         do {
3109                 dev = &rte_eth_devices[next_port];
3110                 ret = 0;
3111                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3112                      cb = next) {
3113
3114                         next = TAILQ_NEXT(cb, next);
3115
3116                         if (cb->cb_fn != cb_fn || cb->event != event ||
3117                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3118                                 continue;
3119
3120                         /*
3121                          * if this callback is not executing right now,
3122                          * then remove it.
3123                          */
3124                         if (cb->active == 0) {
3125                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3126                                 rte_free(cb);
3127                         } else {
3128                                 ret = -EAGAIN;
3129                         }
3130                 }
3131         } while (++next_port <= last_port);
3132
3133         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3134         return ret;
3135 }
3136
3137 int
3138 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3139         enum rte_eth_event_type event, void *ret_param)
3140 {
3141         struct rte_eth_dev_callback *cb_lst;
3142         struct rte_eth_dev_callback dev_cb;
3143         int rc = 0;
3144
3145         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3146         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3147                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3148                         continue;
3149                 dev_cb = *cb_lst;
3150                 cb_lst->active = 1;
3151                 if (ret_param != NULL)
3152                         dev_cb.ret_param = ret_param;
3153
3154                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3155                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3156                                 dev_cb.cb_arg, dev_cb.ret_param);
3157                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3158                 cb_lst->active = 0;
3159         }
3160         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3161         return rc;
3162 }
3163
3164 int
3165 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3166 {
3167         uint32_t vec;
3168         struct rte_eth_dev *dev;
3169         struct rte_intr_handle *intr_handle;
3170         uint16_t qid;
3171         int rc;
3172
3173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3174
3175         dev = &rte_eth_devices[port_id];
3176
3177         if (!dev->intr_handle) {
3178                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3179                 return -ENOTSUP;
3180         }
3181
3182         intr_handle = dev->intr_handle;
3183         if (!intr_handle->intr_vec) {
3184                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3185                 return -EPERM;
3186         }
3187
3188         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3189                 vec = intr_handle->intr_vec[qid];
3190                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3191                 if (rc && rc != -EEXIST) {
3192                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3193                                         " op %d epfd %d vec %u\n",
3194                                         port_id, qid, op, epfd, vec);
3195                 }
3196         }
3197
3198         return 0;
3199 }
3200
3201 const struct rte_memzone *
3202 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3203                          uint16_t queue_id, size_t size, unsigned align,
3204                          int socket_id)
3205 {
3206         char z_name[RTE_MEMZONE_NAMESIZE];
3207         const struct rte_memzone *mz;
3208
3209         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3210                  dev->device->driver->name, ring_name,
3211                  dev->data->port_id, queue_id);
3212
3213         mz = rte_memzone_lookup(z_name);
3214         if (mz)
3215                 return mz;
3216
3217         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3218 }
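
/*
 * Usage sketch (PMD side; the helper name and descriptor size are
 * illustrative): reserve DMA-able memory for a Tx descriptor ring during
 * queue setup. Thanks to the lookup above, reconfiguring a queue reuses the
 * existing memzone instead of failing on a duplicate name.
 */
static const struct rte_memzone *
example_alloc_tx_ring(struct rte_eth_dev *dev, uint16_t queue_id,
		      uint16_t nb_desc, int socket_id)
{
	/* Assume 16-byte descriptors and a cache-line aligned ring. */
	return rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
					(size_t)nb_desc * 16,
					RTE_CACHE_LINE_SIZE, socket_id);
}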
3219
3220 int
3221 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3222                           int epfd, int op, void *data)
3223 {
3224         uint32_t vec;
3225         struct rte_eth_dev *dev;
3226         struct rte_intr_handle *intr_handle;
3227         int rc;
3228
3229         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3230
3231         dev = &rte_eth_devices[port_id];
3232         if (queue_id >= dev->data->nb_rx_queues) {
3233                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3234                 return -EINVAL;
3235         }
3236
3237         if (!dev->intr_handle) {
3238                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3239                 return -ENOTSUP;
3240         }
3241
3242         intr_handle = dev->intr_handle;
3243         if (!intr_handle->intr_vec) {
3244                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3245                 return -EPERM;
3246         }
3247
3248         vec = intr_handle->intr_vec[queue_id];
3249         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3250         if (rc && rc != -EEXIST) {
3251                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3252                                 " op %d epfd %d vec %u\n",
3253                                 port_id, queue_id, op, epfd, vec);
3254                 return rc;
3255         }
3256
3257         return 0;
3258 }
3259
3260 int
3261 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3262                            uint16_t queue_id)
3263 {
3264         struct rte_eth_dev *dev;
3265
3266         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3267
3268         dev = &rte_eth_devices[port_id];
3269
3270         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3271         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3272                                                                 queue_id));
3273 }
3274
3275 int
3276 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3277                             uint16_t queue_id)
3278 {
3279         struct rte_eth_dev *dev;
3280
3281         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3282
3283         dev = &rte_eth_devices[port_id];
3284
3285         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3286         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3287                                                                 queue_id));
3288 }
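
/*
 * Usage sketch (illustrative, following a pattern similar to the
 * l3fwd-power example): bind one Rx queue interrupt to the calling thread's
 * epoll instance, unmask it, and sleep until traffic arrives. Error
 * handling is omitted for brevity.
 */
static int
example_wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;
	int n;

	/* Attach the queue's interrupt vector to the per-thread epoll fd. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	/* Unmask the interrupt in the PMD before sleeping. */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* Block for up to 10 ms waiting for the Rx interrupt. */
	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 10);

	/* Mask the interrupt again before polling the queue. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);

	return n;
}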
3289
3290
3291 int
3292 rte_eth_dev_filter_supported(uint16_t port_id,
3293                              enum rte_filter_type filter_type)
3294 {
3295         struct rte_eth_dev *dev;
3296
3297         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3298
3299         dev = &rte_eth_devices[port_id];
3300         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3301         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3302                                 RTE_ETH_FILTER_NOP, NULL);
3303 }
3304
3305 int
3306 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3307                             enum rte_filter_type filter_type,
3308                             enum rte_filter_op filter_op, void *arg);
3309
3310 int
3311 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3312                             enum rte_filter_type filter_type,
3313                             enum rte_filter_op filter_op, void *arg)
3314 {
3315         struct rte_eth_fdir_info_v22 {
3316                 enum rte_fdir_mode mode;
3317                 struct rte_eth_fdir_masks mask;
3318                 struct rte_eth_fdir_flex_conf flex_conf;
3319                 uint32_t guarant_spc;
3320                 uint32_t best_spc;
3321                 uint32_t flow_types_mask[1];
3322                 uint32_t max_flexpayload;
3323                 uint32_t flex_payload_unit;
3324                 uint32_t max_flex_payload_segment_num;
3325                 uint16_t flex_payload_limit;
3326                 uint32_t flex_bitmask_unit;
3327                 uint32_t max_flex_bitmask_num;
3328         };
3329
3330         struct rte_eth_hash_global_conf_v22 {
3331                 enum rte_eth_hash_function hash_func;
3332                 uint32_t sym_hash_enable_mask[1];
3333                 uint32_t valid_bit_mask[1];
3334         };
3335
3336         struct rte_eth_hash_filter_info_v22 {
3337                 enum rte_eth_hash_filter_info_type info_type;
3338                 union {
3339                         uint8_t enable;
3340                         struct rte_eth_hash_global_conf_v22 global_conf;
3341                         struct rte_eth_input_set_conf input_set_conf;
3342                 } info;
3343         };
3344
3345         struct rte_eth_dev *dev;
3346
3347         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3348
3349         dev = &rte_eth_devices[port_id];
3350         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3351         if (filter_op == RTE_ETH_FILTER_INFO) {
3352                 int retval;
3353                 struct rte_eth_fdir_info_v22 *fdir_info_v22;
3354                 struct rte_eth_fdir_info fdir_info;
3355
3356                 fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
3357
3358                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3359                           filter_op, (void *)&fdir_info);
3360                 fdir_info_v22->mode = fdir_info.mode;
3361                 fdir_info_v22->mask = fdir_info.mask;
3362                 fdir_info_v22->flex_conf = fdir_info.flex_conf;
3363                 fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
3364                 fdir_info_v22->best_spc = fdir_info.best_spc;
3365                 fdir_info_v22->flow_types_mask[0] =
3366                         (uint32_t)fdir_info.flow_types_mask[0];
3367                 fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
3368                 fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
3369                 fdir_info_v22->max_flex_payload_segment_num =
3370                         fdir_info.max_flex_payload_segment_num;
3371                 fdir_info_v22->flex_payload_limit =
3372                         fdir_info.flex_payload_limit;
3373                 fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
3374                 fdir_info_v22->max_flex_bitmask_num =
3375                         fdir_info.max_flex_bitmask_num;
3376                 return retval;
3377         } else if (filter_op == RTE_ETH_FILTER_GET) {
3378                 int retval;
3379                 struct rte_eth_hash_filter_info f_info;
3380                 struct rte_eth_hash_filter_info_v22 *f_info_v22 =
3381                         (struct rte_eth_hash_filter_info_v22 *)arg;
3382
3383                 f_info.info_type = f_info_v22->info_type;
3384                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3385                           filter_op, (void *)&f_info);
3386
3387                 switch (f_info_v22->info_type) {
3388                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3389                         f_info_v22->info.enable = f_info.info.enable;
3390                         break;
3391                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3392                         f_info_v22->info.global_conf.hash_func =
3393                                 f_info.info.global_conf.hash_func;
3394                         f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
3395                                 (uint32_t)
3396                                 f_info.info.global_conf.sym_hash_enable_mask[0];
3397                         f_info_v22->info.global_conf.valid_bit_mask[0] =
3398                                 (uint32_t)
3399                                 f_info.info.global_conf.valid_bit_mask[0];
3400                         break;
3401                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3402                         f_info_v22->info.input_set_conf =
3403                                 f_info.info.input_set_conf;
3404                         break;
3405                 default:
3406                         break;
3407                 }
3408                 return retval;
3409         } else if (filter_op == RTE_ETH_FILTER_SET) {
3410                 struct rte_eth_hash_filter_info f_info;
3411                 struct rte_eth_hash_filter_info_v22 *f_v22 =
3412                         (struct rte_eth_hash_filter_info_v22 *)arg;
3413
3414                 f_info.info_type = f_v22->info_type;
3415                 switch (f_v22->info_type) {
3416                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3417                         f_info.info.enable = f_v22->info.enable;
3418                         break;
3419                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3420                         f_info.info.global_conf.hash_func =
3421                                 f_v22->info.global_conf.hash_func;
3422                         f_info.info.global_conf.sym_hash_enable_mask[0] =
3423                                 (uint32_t)
3424                                 f_v22->info.global_conf.sym_hash_enable_mask[0];
3425                         f_info.info.global_conf.valid_bit_mask[0] =
3426                                 (uint32_t)
3427                                 f_v22->info.global_conf.valid_bit_mask[0];
3428                         break;
3429                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3430                         f_info.info.input_set_conf =
3431                                 f_v22->info.input_set_conf;
3432                         break;
3433                 default:
3434                         break;
3435                 }
3436                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3437                                                     (void *)&f_info);
3438         } else
3439                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3440                                                     arg);
3441 }
3442 VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
3443
3444 int
3445 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3446                               enum rte_filter_type filter_type,
3447                               enum rte_filter_op filter_op, void *arg);
3448
3449 int
3450 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3451                               enum rte_filter_type filter_type,
3452                               enum rte_filter_op filter_op, void *arg)
3453 {
3454         struct rte_eth_dev *dev;
3455
3456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3457
3458         dev = &rte_eth_devices[port_id];
3459         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3460         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3461                                                              filter_op, arg));
3462 }
3463 BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
3464 MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
3465                   enum rte_filter_type filter_type,
3466                   enum rte_filter_op filter_op, void *arg),
3467                   rte_eth_dev_filter_ctrl_v1802);
3468
3469 void *
3470 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3471                 rte_rx_callback_fn fn, void *user_param)
3472 {
3473 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3474         rte_errno = ENOTSUP;
3475         return NULL;
3476 #endif
3477         /* check input parameters */
3478         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3479                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3480                 rte_errno = EINVAL;
3481                 return NULL;
3482         }
3483         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3484
3485         if (cb == NULL) {
3486                 rte_errno = ENOMEM;
3487                 return NULL;
3488         }
3489
3490         cb->fn.rx = fn;
3491         cb->param = user_param;
3492
3493         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3494         /* Add the callbacks in FIFO order. */
3495         struct rte_eth_rxtx_callback *tail =
3496                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3497
3498         if (!tail) {
3499                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3500
3501         } else {
3502                 while (tail->next)
3503                         tail = tail->next;
3504                 tail->next = cb;
3505         }
3506         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3507
3508         return cb;
3509 }
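
/*
 * Usage sketch (illustrative names): a post-Rx-burst callback that counts
 * received packets. The opaque pointer returned by
 * rte_eth_add_rx_callback() should be kept so the callback can later be
 * removed with rte_eth_remove_rx_callback().
 */
static uint16_t
example_count_rx(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);

	*counter += nb_pkts;
	return nb_pkts; /* pass the whole burst through unchanged */
}

static void *
example_install_rx_counter(uint16_t port_id, uint16_t queue_id,
			   uint64_t *counter)
{
	return rte_eth_add_rx_callback(port_id, queue_id,
				       example_count_rx, counter);
}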
3510
3511 void *
3512 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3513                 rte_rx_callback_fn fn, void *user_param)
3514 {
3515 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3516         rte_errno = ENOTSUP;
3517         return NULL;
3518 #endif
3519         /* check input parameters */
3520         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3521                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3522                 rte_errno = EINVAL;
3523                 return NULL;
3524         }
3525
3526         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3527
3528         if (cb == NULL) {
3529                 rte_errno = ENOMEM;
3530                 return NULL;
3531         }
3532
3533         cb->fn.rx = fn;
3534         cb->param = user_param;
3535
3536         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3537         /* Add the callback at the first position. */
3538         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3539         rte_smp_wmb();
3540         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3541         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3542
3543         return cb;
3544 }
3545
3546 void *
3547 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3548                 rte_tx_callback_fn fn, void *user_param)
3549 {
3550 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3551         rte_errno = ENOTSUP;
3552         return NULL;
3553 #endif
3554         /* check input parameters */
3555         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3556                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3557                 rte_errno = EINVAL;
3558                 return NULL;
3559         }
3560
3561         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3562
3563         if (cb == NULL) {
3564                 rte_errno = ENOMEM;
3565                 return NULL;
3566         }
3567
3568         cb->fn.tx = fn;
3569         cb->param = user_param;
3570
3571         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3572         /* Add the callbacks in FIFO order. */
3573         struct rte_eth_rxtx_callback *tail =
3574                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3575
3576         if (!tail) {
3577                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3578
3579         } else {
3580                 while (tail->next)
3581                         tail = tail->next;
3582                 tail->next = cb;
3583         }
3584         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3585
3586         return cb;
3587 }
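
/*
 * Usage sketch (illustrative): a pre-Tx-burst callback that accumulates the
 * byte count of packets about to be transmitted. Note that, unlike the Rx
 * callback prototype, the Tx one has no max_pkts argument.
 */
static uint16_t
example_count_tx_bytes(uint16_t port_id, uint16_t queue_id,
		       struct rte_mbuf *pkts[], uint16_t nb_pkts,
		       void *user_param)
{
	uint64_t *bytes = user_param;
	uint16_t i;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);

	for (i = 0; i < nb_pkts; i++)
		*bytes += rte_pktmbuf_pkt_len(pkts[i]);
	return nb_pkts;
}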
3588
3589 int
3590 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3591                 struct rte_eth_rxtx_callback *user_cb)
3592 {
3593 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3594         return -ENOTSUP;
3595 #endif
3596         /* Check input parameters. */
3597         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3598         if (user_cb == NULL ||
3599                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3600                 return -EINVAL;
3601
3602         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3603         struct rte_eth_rxtx_callback *cb;
3604         struct rte_eth_rxtx_callback **prev_cb;
3605         int ret = -EINVAL;
3606
3607         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3608         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3609         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3610                 cb = *prev_cb;
3611                 if (cb == user_cb) {
3612                         /* Remove the user cb from the callback list. */
3613                         *prev_cb = cb->next;
3614                         ret = 0;
3615                         break;
3616                 }
3617         }
3618         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3619
3620         return ret;
3621 }
3622
3623 int
3624 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3625                 struct rte_eth_rxtx_callback *user_cb)
3626 {
3627 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3628         return -ENOTSUP;
3629 #endif
3630         /* Check input parameters. */
3631         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3632         if (user_cb == NULL ||
3633                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3634                 return -EINVAL;
3635
3636         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3637         int ret = -EINVAL;
3638         struct rte_eth_rxtx_callback *cb;
3639         struct rte_eth_rxtx_callback **prev_cb;
3640
3641         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3642         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3643         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3644                 cb = *prev_cb;
3645                 if (cb == user_cb) {
3646                         /* Remove the user cb from the callback list. */
3647                         *prev_cb = cb->next;
3648                         ret = 0;
3649                         break;
3650                 }
3651         }
3652         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3653
3654         return ret;
3655 }
3656
3657 int
3658 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3659         struct rte_eth_rxq_info *qinfo)
3660 {
3661         struct rte_eth_dev *dev;
3662
3663         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3664
3665         if (qinfo == NULL)
3666                 return -EINVAL;
3667
3668         dev = &rte_eth_devices[port_id];
3669         if (queue_id >= dev->data->nb_rx_queues) {
3670                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3671                 return -EINVAL;
3672         }
3673
3674         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3675
3676         memset(qinfo, 0, sizeof(*qinfo));
3677         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3678         return 0;
3679 }
3680
3681 int
3682 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3683         struct rte_eth_txq_info *qinfo)
3684 {
3685         struct rte_eth_dev *dev;
3686
3687         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3688
3689         if (qinfo == NULL)
3690                 return -EINVAL;
3691
3692         dev = &rte_eth_devices[port_id];
3693         if (queue_id >= dev->data->nb_tx_queues) {
3694                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3695                 return -EINVAL;
3696         }
3697
3698         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3699
3700         memset(qinfo, 0, sizeof(*qinfo));
3701         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3702         return 0;
3703 }
3704
3705 int
3706 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3707                              struct ether_addr *mc_addr_set,
3708                              uint32_t nb_mc_addr)
3709 {
3710         struct rte_eth_dev *dev;
3711
3712         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3713
3714         dev = &rte_eth_devices[port_id];
3715         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3716         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3717                                                 mc_addr_set, nb_mc_addr));
3718 }
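
/*
 * Usage sketch (the addresses are illustrative): replace the port's
 * multicast filter list with two well-known group MACs; calling with
 * nb_mc_addr == 0 is the usual way to clear the list again.
 */
static int
example_set_mc_filter(uint16_t port_id)
{
	static struct ether_addr mc_addrs[] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc_addrs,
					    RTE_DIM(mc_addrs));
}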
3719
3720 int
3721 rte_eth_timesync_enable(uint16_t port_id)
3722 {
3723         struct rte_eth_dev *dev;
3724
3725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3726         dev = &rte_eth_devices[port_id];
3727
3728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3729         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3730 }
3731
3732 int
3733 rte_eth_timesync_disable(uint16_t port_id)
3734 {
3735         struct rte_eth_dev *dev;
3736
3737         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3738         dev = &rte_eth_devices[port_id];
3739
3740         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3741         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3742 }
3743
3744 int
3745 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3746                                    uint32_t flags)
3747 {
3748         struct rte_eth_dev *dev;
3749
3750         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3751         dev = &rte_eth_devices[port_id];
3752
3753         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3754         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3755                                 (dev, timestamp, flags));
3756 }
3757
3758 int
3759 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3760                                    struct timespec *timestamp)
3761 {
3762         struct rte_eth_dev *dev;
3763
3764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3765         dev = &rte_eth_devices[port_id];
3766
3767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3768         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3769                                 (dev, timestamp));
3770 }
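
/*
 * Usage sketch (illustrative; the meaning of "flags" is device specific,
 * typically an Rx timestamp register index): enable IEEE 1588 timestamping
 * on a port, then fetch the timestamp latched for a received PTP packet.
 */
static int
example_read_ptp_rx_stamp(uint16_t port_id, uint32_t flags,
			  struct timespec *ts)
{
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret < 0)
		return ret;

	return rte_eth_timesync_read_rx_timestamp(port_id, ts, flags);
}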
3771
3772 int
3773 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3774 {
3775         struct rte_eth_dev *dev;
3776
3777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3778         dev = &rte_eth_devices[port_id];
3779
3780         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3781         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3782                                                                       delta));
3783 }
3784
3785 int
3786 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3787 {
3788         struct rte_eth_dev *dev;
3789
3790         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3791         dev = &rte_eth_devices[port_id];
3792
3793         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3794         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3795                                                                 timestamp));
3796 }
3797
3798 int
3799 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3800 {
3801         struct rte_eth_dev *dev;
3802
3803         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3804         dev = &rte_eth_devices[port_id];
3805
3806         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3807         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3808                                                                 timestamp));
3809 }
3810
3811 int
3812 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3813 {
3814         struct rte_eth_dev *dev;
3815
3816         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3817
3818         dev = &rte_eth_devices[port_id];
3819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3820         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
3821 }
3822
3823 int
3824 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3825 {
3826         struct rte_eth_dev *dev;
3827
3828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3829
3830         dev = &rte_eth_devices[port_id];
3831         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3832         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
3833 }
3834
3835 int
3836 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3837 {
3838         struct rte_eth_dev *dev;
3839
3840         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3841
3842         dev = &rte_eth_devices[port_id];
3843         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3844         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
3845 }
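
/*
 * Usage sketch (illustrative): dump the whole device EEPROM into a buffer
 * sized with rte_eth_dev_get_eeprom_length(). The caller owns the returned
 * buffer and must release it with rte_free().
 */
static void *
example_dump_eeprom(uint16_t port_id, int *len)
{
	struct rte_dev_eeprom_info info;
	void *buf;

	*len = rte_eth_dev_get_eeprom_length(port_id);
	if (*len <= 0)
		return NULL;

	buf = rte_malloc(NULL, (size_t)*len, 0);
	if (buf == NULL)
		return NULL;

	memset(&info, 0, sizeof(info));
	info.data = buf;
	info.offset = 0;
	info.length = (uint32_t)*len;

	if (rte_eth_dev_get_eeprom(port_id, &info) != 0) {
		rte_free(buf);
		return NULL;
	}
	return buf;
}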
3846
3847 int
3848 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3849 {
3850         struct rte_eth_dev *dev;
3851
3852         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3853
3854         dev = &rte_eth_devices[port_id];
3855         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3856         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
3857 }
3858
3859 int
3860 rte_eth_dev_get_dcb_info(uint16_t port_id,
3861                              struct rte_eth_dcb_info *dcb_info)
3862 {
3863         struct rte_eth_dev *dev;
3864
3865         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3866
3867         dev = &rte_eth_devices[port_id];
3868         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3869
3870         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3871         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
3872 }
3873
3874 int
3875 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3876                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3877 {
3878         struct rte_eth_dev *dev;
3879
3880         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3881         if (l2_tunnel == NULL) {
3882                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3883                 return -EINVAL;
3884         }
3885
3886         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3887                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3888                 return -EINVAL;
3889         }
3890
3891         dev = &rte_eth_devices[port_id];
3892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3893                                 -ENOTSUP);
3894         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
3895                                                                 l2_tunnel));
3896 }
3897
3898 int
3899 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3900                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3901                                   uint32_t mask,
3902                                   uint8_t en)
3903 {
3904         struct rte_eth_dev *dev;
3905
3906         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3907
3908         if (l2_tunnel == NULL) {
3909                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3910                 return -EINVAL;
3911         }
3912
3913         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3914                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3915                 return -EINVAL;
3916         }
3917
3918         if (mask == 0) {
3919                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3920                 return -EINVAL;
3921         }
3922
3923         dev = &rte_eth_devices[port_id];
3924         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3925                                 -ENOTSUP);
3926         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
3927                                                         l2_tunnel, mask, en));
3928 }
3929
3930 static void
3931 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3932                            const struct rte_eth_desc_lim *desc_lim)
3933 {
3934         if (desc_lim->nb_align != 0)
3935                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3936
3937         if (desc_lim->nb_max != 0)
3938                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3939
3940         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3941 }
3942
3943 int
3944 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3945                                  uint16_t *nb_rx_desc,
3946                                  uint16_t *nb_tx_desc)
3947 {
3948         struct rte_eth_dev *dev;
3949         struct rte_eth_dev_info dev_info;
3950
3951         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3952
3953         dev = &rte_eth_devices[port_id];
3954         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3955
3956         rte_eth_dev_info_get(port_id, &dev_info);
3957
3958         if (nb_rx_desc != NULL)
3959                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3960
3961         if (nb_tx_desc != NULL)
3962                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3963
3964         return 0;
3965 }
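
/*
 * Usage sketch (the preferred ring size of 1024 is illustrative): clamp the
 * application's preferred descriptor counts to the device limits before
 * calling rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup(), so queue setup
 * cannot fail on descriptor-count grounds.
 */
static int
example_pick_ring_sizes(uint16_t port_id, uint16_t *nb_rxd, uint16_t *nb_txd)
{
	*nb_rxd = 1024;
	*nb_txd = 1024;

	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}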
3966
3967 int
3968 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3969 {
3970         struct rte_eth_dev *dev;
3971
3972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3973
3974         if (pool == NULL)
3975                 return -EINVAL;
3976
3977         dev = &rte_eth_devices[port_id];
3978
3979         if (*dev->dev_ops->pool_ops_supported == NULL)
3980                 return 1; /* all pools are supported */
3981
3982         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3983 }