[dpdk.git] lib/librte_ether/rte_ethdev.c (commit 542dcb823fd11de374de1bbade1aefdd9ed879ef)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_compat.h>
38
39 #include "rte_ether.h"
40 #include "rte_ethdev.h"
41 #include "ethdev_profile.h"
42
43 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
44 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
45 static struct rte_eth_dev_data *rte_eth_dev_data;
46 static uint8_t eth_dev_last_created_port;
47
48 /* spinlock for eth device callbacks */
49 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
50
51 /* spinlock for add/remove rx callbacks */
52 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
53
54 /* spinlock for add/remove tx callbacks */
55 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
56
57 /* store statistics names and their offsets in the stats structure */
58 struct rte_eth_xstats_name_off {
59         char name[RTE_ETH_XSTATS_NAME_SIZE];
60         unsigned offset;
61 };
62
63 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
64         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
65         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
66         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
67         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
68         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
69         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
70         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
71         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
72                 rx_nombuf)},
73 };
74
75 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
76
77 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
78         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
79         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
80         {"errors", offsetof(struct rte_eth_stats, q_errors)},
81 };
82
83 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
84                 sizeof(rte_rxq_stats_strings[0]))
85
86 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
87         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
88         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
89 };
90 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
91                 sizeof(rte_txq_stats_strings[0]))
92
93 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
94         { DEV_RX_OFFLOAD_##_name, #_name }
95
96 static const struct {
97         uint64_t offload;
98         const char *name;
99 } rte_rx_offload_names[] = {
100         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
101         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
102         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
103         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
104         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
105         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
106         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
107         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
108         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
109         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
111         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
112         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
113         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
114         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
115         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
116 };
117
118 #undef RTE_RX_OFFLOAD_BIT2STR
119
120 /**
121  * The user application callback description.
122  *
123  * It contains the callback address registered by the user application,
124  * the pointer to the callback parameters, and the event type.
125  */
126 struct rte_eth_dev_callback {
127         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
128         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
129         void *cb_arg;                           /**< Parameter for callback */
130         void *ret_param;                        /**< Return parameter */
131         enum rte_eth_event_type event;          /**< Interrupt event type */
132         uint32_t active;                        /**< Callback is executing */
133 };
134
135 enum {
136         STAT_QMAP_TX = 0,
137         STAT_QMAP_RX
138 };
139
140 uint16_t
141 rte_eth_find_next(uint16_t port_id)
142 {
143         while (port_id < RTE_MAX_ETHPORTS &&
144                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
145                 port_id++;
146
147         if (port_id >= RTE_MAX_ETHPORTS)
148                 return RTE_MAX_ETHPORTS;
149
150         return port_id;
151 }
152
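/*
 * Illustrative usage sketch (not part of the library): rte_eth_find_next()
 * is the building block behind the RTE_ETH_FOREACH_DEV() macro.  Iterating
 * over all attached ports can be written either way; this assumes the
 * application has already initialized the EAL.
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is attached\n", pid);
 *
 *	The convenience macro form is equivalent:
 *
 *	RTE_ETH_FOREACH_DEV(pid)
 *		printf("port %u is attached\n", pid);
 */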
153 static void
154 rte_eth_dev_data_alloc(void)
155 {
156         const unsigned flags = 0;
157         const struct rte_memzone *mz;
158
159         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
160                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
161                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
162                                 rte_socket_id(), flags);
163         } else
164                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
165         if (mz == NULL)
166                 rte_panic("Cannot allocate memzone for ethernet port data\n");
167
168         rte_eth_dev_data = mz->addr;
169         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
170                 memset(rte_eth_dev_data, 0,
171                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
172 }
173
174 struct rte_eth_dev *
175 rte_eth_dev_allocated(const char *name)
176 {
177         unsigned i;
178
179         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
180                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
181                     strcmp(rte_eth_devices[i].data->name, name) == 0)
182                         return &rte_eth_devices[i];
183         }
184         return NULL;
185 }
186
187 static uint16_t
188 rte_eth_dev_find_free_port(void)
189 {
190         unsigned i;
191
192         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
193                 if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
194                         return i;
195         }
196         return RTE_MAX_ETHPORTS;
197 }
198
199 static struct rte_eth_dev *
200 eth_dev_get(uint16_t port_id)
201 {
202         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
203
204         eth_dev->data = &rte_eth_dev_data[port_id];
205         eth_dev->state = RTE_ETH_DEV_ATTACHED;
206
207         eth_dev_last_created_port = port_id;
208
209         return eth_dev;
210 }
211
212 struct rte_eth_dev *
213 rte_eth_dev_allocate(const char *name)
214 {
215         uint16_t port_id;
216         struct rte_eth_dev *eth_dev;
217
218         port_id = rte_eth_dev_find_free_port();
219         if (port_id == RTE_MAX_ETHPORTS) {
220                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
221                 return NULL;
222         }
223
224         if (rte_eth_dev_data == NULL)
225                 rte_eth_dev_data_alloc();
226
227         if (rte_eth_dev_allocated(name) != NULL) {
228                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
229                                 name);
230                 return NULL;
231         }
232
233         memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
234         eth_dev = eth_dev_get(port_id);
235         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
236         eth_dev->data->port_id = port_id;
237         eth_dev->data->mtu = ETHER_MTU;
238
239         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
240
241         return eth_dev;
242 }
243
244 /*
245  * Attach to a port already registered by the primary process, which
246  * ensures that the same device will have the same port id in both
247  * the primary and secondary processes.
248  */
249 struct rte_eth_dev *
250 rte_eth_dev_attach_secondary(const char *name)
251 {
252         uint16_t i;
253         struct rte_eth_dev *eth_dev;
254
255         if (rte_eth_dev_data == NULL)
256                 rte_eth_dev_data_alloc();
257
258         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
259                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
260                         break;
261         }
262         if (i == RTE_MAX_ETHPORTS) {
263                 RTE_PMD_DEBUG_TRACE(
264                         "device %s is not driven by the primary process\n",
265                         name);
266                 return NULL;
267         }
268
269         eth_dev = eth_dev_get(i);
270         RTE_ASSERT(eth_dev->data->port_id == i);
271
272         return eth_dev;
273 }
274
275 int
276 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
277 {
278         if (eth_dev == NULL)
279                 return -EINVAL;
280
281         eth_dev->state = RTE_ETH_DEV_UNUSED;
282
283         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
284
285         return 0;
286 }
287
288 int
289 rte_eth_dev_is_valid_port(uint16_t port_id)
290 {
291         if (port_id >= RTE_MAX_ETHPORTS ||
292             (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
293              rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
294                 return 0;
295         else
296                 return 1;
297 }
298
299 int
300 rte_eth_dev_socket_id(uint16_t port_id)
301 {
302         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
303         return rte_eth_devices[port_id].data->numa_node;
304 }
305
306 void *
307 rte_eth_dev_get_sec_ctx(uint8_t port_id)
308 {
309         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
310         return rte_eth_devices[port_id].security_ctx;
311 }
312
313 uint16_t
314 rte_eth_dev_count(void)
315 {
316         uint16_t p;
317         uint16_t count;
318
319         count = 0;
320
321         RTE_ETH_FOREACH_DEV(p)
322                 count++;
323
324         return count;
325 }
326
327 int
328 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
329 {
330         char *tmp;
331
332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
333
334         if (name == NULL) {
335                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
336                 return -EINVAL;
337         }
338
339         /* Don't check 'rte_eth_devices[i].data' here,
340          * because it might be overwritten by a VDEV PMD. */
341         tmp = rte_eth_dev_data[port_id].name;
342         strcpy(name, tmp);
343         return 0;
344 }
345
346 int
347 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
348 {
349         int i;
350
351         if (name == NULL) {
352                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
353                 return -EINVAL;
354         }
355
356         RTE_ETH_FOREACH_DEV(i) {
357                 if (!strncmp(name,
358                         rte_eth_dev_data[i].name, strlen(name))) {
359
360                         *port_id = i;
361
362                         return 0;
363                 }
364         }
365         return -ENODEV;
366 }
367
368 /* attach the new device, then store the port_id of the device */
369 int
370 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
371 {
372         int ret = -1;
373         int current = rte_eth_dev_count();
374         char *name = NULL;
375         char *args = NULL;
376
377         if ((devargs == NULL) || (port_id == NULL)) {
378                 ret = -EINVAL;
379                 goto err;
380         }
381
382         /* parse devargs, then retrieve device name and args */
383         if (rte_eal_parse_devargs_str(devargs, &name, &args))
384                 goto err;
385
386         ret = rte_eal_dev_attach(name, args);
387         if (ret < 0)
388                 goto err;
389
390         /* no point looking at the port count if no port exists */
391         if (!rte_eth_dev_count()) {
392                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
393                 ret = -1;
394                 goto err;
395         }
396
397         /* If the port count did not change, there is a bug: the driver
398          * reported that it attached a device but did not create a port.
399          */
400         if (current == rte_eth_dev_count()) {
401                 ret = -1;
402                 goto err;
403         }
404
405         *port_id = eth_dev_last_created_port;
406         ret = 0;
407
408 err:
409         free(name);
410         free(args);
411         return ret;
412 }
413
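/*
 * Illustrative usage sketch (not part of the library): hot-plugging a port
 * from a devargs string.  "net_null0" is only an example vdev name and
 * assumes the null PMD is compiled in; a PCI address plus driver arguments
 * works the same way.
 *
 *	uint16_t new_port;
 *
 *	if (rte_eth_dev_attach("net_null0", &new_port) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to attach device\n");
 *	printf("attached as port %u\n", new_port);
 */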
414 /* detach the device, then store the name of the device */
415 int
416 rte_eth_dev_detach(uint16_t port_id, char *name)
417 {
418         uint32_t dev_flags;
419         int ret = -1;
420
421         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
422
423         if (name == NULL) {
424                 ret = -EINVAL;
425                 goto err;
426         }
427
428         dev_flags = rte_eth_devices[port_id].data->dev_flags;
429         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
430                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
431                         port_id);
432                 ret = -ENOTSUP;
433                 goto err;
434         }
435
436         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
437                  "%s", rte_eth_devices[port_id].data->name);
438
439         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
440         if (ret < 0)
441                 goto err;
442
443         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
444         return 0;
445
446 err:
447         return ret;
448 }
449
450 static int
451 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
452 {
453         uint16_t old_nb_queues = dev->data->nb_rx_queues;
454         void **rxq;
455         unsigned i;
456
457         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
458                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
459                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
460                                 RTE_CACHE_LINE_SIZE);
461                 if (dev->data->rx_queues == NULL) {
462                         dev->data->nb_rx_queues = 0;
463                         return -(ENOMEM);
464                 }
465         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
466                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
467
468                 rxq = dev->data->rx_queues;
469
470                 for (i = nb_queues; i < old_nb_queues; i++)
471                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
472                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
473                                 RTE_CACHE_LINE_SIZE);
474                 if (rxq == NULL)
475                         return -(ENOMEM);
476                 if (nb_queues > old_nb_queues) {
477                         uint16_t new_qs = nb_queues - old_nb_queues;
478
479                         memset(rxq + old_nb_queues, 0,
480                                 sizeof(rxq[0]) * new_qs);
481                 }
482
483                 dev->data->rx_queues = rxq;
484
485         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
486                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
487
488                 rxq = dev->data->rx_queues;
489
490                 for (i = nb_queues; i < old_nb_queues; i++)
491                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
492
493                 rte_free(dev->data->rx_queues);
494                 dev->data->rx_queues = NULL;
495         }
496         dev->data->nb_rx_queues = nb_queues;
497         return 0;
498 }
499
500 int
501 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
502 {
503         struct rte_eth_dev *dev;
504
505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
506
507         dev = &rte_eth_devices[port_id];
508         if (rx_queue_id >= dev->data->nb_rx_queues) {
509                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
510                 return -EINVAL;
511         }
512
513         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
514
515         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
516                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
517                         " already started\n",
518                         rx_queue_id, port_id);
519                 return 0;
520         }
521
522         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
523
524 }
525
526 int
527 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
528 {
529         struct rte_eth_dev *dev;
530
531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
532
533         dev = &rte_eth_devices[port_id];
534         if (rx_queue_id >= dev->data->nb_rx_queues) {
535                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
536                 return -EINVAL;
537         }
538
539         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
540
541         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
542                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
543                         " already stopped\n",
544                         rx_queue_id, port_id);
545                 return 0;
546         }
547
548         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
549
550 }
551
552 int
553 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
554 {
555         struct rte_eth_dev *dev;
556
557         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
558
559         dev = &rte_eth_devices[port_id];
560         if (tx_queue_id >= dev->data->nb_tx_queues) {
561                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
562                 return -EINVAL;
563         }
564
565         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
566
567         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
568                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
569                         " already started\n",
570                         tx_queue_id, port_id);
571                 return 0;
572         }
573
574         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
575
576 }
577
578 int
579 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
580 {
581         struct rte_eth_dev *dev;
582
583         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
584
585         dev = &rte_eth_devices[port_id];
586         if (tx_queue_id >= dev->data->nb_tx_queues) {
587                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
588                 return -EINVAL;
589         }
590
591         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
592
593         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
594                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
595                         " already stopped\n",
596                         tx_queue_id, port_id);
597                 return 0;
598         }
599
600         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
601
602 }
603
604 static int
605 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
606 {
607         uint16_t old_nb_queues = dev->data->nb_tx_queues;
608         void **txq;
609         unsigned i;
610
611         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
612                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
613                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
614                                                    RTE_CACHE_LINE_SIZE);
615                 if (dev->data->tx_queues == NULL) {
616                         dev->data->nb_tx_queues = 0;
617                         return -(ENOMEM);
618                 }
619         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
620                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
621
622                 txq = dev->data->tx_queues;
623
624                 for (i = nb_queues; i < old_nb_queues; i++)
625                         (*dev->dev_ops->tx_queue_release)(txq[i]);
626                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
627                                   RTE_CACHE_LINE_SIZE);
628                 if (txq == NULL)
629                         return -ENOMEM;
630                 if (nb_queues > old_nb_queues) {
631                         uint16_t new_qs = nb_queues - old_nb_queues;
632
633                         memset(txq + old_nb_queues, 0,
634                                sizeof(txq[0]) * new_qs);
635                 }
636
637                 dev->data->tx_queues = txq;
638
639         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
640                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
641
642                 txq = dev->data->tx_queues;
643
644                 for (i = nb_queues; i < old_nb_queues; i++)
645                         (*dev->dev_ops->tx_queue_release)(txq[i]);
646
647                 rte_free(dev->data->tx_queues);
648                 dev->data->tx_queues = NULL;
649         }
650         dev->data->nb_tx_queues = nb_queues;
651         return 0;
652 }
653
654 uint32_t
655 rte_eth_speed_bitflag(uint32_t speed, int duplex)
656 {
657         switch (speed) {
658         case ETH_SPEED_NUM_10M:
659                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
660         case ETH_SPEED_NUM_100M:
661                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
662         case ETH_SPEED_NUM_1G:
663                 return ETH_LINK_SPEED_1G;
664         case ETH_SPEED_NUM_2_5G:
665                 return ETH_LINK_SPEED_2_5G;
666         case ETH_SPEED_NUM_5G:
667                 return ETH_LINK_SPEED_5G;
668         case ETH_SPEED_NUM_10G:
669                 return ETH_LINK_SPEED_10G;
670         case ETH_SPEED_NUM_20G:
671                 return ETH_LINK_SPEED_20G;
672         case ETH_SPEED_NUM_25G:
673                 return ETH_LINK_SPEED_25G;
674         case ETH_SPEED_NUM_40G:
675                 return ETH_LINK_SPEED_40G;
676         case ETH_SPEED_NUM_50G:
677                 return ETH_LINK_SPEED_50G;
678         case ETH_SPEED_NUM_56G:
679                 return ETH_LINK_SPEED_56G;
680         case ETH_SPEED_NUM_100G:
681                 return ETH_LINK_SPEED_100G;
682         default:
683                 return 0;
684         }
685 }
686
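/*
 * Illustrative usage sketch (not part of the library): the bitflag returned
 * above can be OR-ed into rte_eth_conf.link_speeds to request a fixed speed
 * instead of autonegotiation.  The speed chosen here is only an example.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 *
 *	conf is then passed to rte_eth_dev_configure(), see below.
 */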
687 /**
688  * Convert from the rxmode bitfield API to the per-port Rx offloads API.
689  */
690 static void
691 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
692                                     uint64_t *rx_offloads)
693 {
694         uint64_t offloads = 0;
695
696         if (rxmode->header_split == 1)
697                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
698         if (rxmode->hw_ip_checksum == 1)
699                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
700         if (rxmode->hw_vlan_filter == 1)
701                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
702         if (rxmode->hw_vlan_strip == 1)
703                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
704         if (rxmode->hw_vlan_extend == 1)
705                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
706         if (rxmode->jumbo_frame == 1)
707                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
708         if (rxmode->hw_strip_crc == 1)
709                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
710         if (rxmode->enable_scatter == 1)
711                 offloads |= DEV_RX_OFFLOAD_SCATTER;
712         if (rxmode->enable_lro == 1)
713                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
714         if (rxmode->hw_timestamp == 1)
715                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
716         if (rxmode->security == 1)
717                 offloads |= DEV_RX_OFFLOAD_SECURITY;
718
719         *rx_offloads = offloads;
720 }
721
722 /**
723  * Convert from the per-port Rx offloads API to the rxmode bitfield API.
724  */
725 static void
726 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
727                             struct rte_eth_rxmode *rxmode)
728 {
729
730         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
731                 rxmode->header_split = 1;
732         else
733                 rxmode->header_split = 0;
734         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
735                 rxmode->hw_ip_checksum = 1;
736         else
737                 rxmode->hw_ip_checksum = 0;
738         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
739                 rxmode->hw_vlan_filter = 1;
740         else
741                 rxmode->hw_vlan_filter = 0;
742         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
743                 rxmode->hw_vlan_strip = 1;
744         else
745                 rxmode->hw_vlan_strip = 0;
746         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
747                 rxmode->hw_vlan_extend = 1;
748         else
749                 rxmode->hw_vlan_extend = 0;
750         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
751                 rxmode->jumbo_frame = 1;
752         else
753                 rxmode->jumbo_frame = 0;
754         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
755                 rxmode->hw_strip_crc = 1;
756         else
757                 rxmode->hw_strip_crc = 0;
758         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
759                 rxmode->enable_scatter = 1;
760         else
761                 rxmode->enable_scatter = 0;
762         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
763                 rxmode->enable_lro = 1;
764         else
765                 rxmode->enable_lro = 0;
766         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
767                 rxmode->hw_timestamp = 1;
768         else
769                 rxmode->hw_timestamp = 0;
770         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
771                 rxmode->security = 1;
772         else
773                 rxmode->security = 0;
774 }
775
776 const char *
777 rte_eth_dev_rx_offload_name(uint64_t offload)
778 {
779         const char *name = "UNKNOWN";
780         unsigned int i;
781
782         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
783                 if (offload == rte_rx_offload_names[i].offload) {
784                         name = rte_rx_offload_names[i].name;
785                         break;
786                 }
787         }
788
789         return name;
790 }
791
792 int
793 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
794                       const struct rte_eth_conf *dev_conf)
795 {
796         struct rte_eth_dev *dev;
797         struct rte_eth_dev_info dev_info;
798         struct rte_eth_conf local_conf = *dev_conf;
799         int diag;
800
801         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
802
803         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
804                 RTE_PMD_DEBUG_TRACE(
805                         "Number of RX queues requested (%u) is greater than the max supported (%d)\n",
806                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
807                 return -EINVAL;
808         }
809
810         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
811                 RTE_PMD_DEBUG_TRACE(
812                         "Number of TX queues requested (%u) is greater than the max supported (%d)\n",
813                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
814                 return -EINVAL;
815         }
816
817         dev = &rte_eth_devices[port_id];
818
819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
820         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
821
822         if (dev->data->dev_started) {
823                 RTE_PMD_DEBUG_TRACE(
824                     "port %d must be stopped to allow configuration\n", port_id);
825                 return -EBUSY;
826         }
827
828         /*
829          * Convert between the two offloads APIs so that PMDs need to
830          * support only one of them.
831          */
832         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
833                 rte_eth_convert_rx_offload_bitfield(
834                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
835         } else {
836                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
837                                             &local_conf.rxmode);
838         }
839
840         /* Copy the dev_conf parameter into the dev structure */
841         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
842
843         /*
844          * Check that the numbers of RX and TX queues are not greater
845          * than the maximum number of RX and TX queues supported by the
846          * configured device.
847          */
848         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
849
850         if (nb_rx_q == 0 && nb_tx_q == 0) {
851                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: number of RX and TX queues cannot both be 0\n", port_id);
852                 return -EINVAL;
853         }
854
855         if (nb_rx_q > dev_info.max_rx_queues) {
856                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
857                                 port_id, nb_rx_q, dev_info.max_rx_queues);
858                 return -EINVAL;
859         }
860
861         if (nb_tx_q > dev_info.max_tx_queues) {
862                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
863                                 port_id, nb_tx_q, dev_info.max_tx_queues);
864                 return -EINVAL;
865         }
866
867         /* Check that the device supports requested interrupts */
868         if ((dev_conf->intr_conf.lsc == 1) &&
869                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
870                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
871                                         dev->device->driver->name);
872                         return -EINVAL;
873         }
874         if ((dev_conf->intr_conf.rmv == 1) &&
875             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
876                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
877                                     dev->device->driver->name);
878                 return -EINVAL;
879         }
880
881         /*
882          * If jumbo frames are enabled, check that the maximum RX packet
883          * length is supported by the configured device.
884          */
885         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
886                 if (dev_conf->rxmode.max_rx_pkt_len >
887                     dev_info.max_rx_pktlen) {
888                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
889                                 " > max valid value %u\n",
890                                 port_id,
891                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
892                                 (unsigned)dev_info.max_rx_pktlen);
893                         return -EINVAL;
894                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
895                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
896                                 " < min valid value %u\n",
897                                 port_id,
898                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
899                                 (unsigned)ETHER_MIN_LEN);
900                         return -EINVAL;
901                 }
902         } else {
903                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
904                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
905                         /* Use default value */
906                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
907                                                         ETHER_MAX_LEN;
908         }
909
910         /*
911          * Setup new number of RX/TX queues and reconfigure device.
912          */
913         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
914         if (diag != 0) {
915                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
916                                 port_id, diag);
917                 return diag;
918         }
919
920         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
921         if (diag != 0) {
922                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
923                                 port_id, diag);
924                 rte_eth_dev_rx_queue_config(dev, 0);
925                 return diag;
926         }
927
928         diag = (*dev->dev_ops->dev_configure)(dev);
929         if (diag != 0) {
930                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
931                                 port_id, diag);
932                 rte_eth_dev_rx_queue_config(dev, 0);
933                 rte_eth_dev_tx_queue_config(dev, 0);
934                 return diag;
935         }
936
937         /* Initialize Rx profiling if enabled at compilation time. */
938         diag = __rte_eth_profile_rx_init(port_id, dev);
939         if (diag != 0) {
940                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
941                                 port_id, diag);
942                 rte_eth_dev_rx_queue_config(dev, 0);
943                 rte_eth_dev_tx_queue_config(dev, 0);
944                 return diag;
945         }
946
947         return 0;
948 }
949
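/*
 * Illustrative usage sketch (not part of the library): a minimal
 * configuration through the new offloads API, i.e. with
 * ignore_offload_bitfield set so the legacy bitfield members are not
 * consulted.  The queue counts and the offload selection are application
 * choices; port_id is assumed to be a valid attached port.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.ignore_offload_bitfield = 1;
 *	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */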
950 void
951 _rte_eth_dev_reset(struct rte_eth_dev *dev)
952 {
953         if (dev->data->dev_started) {
954                 RTE_PMD_DEBUG_TRACE(
955                         "port %d must be stopped to allow reset\n",
956                         dev->data->port_id);
957                 return;
958         }
959
960         rte_eth_dev_rx_queue_config(dev, 0);
961         rte_eth_dev_tx_queue_config(dev, 0);
962
963         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
964 }
965
966 static void
967 rte_eth_dev_config_restore(uint16_t port_id)
968 {
969         struct rte_eth_dev *dev;
970         struct rte_eth_dev_info dev_info;
971         struct ether_addr *addr;
972         uint16_t i;
973         uint32_t pool = 0;
974         uint64_t pool_mask;
975
976         dev = &rte_eth_devices[port_id];
977
978         rte_eth_dev_info_get(port_id, &dev_info);
979
980         /* replay MAC address configuration including default MAC */
981         addr = &dev->data->mac_addrs[0];
982         if (*dev->dev_ops->mac_addr_set != NULL)
983                 (*dev->dev_ops->mac_addr_set)(dev, addr);
984         else if (*dev->dev_ops->mac_addr_add != NULL)
985                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
986
987         if (*dev->dev_ops->mac_addr_add != NULL) {
988                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
989                         addr = &dev->data->mac_addrs[i];
990
991                         /* skip zero address */
992                         if (is_zero_ether_addr(addr))
993                                 continue;
994
995                         pool = 0;
996                         pool_mask = dev->data->mac_pool_sel[i];
997
998                         do {
999                                 if (pool_mask & 1ULL)
1000                                         (*dev->dev_ops->mac_addr_add)(dev,
1001                                                 addr, i, pool);
1002                                 pool_mask >>= 1;
1003                                 pool++;
1004                         } while (pool_mask);
1005                 }
1006         }
1007
1008         /* replay promiscuous configuration */
1009         if (rte_eth_promiscuous_get(port_id) == 1)
1010                 rte_eth_promiscuous_enable(port_id);
1011         else if (rte_eth_promiscuous_get(port_id) == 0)
1012                 rte_eth_promiscuous_disable(port_id);
1013
1014         /* replay all multicast configuration */
1015         if (rte_eth_allmulticast_get(port_id) == 1)
1016                 rte_eth_allmulticast_enable(port_id);
1017         else if (rte_eth_allmulticast_get(port_id) == 0)
1018                 rte_eth_allmulticast_disable(port_id);
1019 }
1020
1021 int
1022 rte_eth_dev_start(uint16_t port_id)
1023 {
1024         struct rte_eth_dev *dev;
1025         int diag;
1026
1027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1028
1029         dev = &rte_eth_devices[port_id];
1030
1031         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1032
1033         if (dev->data->dev_started != 0) {
1034                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1035                         " already started\n",
1036                         port_id);
1037                 return 0;
1038         }
1039
1040         diag = (*dev->dev_ops->dev_start)(dev);
1041         if (diag == 0)
1042                 dev->data->dev_started = 1;
1043         else
1044                 return diag;
1045
1046         rte_eth_dev_config_restore(port_id);
1047
1048         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1049                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1050                 (*dev->dev_ops->link_update)(dev, 0);
1051         }
1052         return 0;
1053 }
1054
1055 void
1056 rte_eth_dev_stop(uint16_t port_id)
1057 {
1058         struct rte_eth_dev *dev;
1059
1060         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1061         dev = &rte_eth_devices[port_id];
1062
1063         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1064
1065         if (dev->data->dev_started == 0) {
1066                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1067                         " already stopped\n",
1068                         port_id);
1069                 return;
1070         }
1071
1072         dev->data->dev_started = 0;
1073         (*dev->dev_ops->dev_stop)(dev);
1074 }
1075
1076 int
1077 rte_eth_dev_set_link_up(uint16_t port_id)
1078 {
1079         struct rte_eth_dev *dev;
1080
1081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1082
1083         dev = &rte_eth_devices[port_id];
1084
1085         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1086         return (*dev->dev_ops->dev_set_link_up)(dev);
1087 }
1088
1089 int
1090 rte_eth_dev_set_link_down(uint16_t port_id)
1091 {
1092         struct rte_eth_dev *dev;
1093
1094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1095
1096         dev = &rte_eth_devices[port_id];
1097
1098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1099         return (*dev->dev_ops->dev_set_link_down)(dev);
1100 }
1101
1102 void
1103 rte_eth_dev_close(uint16_t port_id)
1104 {
1105         struct rte_eth_dev *dev;
1106
1107         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1108         dev = &rte_eth_devices[port_id];
1109
1110         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1111         dev->data->dev_started = 0;
1112         (*dev->dev_ops->dev_close)(dev);
1113
1114         dev->data->nb_rx_queues = 0;
1115         rte_free(dev->data->rx_queues);
1116         dev->data->rx_queues = NULL;
1117         dev->data->nb_tx_queues = 0;
1118         rte_free(dev->data->tx_queues);
1119         dev->data->tx_queues = NULL;
1120 }
1121
1122 int
1123 rte_eth_dev_reset(uint16_t port_id)
1124 {
1125         struct rte_eth_dev *dev;
1126         int ret;
1127
1128         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1129         dev = &rte_eth_devices[port_id];
1130
1131         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1132
1133         rte_eth_dev_stop(port_id);
1134         ret = dev->dev_ops->dev_reset(dev);
1135
1136         return ret;
1137 }
1138
1139 int
1140 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1141                        uint16_t nb_rx_desc, unsigned int socket_id,
1142                        const struct rte_eth_rxconf *rx_conf,
1143                        struct rte_mempool *mp)
1144 {
1145         int ret;
1146         uint32_t mbp_buf_size;
1147         struct rte_eth_dev *dev;
1148         struct rte_eth_dev_info dev_info;
1149         struct rte_eth_rxconf local_conf;
1150         void **rxq;
1151
1152         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1153
1154         dev = &rte_eth_devices[port_id];
1155         if (rx_queue_id >= dev->data->nb_rx_queues) {
1156                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1157                 return -EINVAL;
1158         }
1159
1160         if (dev->data->dev_started) {
1161                 RTE_PMD_DEBUG_TRACE(
1162                     "port %d must be stopped to allow configuration\n", port_id);
1163                 return -EBUSY;
1164         }
1165
1166         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1167         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1168
1169         /*
1170          * Check the size of the mbuf data buffer.
1171          * This value must be provided in the private data of the memory pool.
1172          * First check that the memory pool has valid private data.
1173          */
1174         rte_eth_dev_info_get(port_id, &dev_info);
1175         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1176                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1177                                 mp->name, (int) mp->private_data_size,
1178                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1179                 return -ENOSPC;
1180         }
1181         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1182
1183         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1184                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1185                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1186                                 "=%d)\n",
1187                                 mp->name,
1188                                 (int)mbp_buf_size,
1189                                 (int)(RTE_PKTMBUF_HEADROOM +
1190                                       dev_info.min_rx_bufsize),
1191                                 (int)RTE_PKTMBUF_HEADROOM,
1192                                 (int)dev_info.min_rx_bufsize);
1193                 return -EINVAL;
1194         }
1195
1196         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1197                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1198                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1199
1200                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1201                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1202                         nb_rx_desc,
1203                         dev_info.rx_desc_lim.nb_max,
1204                         dev_info.rx_desc_lim.nb_min,
1205                         dev_info.rx_desc_lim.nb_align);
1206                 return -EINVAL;
1207         }
1208
1209         rxq = dev->data->rx_queues;
1210         if (rxq[rx_queue_id]) {
1211                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1212                                         -ENOTSUP);
1213                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1214                 rxq[rx_queue_id] = NULL;
1215         }
1216
1217         if (rx_conf == NULL)
1218                 rx_conf = &dev_info.default_rxconf;
1219
1220         local_conf = *rx_conf;
1221         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1222                 /**
1223                  * Reflect the port offloads in the queue offloads so that
1224                  * port-level offloads are not discarded.
1225                  */
1226                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1227                                                     &local_conf.offloads);
1228         }
1229
1230         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1231                                               socket_id, &local_conf, mp);
1232         if (!ret) {
1233                 if (!dev->data->min_rx_buf_size ||
1234                     dev->data->min_rx_buf_size > mbp_buf_size)
1235                         dev->data->min_rx_buf_size = mbp_buf_size;
1236         }
1237
1238         return ret;
1239 }
1240
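/*
 * Illustrative usage sketch (not part of the library): one Rx queue backed
 * by a freshly created mbuf pool.  The pool name and sizing are arbitrary
 * examples; rte_pktmbuf_pool_create() and RTE_MBUF_DEFAULT_BUF_SIZE come
 * from librte_mbuf, and passing a NULL rxconf selects the driver defaults
 * as handled above.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_eth_dev_socket_id(port_id));
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *
 *	if (rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */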
1241 /**
1242  * Convert from the legacy txq_flags API to the per-queue Tx offloads API.
1243  */
1244 static void
1245 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1246 {
1247         uint64_t offloads = 0;
1248
1249         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1250                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1251         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1252                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1253         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1254                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1255         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1256                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1257         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1258                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1259         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1260             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1261                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1262
1263         *tx_offloads = offloads;
1264 }
1265
1266 /**
1267  * Convert from the per-queue Tx offloads API to the legacy txq_flags API.
1268  */
1269 static void
1270 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1271 {
1272         uint32_t flags = 0;
1273
1274         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1275                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1276         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1277                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1278         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1279                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1280         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1281                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1282         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1283                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1284         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1285                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1286
1287         *txq_flags = flags;
1288 }
1289
1290 int
1291 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1292                        uint16_t nb_tx_desc, unsigned int socket_id,
1293                        const struct rte_eth_txconf *tx_conf)
1294 {
1295         struct rte_eth_dev *dev;
1296         struct rte_eth_dev_info dev_info;
1297         struct rte_eth_txconf local_conf;
1298         void **txq;
1299
1300         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1301
1302         dev = &rte_eth_devices[port_id];
1303         if (tx_queue_id >= dev->data->nb_tx_queues) {
1304                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1305                 return -EINVAL;
1306         }
1307
1308         if (dev->data->dev_started) {
1309                 RTE_PMD_DEBUG_TRACE(
1310                     "port %d must be stopped to allow configuration\n", port_id);
1311                 return -EBUSY;
1312         }
1313
1314         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1315         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1316
1317         rte_eth_dev_info_get(port_id, &dev_info);
1318
1319         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1320             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1321             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1322                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1323                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1324                                 nb_tx_desc,
1325                                 dev_info.tx_desc_lim.nb_max,
1326                                 dev_info.tx_desc_lim.nb_min,
1327                                 dev_info.tx_desc_lim.nb_align);
1328                 return -EINVAL;
1329         }
1330
1331         txq = dev->data->tx_queues;
1332         if (txq[tx_queue_id]) {
1333                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1334                                         -ENOTSUP);
1335                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1336                 txq[tx_queue_id] = NULL;
1337         }
1338
1339         if (tx_conf == NULL)
1340                 tx_conf = &dev_info.default_txconf;
1341
1342         /*
1343          * Convert between the two offloads APIs so that PMDs need to
1344          * support only one of them.
1345          */
1346         local_conf = *tx_conf;
1347         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1348                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1349                                              &local_conf.txq_flags);
1350                 /* Keep the ignore flag. */
1351                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1352         } else {
1353                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1354                                           &local_conf.offloads);
1355         }
1356
1357         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1358                                                socket_id, &local_conf);
1359 }
1360
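/*
 * Illustrative usage sketch (not part of the library): a Tx queue set up
 * through the new offloads path by marking txq_flags with
 * ETH_TXQ_FLAGS_IGNORE.  Starting from the device defaults keeps the rest
 * of the txconf sane; an application would normally check
 * dev_info.tx_offload_capa before requesting an offload.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	txconf = info.default_txconf;
 *	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *	txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &txconf) < 0)
 *		rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */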
1361 void
1362 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1363                 void *userdata __rte_unused)
1364 {
1365         unsigned i;
1366
1367         for (i = 0; i < unsent; i++)
1368                 rte_pktmbuf_free(pkts[i]);
1369 }
1370
1371 void
1372 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1373                 void *userdata)
1374 {
1375         uint64_t *count = userdata;
1376         unsigned i;
1377
1378         for (i = 0; i < unsent; i++)
1379                 rte_pktmbuf_free(pkts[i]);
1380
1381         *count += unsent;
1382 }
1383
1384 int
1385 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1386                 buffer_tx_error_fn cbfn, void *userdata)
1387 {
1388         buffer->error_callback = cbfn;
1389         buffer->error_userdata = userdata;
1390         return 0;
1391 }
1392
1393 int
1394 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1395 {
1396         int ret = 0;
1397
1398         if (buffer == NULL)
1399                 return -EINVAL;
1400
1401         buffer->size = size;
1402         if (buffer->error_callback == NULL) {
1403                 ret = rte_eth_tx_buffer_set_err_callback(
1404                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1405         }
1406
1407         return ret;
1408 }
1409
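/*
 * Illustrative usage sketch (not part of the library): the Tx buffer is
 * allocated by the application (RTE_ETH_TX_BUFFER_SIZE() gives the required
 * size), initialized with rte_eth_tx_buffer_init() and optionally hooked up
 * to the counting callback defined above.  The buffer size of 32 packets is
 * only an example.
 *
 *	struct rte_eth_dev_tx_buffer *buf;
 *	uint64_t dropped = 0;
 *
 *	buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	if (buf == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot allocate tx buffer\n");
 *
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 *
 *	Per packet the application then calls rte_eth_tx_buffer(port_id, 0,
 *	buf, m), and periodically rte_eth_tx_buffer_flush(port_id, 0, buf).
 */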
1410 int
1411 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1412 {
1413         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1414
1415         /* Validate Input Data. Bail if not valid or not supported. */
1416         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1417         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1418
1419         /* Call driver to free pending mbufs. */
1420         return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1421                         free_cnt);
1422 }
1423
1424 void
1425 rte_eth_promiscuous_enable(uint16_t port_id)
1426 {
1427         struct rte_eth_dev *dev;
1428
1429         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1430         dev = &rte_eth_devices[port_id];
1431
1432         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1433         (*dev->dev_ops->promiscuous_enable)(dev);
1434         dev->data->promiscuous = 1;
1435 }
1436
1437 void
1438 rte_eth_promiscuous_disable(uint16_t port_id)
1439 {
1440         struct rte_eth_dev *dev;
1441
1442         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1443         dev = &rte_eth_devices[port_id];
1444
1445         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1446         dev->data->promiscuous = 0;
1447         (*dev->dev_ops->promiscuous_disable)(dev);
1448 }
1449
1450 int
1451 rte_eth_promiscuous_get(uint16_t port_id)
1452 {
1453         struct rte_eth_dev *dev;
1454
1455         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1456
1457         dev = &rte_eth_devices[port_id];
1458         return dev->data->promiscuous;
1459 }
1460
1461 void
1462 rte_eth_allmulticast_enable(uint16_t port_id)
1463 {
1464         struct rte_eth_dev *dev;
1465
1466         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1467         dev = &rte_eth_devices[port_id];
1468
1469         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1470         (*dev->dev_ops->allmulticast_enable)(dev);
1471         dev->data->all_multicast = 1;
1472 }
1473
1474 void
1475 rte_eth_allmulticast_disable(uint16_t port_id)
1476 {
1477         struct rte_eth_dev *dev;
1478
1479         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1480         dev = &rte_eth_devices[port_id];
1481
1482         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1483         dev->data->all_multicast = 0;
1484         (*dev->dev_ops->allmulticast_disable)(dev);
1485 }
1486
1487 int
1488 rte_eth_allmulticast_get(uint16_t port_id)
1489 {
1490         struct rte_eth_dev *dev;
1491
1492         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1493
1494         dev = &rte_eth_devices[port_id];
1495         return dev->data->all_multicast;
1496 }
1497
1498 static inline int
1499 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1500                                 struct rte_eth_link *link)
1501 {
1502         struct rte_eth_link *dst = link;
1503         struct rte_eth_link *src = &(dev->data->dev_link);
1504
1505         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1506                                         *(uint64_t *)src) == 0)
1507                 return -1;
1508
1509         return 0;
1510 }
1511
1512 void
1513 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1514 {
1515         struct rte_eth_dev *dev;
1516
1517         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1518         dev = &rte_eth_devices[port_id];
1519
1520         if (dev->data->dev_conf.intr_conf.lsc != 0)
1521                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1522         else {
1523                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1524                 (*dev->dev_ops->link_update)(dev, 1);
1525                 *eth_link = dev->data->dev_link;
1526         }
1527 }
1528
1529 void
1530 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1531 {
1532         struct rte_eth_dev *dev;
1533
1534         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1535         dev = &rte_eth_devices[port_id];
1536
1537         if (dev->data->dev_conf.intr_conf.lsc != 0)
1538                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1539         else {
1540                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1541                 (*dev->dev_ops->link_update)(dev, 0);
1542                 *eth_link = dev->data->dev_link;
1543         }
1544 }
1545
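/*
 * Illustrative usage sketch (not part of the library): a non-blocking link
 * check, as applications typically do after rte_eth_dev_start().
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up, %u Mbps %s-duplex\n", port_id,
 *		       link.link_speed,
 *		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 *		       "full" : "half");
 */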
1546 int
1547 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1548 {
1549         struct rte_eth_dev *dev;
1550
1551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1552
1553         dev = &rte_eth_devices[port_id];
1554         memset(stats, 0, sizeof(*stats));
1555
1556         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1557         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1558         return (*dev->dev_ops->stats_get)(dev, stats);
1559 }
1560
1561 int
1562 rte_eth_stats_reset(uint16_t port_id)
1563 {
1564         struct rte_eth_dev *dev;
1565
1566         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1567         dev = &rte_eth_devices[port_id];
1568
1569         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1570         (*dev->dev_ops->stats_reset)(dev);
1571         dev->data->rx_mbuf_alloc_failed = 0;
1572
1573         return 0;
1574 }
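
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * read the basic counters, report them, then clear them; assumes a valid
 * port_id.
 *
 *        struct rte_eth_stats stats;
 *
 *        if (rte_eth_stats_get(port_id, &stats) == 0)
 *                printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *                       stats.ipackets, stats.opackets, stats.imissed);
 *        rte_eth_stats_reset(port_id);
 */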
1575
1576 static inline int
1577 get_xstats_basic_count(struct rte_eth_dev *dev)
1578 {
1579         uint16_t nb_rxqs, nb_txqs;
1580         int count;
1581
1582         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1583         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1584
1585         count = RTE_NB_STATS;
1586         count += nb_rxqs * RTE_NB_RXQ_STATS;
1587         count += nb_txqs * RTE_NB_TXQ_STATS;
1588
1589         return count;
1590 }
1591
1592 static int
1593 get_xstats_count(uint16_t port_id)
1594 {
1595         struct rte_eth_dev *dev;
1596         int count;
1597
1598         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1599         dev = &rte_eth_devices[port_id];
1600         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1601                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1602                                 NULL, 0);
1603                 if (count < 0)
1604                         return count;
1605         }
1606         if (dev->dev_ops->xstats_get_names != NULL) {
1607                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1608                 if (count < 0)
1609                         return count;
1610         } else
1611                 count = 0;
1612
1613
1614         count += get_xstats_basic_count(dev);
1615
1616         return count;
1617 }
1618
1619 int
1620 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1621                 uint64_t *id)
1622 {
1623         int cnt_xstats, idx_xstat;
1624
1625         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1626
1627         if (!id) {
1628                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1629                 return -ENOMEM;
1630         }
1631
1632         if (!xstat_name) {
1633                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1634                 return -ENOMEM;
1635         }
1636
1637         /* Get count */
1638         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1639         if (cnt_xstats  < 0) {
1640                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1641                 return -ENODEV;
1642         }
1643
1644         /* Get id-name lookup table */
1645         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1646
1647         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1648                         port_id, xstats_names, cnt_xstats, NULL)) {
1649                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1650                 return -1;
1651         }
1652
1653         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1654                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1655                         *id = idx_xstat;
1656                         return 0;
1657                 }
1658         }
1659
1660         return -EINVAL;
1661 }
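
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * resolve one extended counter by name and fetch only that value;
 * "rx_good_packets" is one of the basic statistics defined above, other
 * names depend on the driver.
 *
 *        uint64_t id, value;
 *
 *        if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *                        &id) == 0 &&
 *            rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *                printf("rx_good_packets = %" PRIu64 "\n", value);
 */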
1662
1663 /* retrieve basic stats names */
1664 static int
1665 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1666         struct rte_eth_xstat_name *xstats_names)
1667 {
1668         int cnt_used_entries = 0;
1669         uint32_t idx, id_queue;
1670         uint16_t num_q;
1671
1672         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1673                 snprintf(xstats_names[cnt_used_entries].name,
1674                         sizeof(xstats_names[0].name),
1675                         "%s", rte_stats_strings[idx].name);
1676                 cnt_used_entries++;
1677         }
1678         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1679         for (id_queue = 0; id_queue < num_q; id_queue++) {
1680                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1681                         snprintf(xstats_names[cnt_used_entries].name,
1682                                 sizeof(xstats_names[0].name),
1683                                 "rx_q%u%s",
1684                                 id_queue, rte_rxq_stats_strings[idx].name);
1685                         cnt_used_entries++;
1686                 }
1687
1688         }
1689         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1690         for (id_queue = 0; id_queue < num_q; id_queue++) {
1691                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1692                         snprintf(xstats_names[cnt_used_entries].name,
1693                                 sizeof(xstats_names[0].name),
1694                                 "tx_q%u%s",
1695                                 id_queue, rte_txq_stats_strings[idx].name);
1696                         cnt_used_entries++;
1697                 }
1698         }
1699         return cnt_used_entries;
1700 }
1701
1702 /* retrieve ethdev extended statistics names */
1703 int
1704 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1705         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1706         uint64_t *ids)
1707 {
1708         struct rte_eth_xstat_name *xstats_names_copy;
1709         unsigned int no_basic_stat_requested = 1;
1710         unsigned int no_ext_stat_requested = 1;
1711         unsigned int expected_entries;
1712         unsigned int basic_count;
1713         struct rte_eth_dev *dev;
1714         unsigned int i;
1715         int ret;
1716
1717         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1718         dev = &rte_eth_devices[port_id];
1719
1720         basic_count = get_xstats_basic_count(dev);
1721         ret = get_xstats_count(port_id);
1722         if (ret < 0)
1723                 return ret;
1724         expected_entries = (unsigned int)ret;
1725
1726         /* Return max number of stats if no ids given */
1727         if (!ids) {
1728                 if (!xstats_names)
1729                         return expected_entries;
1730                 else if (xstats_names && size < expected_entries)
1731                         return expected_entries;
1732         }
1733
1734         if (ids && !xstats_names)
1735                 return -EINVAL;
1736
1737         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1738                 uint64_t ids_copy[size];
1739
1740                 for (i = 0; i < size; i++) {
1741                         if (ids[i] < basic_count) {
1742                                 no_basic_stat_requested = 0;
1743                                 break;
1744                         }
1745
1746                         /*
1747                          * Convert ids to xstats ids that PMD knows.
1748                          * ids known by user are basic + extended stats.
1749                          */
1750                         ids_copy[i] = ids[i] - basic_count;
1751                 }
1752
1753                 if (no_basic_stat_requested)
1754                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1755                                         xstats_names, ids_copy, size);
1756         }
1757
1758         /* Retrieve all stats */
1759         if (!ids) {
1760                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1761                                 expected_entries);
1762                 if (num_stats < 0 || num_stats > (int)expected_entries)
1763                         return num_stats;
1764                 else
1765                         return expected_entries;
1766         }
1767
1768         xstats_names_copy = calloc(expected_entries,
1769                 sizeof(struct rte_eth_xstat_name));
1770
1771         if (!xstats_names_copy) {
1772                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
1773                 return -ENOMEM;
1774         }
1775
1776         if (ids) {
1777                 for (i = 0; i < size; i++) {
1778                         if (ids[i] >= basic_count) {
1779                                 no_ext_stat_requested = 0;
1780                                 break;
1781                         }
1782                 }
1783         }
1784
1785         /* Fill xstats_names_copy structure */
1786         if (ids && no_ext_stat_requested) {
1787                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
1788         } else {
1789                 rte_eth_xstats_get_names(port_id, xstats_names_copy,
1790                         expected_entries);
1791         }
1792
1793         /* Filter stats */
1794         for (i = 0; i < size; i++) {
1795                 if (ids[i] >= expected_entries) {
1796                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1797                         free(xstats_names_copy);
1798                         return -1;
1799                 }
1800                 xstats_names[i] = xstats_names_copy[ids[i]];
1801         }
1802
1803         free(xstats_names_copy);
1804         return size;
1805 }
1806
1807 int
1808 rte_eth_xstats_get_names(uint16_t port_id,
1809         struct rte_eth_xstat_name *xstats_names,
1810         unsigned int size)
1811 {
1812         struct rte_eth_dev *dev;
1813         int cnt_used_entries;
1814         int cnt_expected_entries;
1815         int cnt_driver_entries;
1816
1817         cnt_expected_entries = get_xstats_count(port_id);
1818         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1819                         (int)size < cnt_expected_entries)
1820                 return cnt_expected_entries;
1821
1822         /* port_id checked in get_xstats_count() */
1823         dev = &rte_eth_devices[port_id];
1824
1825         cnt_used_entries = rte_eth_basic_stats_get_names(
1826                 dev, xstats_names);
1827
1828         if (dev->dev_ops->xstats_get_names != NULL) {
1829                 /* If there are any driver-specific xstats, append them
1830                  * to end of list.
1831                  */
1832                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1833                         dev,
1834                         xstats_names + cnt_used_entries,
1835                         size - cnt_used_entries);
1836                 if (cnt_driver_entries < 0)
1837                         return cnt_driver_entries;
1838                 cnt_used_entries += cnt_driver_entries;
1839         }
1840
1841         return cnt_used_entries;
1842 }
1843
1844
1845 static int
1846 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
1847 {
1848         struct rte_eth_dev *dev;
1849         struct rte_eth_stats eth_stats;
1850         unsigned int count = 0, i, q;
1851         uint64_t val, *stats_ptr;
1852         uint16_t nb_rxqs, nb_txqs;
1853
1854         rte_eth_stats_get(port_id, &eth_stats);
1855         dev = &rte_eth_devices[port_id];
1856
1857         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1858         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1859
1860         /* global stats */
1861         for (i = 0; i < RTE_NB_STATS; i++) {
1862                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1863                                         rte_stats_strings[i].offset);
1864                 val = *stats_ptr;
1865                 xstats[count++].value = val;
1866         }
1867
1868         /* per-rxq stats */
1869         for (q = 0; q < nb_rxqs; q++) {
1870                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1871                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1872                                         rte_rxq_stats_strings[i].offset +
1873                                         q * sizeof(uint64_t));
1874                         val = *stats_ptr;
1875                         xstats[count++].value = val;
1876                 }
1877         }
1878
1879         /* per-txq stats */
1880         for (q = 0; q < nb_txqs; q++) {
1881                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1882                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1883                                         rte_txq_stats_strings[i].offset +
1884                                         q * sizeof(uint64_t));
1885                         val = *stats_ptr;
1886                         xstats[count++].value = val;
1887                 }
1888         }
1889         return count;
1890 }
1891
1892 /* retrieve ethdev extended statistics */
1893 int
1894 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1895                          uint64_t *values, unsigned int size)
1896 {
1897         unsigned int no_basic_stat_requested = 1;
1898         unsigned int no_ext_stat_requested = 1;
1899         unsigned int num_xstats_filled;
1900         unsigned int basic_count;
1901         uint16_t expected_entries;
1902         struct rte_eth_dev *dev;
1903         unsigned int i;
1904         int ret;
1905
1906         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1907         expected_entries = get_xstats_count(port_id);
1908         struct rte_eth_xstat xstats[expected_entries];
1909         dev = &rte_eth_devices[port_id];
1910         basic_count = get_xstats_basic_count(dev);
1911
1912         /* Return max number of stats if no ids given */
1913         if (!ids) {
1914                 if (!values)
1915                         return expected_entries;
1916                 else if (values && size < expected_entries)
1917                         return expected_entries;
1918         }
1919
1920         if (ids && !values)
1921                 return -EINVAL;
1922
1923         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
1925                 uint64_t ids_copy[size];
1926
1927                 for (i = 0; i < size; i++) {
1928                         if (ids[i] < basic_count) {
1929                                 no_basic_stat_requested = 0;
1930                                 break;
1931                         }
1932
1933                         /*
1934                          * Convert ids to xstats ids that PMD knows.
1935                          * ids known by user are basic + extended stats.
1936                          */
1937                         ids_copy[i] = ids[i] - basic_count;
1938                 }
1939
1940                 if (no_basic_stat_requested)
1941                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
1942                                         values, size);
1943         }
1944
1945         if (ids) {
1946                 for (i = 0; i < size; i++) {
1947                         if (ids[i] >= basic_count) {
1948                                 no_ext_stat_requested = 0;
1949                                 break;
1950                         }
1951                 }
1952         }
1953
1954         /* Fill the xstats structure */
1955         if (ids && no_ext_stat_requested)
1956                 ret = rte_eth_basic_stats_get(port_id, xstats);
1957         else
1958                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
1959
1960         if (ret < 0)
1961                 return ret;
1962         num_xstats_filled = (unsigned int)ret;
1963
1964         /* Return all stats */
1965         if (!ids) {
1966                 for (i = 0; i < num_xstats_filled; i++)
1967                         values[i] = xstats[i].value;
1968                 return expected_entries;
1969         }
1970
1971         /* Filter stats */
1972         for (i = 0; i < size; i++) {
1973                 if (ids[i] >= expected_entries) {
1974                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1975                         return -1;
1976                 }
1977                 values[i] = xstats[ids[i]].value;
1978         }
1979         return size;
1980 }
1981
1982 int
1983 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
1984         unsigned int n)
1985 {
1986         struct rte_eth_dev *dev;
1987         unsigned int count = 0, i;
1988         signed int xcount = 0;
1989         uint16_t nb_rxqs, nb_txqs;
1990
1991         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1992
1993         dev = &rte_eth_devices[port_id];
1994
1995         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1996         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1997
1998         /* Return generic statistics */
1999         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2000                 (nb_txqs * RTE_NB_TXQ_STATS);
2001
2002         /* implemented by the driver */
2003         if (dev->dev_ops->xstats_get != NULL) {
2004                 /* Retrieve the xstats from the driver at the end of the
2005                  * xstats struct.
2006                  */
2007                 xcount = (*dev->dev_ops->xstats_get)(dev,
2008                                      xstats ? xstats + count : NULL,
2009                                      (n > count) ? n - count : 0);
2010
2011                 if (xcount < 0)
2012                         return xcount;
2013         }
2014
2015         if (n < count + xcount || xstats == NULL)
2016                 return count + xcount;
2017
2018         /* now fill the xstats structure */
2019         count = rte_eth_basic_stats_get(port_id, xstats);
2020
2021         for (i = 0; i < count; i++)
2022                 xstats[i].id = i;
2023         /* add an offset to driver-specific stats */
2024         for ( ; i < count + xcount; i++)
2025                 xstats[i].id += count;
2026
2027         return count + xcount;
2028 }
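
/*
 * Illustrative usage sketch (application-side code, not part of this file)
 * of the usual two-call pattern: query the number of xstats with a NULL
 * buffer, allocate, then fetch names and values; assumes a valid port_id.
 *
 *        struct rte_eth_xstat *xs;
 *        struct rte_eth_xstat_name *names;
 *        int i, n;
 *
 *        n = rte_eth_xstats_get(port_id, NULL, 0);
 *        if (n <= 0)
 *                return;
 *        xs = calloc(n, sizeof(*xs));
 *        names = calloc(n, sizeof(*names));
 *        if (xs != NULL && names != NULL &&
 *            rte_eth_xstats_get_names(port_id, names, n) == n &&
 *            rte_eth_xstats_get(port_id, xs, n) == n)
 *                for (i = 0; i < n; i++)
 *                        printf("%s: %" PRIu64 "\n",
 *                               names[xs[i].id].name, xs[i].value);
 *        free(xs);
 *        free(names);
 */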
2029
2030 /* reset ethdev extended statistics */
2031 void
2032 rte_eth_xstats_reset(uint16_t port_id)
2033 {
2034         struct rte_eth_dev *dev;
2035
2036         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2037         dev = &rte_eth_devices[port_id];
2038
2039         /* implemented by the driver */
2040         if (dev->dev_ops->xstats_reset != NULL) {
2041                 (*dev->dev_ops->xstats_reset)(dev);
2042                 return;
2043         }
2044
2045         /* fallback to default */
2046         rte_eth_stats_reset(port_id);
2047 }
2048
2049 static int
2050 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2051                 uint8_t is_rx)
2052 {
2053         struct rte_eth_dev *dev;
2054
2055         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2056
2057         dev = &rte_eth_devices[port_id];
2058
2059         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2060         return (*dev->dev_ops->queue_stats_mapping_set)
2061                         (dev, queue_id, stat_idx, is_rx);
2062 }
2063
2064
2065 int
2066 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2067                 uint8_t stat_idx)
2068 {
2069         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
2070                         STAT_QMAP_TX);
2071 }
2072
2073
2074 int
2075 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2076                 uint8_t stat_idx)
2077 {
2078         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
2079                         STAT_QMAP_RX);
2080 }
2081
2082 int
2083 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2084 {
2085         struct rte_eth_dev *dev;
2086
2087         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2088         dev = &rte_eth_devices[port_id];
2089
2090         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2091         return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
2092 }
2093
2094 void
2095 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2096 {
2097         struct rte_eth_dev *dev;
2098         const struct rte_eth_desc_lim lim = {
2099                 .nb_max = UINT16_MAX,
2100                 .nb_min = 0,
2101                 .nb_align = 1,
2102         };
2103
2104         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2105         dev = &rte_eth_devices[port_id];
2106
2107         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2108         dev_info->rx_desc_lim = lim;
2109         dev_info->tx_desc_lim = lim;
2110
2111         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2112         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2113         dev_info->driver_name = dev->device->driver->name;
2114         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2115         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2116 }
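
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * query the device limits before deciding how many queues to configure.
 *
 *        struct rte_eth_dev_info dev_info;
 *
 *        rte_eth_dev_info_get(port_id, &dev_info);
 *        printf("%s: up to %u rx / %u tx queues, %u MAC addresses\n",
 *               dev_info.driver_name, dev_info.max_rx_queues,
 *               dev_info.max_tx_queues, dev_info.max_mac_addrs);
 */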
2117
2118 int
2119 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2120                                  uint32_t *ptypes, int num)
2121 {
2122         int i, j;
2123         struct rte_eth_dev *dev;
2124         const uint32_t *all_ptypes;
2125
2126         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2127         dev = &rte_eth_devices[port_id];
2128         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2129         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2130
2131         if (!all_ptypes)
2132                 return 0;
2133
2134         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2135                 if (all_ptypes[i] & ptype_mask) {
2136                         if (j < num)
2137                                 ptypes[j] = all_ptypes[i];
2138                         j++;
2139                 }
2140
2141         return j;
2142 }
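
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * list the L4 packet types the port can recognise; RTE_PTYPE_L4_MASK and
 * RTE_DIM() come from the mbuf and common headers.
 *
 *        uint32_t ptypes[32];
 *        int i, num;
 *
 *        num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *                        ptypes, RTE_DIM(ptypes));
 *        for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
 *                printf("supported ptype 0x%08x\n", ptypes[i]);
 */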
2143
2144 void
2145 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2146 {
2147         struct rte_eth_dev *dev;
2148
2149         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2150         dev = &rte_eth_devices[port_id];
2151         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2152 }
2153
2154
2155 int
2156 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2157 {
2158         struct rte_eth_dev *dev;
2159
2160         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2161
2162         dev = &rte_eth_devices[port_id];
2163         *mtu = dev->data->mtu;
2164         return 0;
2165 }
2166
2167 int
2168 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2169 {
2170         int ret;
2171         struct rte_eth_dev *dev;
2172
2173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2174         dev = &rte_eth_devices[port_id];
2175         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2176
2177         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2178         if (!ret)
2179                 dev->data->mtu = mtu;
2180
2181         return ret;
2182 }
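
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * raise the MTU for jumbo frames and read the value back; 9000 bytes is
 * only an example, the driver may reject it.
 *
 *        uint16_t mtu;
 *
 *        if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *            rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *                printf("port %u MTU is now %u\n", port_id, mtu);
 */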
2183
2184 int
2185 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2186 {
2187         struct rte_eth_dev *dev;
2188         int ret;
2189
2190         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2191         dev = &rte_eth_devices[port_id];
2192         if (!(dev->data->dev_conf.rxmode.offloads &
2193               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2194                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2195                 return -ENOSYS;
2196         }
2197
2198         if (vlan_id > 4095) {
2199                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2200                                 port_id, (unsigned) vlan_id);
2201                 return -EINVAL;
2202         }
2203         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2204
2205         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2206         if (ret == 0) {
2207                 struct rte_vlan_filter_conf *vfc;
2208                 int vidx;
2209                 int vbit;
2210
2211                 vfc = &dev->data->vlan_filter_conf;
2212                 vidx = vlan_id / 64;
2213                 vbit = vlan_id % 64;
2214
2215                 if (on)
2216                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2217                 else
2218                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2219         }
2220
2221         return ret;
2222 }
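
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * accept VLAN 100 on a port whose rxmode was configured with
 * DEV_RX_OFFLOAD_VLAN_FILTER; the VLAN id is only an example.
 *
 *        int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 *
 *        if (ret != 0)
 *                printf("cannot add filter for VLAN 100: %d\n", ret);
 */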
2223
2224 int
2225 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2226                                     int on)
2227 {
2228         struct rte_eth_dev *dev;
2229
2230         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2231         dev = &rte_eth_devices[port_id];
2232         if (rx_queue_id >= dev->data->nb_rx_queues) {
2233                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2234                 return -EINVAL;
2235         }
2236
2237         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2238         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2239
2240         return 0;
2241 }
2242
2243 int
2244 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2245                                 enum rte_vlan_type vlan_type,
2246                                 uint16_t tpid)
2247 {
2248         struct rte_eth_dev *dev;
2249
2250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2251         dev = &rte_eth_devices[port_id];
2252         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2253
2254         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
2255 }
2256
2257 int
2258 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2259 {
2260         struct rte_eth_dev *dev;
2261         int ret = 0;
2262         int mask = 0;
2263         int cur, org = 0;
2264         uint64_t orig_offloads;
2265
2266         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2267         dev = &rte_eth_devices[port_id];
2268
2269         /* save original values in case of failure */
2270         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2271
2272         /* check which options were changed by the application */
2273         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2274         org = !!(dev->data->dev_conf.rxmode.offloads &
2275                  DEV_RX_OFFLOAD_VLAN_STRIP);
2276         if (cur != org) {
2277                 if (cur)
2278                         dev->data->dev_conf.rxmode.offloads |=
2279                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2280                 else
2281                         dev->data->dev_conf.rxmode.offloads &=
2282                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2283                 mask |= ETH_VLAN_STRIP_MASK;
2284         }
2285
2286         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2287         org = !!(dev->data->dev_conf.rxmode.offloads &
2288                  DEV_RX_OFFLOAD_VLAN_FILTER);
2289         if (cur != org) {
2290                 if (cur)
2291                         dev->data->dev_conf.rxmode.offloads |=
2292                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2293                 else
2294                         dev->data->dev_conf.rxmode.offloads &=
2295                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2296                 mask |= ETH_VLAN_FILTER_MASK;
2297         }
2298
2299         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2300         org = !!(dev->data->dev_conf.rxmode.offloads &
2301                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2302         if (cur != org) {
2303                 if (cur)
2304                         dev->data->dev_conf.rxmode.offloads |=
2305                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2306                 else
2307                         dev->data->dev_conf.rxmode.offloads &=
2308                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2309                 mask |= ETH_VLAN_EXTEND_MASK;
2310         }
2311
2312         /*no change*/
2313         if (mask == 0)
2314                 return ret;
2315
2316         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2317
2318         /*
2319          * Convert to the offload bitfield API in case the underlying PMD
2320          * still relies on it.
2321          */
2322         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2323                                     &dev->data->dev_conf.rxmode);
2324         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2325         if (ret) {
2326                 /* hit an error, restore the original values */
2327                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2328                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2329                                             &dev->data->dev_conf.rxmode);
2330         }
2331
2332         return ret;
2333 }
2334
2335 int
2336 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2337 {
2338         struct rte_eth_dev *dev;
2339         int ret = 0;
2340
2341         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2342         dev = &rte_eth_devices[port_id];
2343
2344         if (dev->data->dev_conf.rxmode.offloads &
2345             DEV_RX_OFFLOAD_VLAN_STRIP)
2346                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2347
2348         if (dev->data->dev_conf.rxmode.offloads &
2349             DEV_RX_OFFLOAD_VLAN_FILTER)
2350                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2351
2352         if (dev->data->dev_conf.rxmode.offloads &
2353             DEV_RX_OFFLOAD_VLAN_EXTEND)
2354                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2355
2356         return ret;
2357 }
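
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * enable VLAN stripping without disturbing the other VLAN offload flags,
 * using the usual read-modify-write pattern on the mask.
 *
 *        int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *        if (mask >= 0) {
 *                mask |= ETH_VLAN_STRIP_OFFLOAD;
 *                rte_eth_dev_set_vlan_offload(port_id, mask);
 *        }
 */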
2358
2359 int
2360 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2361 {
2362         struct rte_eth_dev *dev;
2363
2364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2365         dev = &rte_eth_devices[port_id];
2366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2367         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2368
2369         return 0;
2370 }
2371
2372 int
2373 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2374 {
2375         struct rte_eth_dev *dev;
2376
2377         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2378         dev = &rte_eth_devices[port_id];
2379         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2380         memset(fc_conf, 0, sizeof(*fc_conf));
2381         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2382 }
2383
2384 int
2385 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2386 {
2387         struct rte_eth_dev *dev;
2388
2389         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2390         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2391                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2392                 return -EINVAL;
2393         }
2394
2395         dev = &rte_eth_devices[port_id];
2396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2397         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2398 }
2399
2400 int
2401 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2402                                    struct rte_eth_pfc_conf *pfc_conf)
2403 {
2404         struct rte_eth_dev *dev;
2405
2406         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2407         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2408                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2409                 return -EINVAL;
2410         }
2411
2412         dev = &rte_eth_devices[port_id];
2413         /* High/low water mark validation is device-specific */
2414         if (*dev->dev_ops->priority_flow_ctrl_set)
2415                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2416         return -ENOTSUP;
2417 }
2418
2419 static int
2420 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2421                         uint16_t reta_size)
2422 {
2423         uint16_t i, num;
2424
2425         if (!reta_conf)
2426                 return -EINVAL;
2427
2428         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2429         for (i = 0; i < num; i++) {
2430                 if (reta_conf[i].mask)
2431                         return 0;
2432         }
2433
2434         return -EINVAL;
2435 }
2436
2437 static int
2438 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2439                          uint16_t reta_size,
2440                          uint16_t max_rxq)
2441 {
2442         uint16_t i, idx, shift;
2443
2444         if (!reta_conf)
2445                 return -EINVAL;
2446
2447         if (max_rxq == 0) {
2448                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2449                 return -EINVAL;
2450         }
2451
2452         for (i = 0; i < reta_size; i++) {
2453                 idx = i / RTE_RETA_GROUP_SIZE;
2454                 shift = i % RTE_RETA_GROUP_SIZE;
2455                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2456                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2457                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2458                                 "the maximum rxq index: %u\n", idx, shift,
2459                                 reta_conf[idx].reta[shift], max_rxq);
2460                         return -EINVAL;
2461                 }
2462         }
2463
2464         return 0;
2465 }
2466
2467 int
2468 rte_eth_dev_rss_reta_update(uint16_t port_id,
2469                             struct rte_eth_rss_reta_entry64 *reta_conf,
2470                             uint16_t reta_size)
2471 {
2472         struct rte_eth_dev *dev;
2473         int ret;
2474
2475         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2476         /* Check mask bits */
2477         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2478         if (ret < 0)
2479                 return ret;
2480
2481         dev = &rte_eth_devices[port_id];
2482
2483         /* Check entry value */
2484         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2485                                 dev->data->nb_rx_queues);
2486         if (ret < 0)
2487                 return ret;
2488
2489         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2490         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2491 }
2492
2493 int
2494 rte_eth_dev_rss_reta_query(uint16_t port_id,
2495                            struct rte_eth_rss_reta_entry64 *reta_conf,
2496                            uint16_t reta_size)
2497 {
2498         struct rte_eth_dev *dev;
2499         int ret;
2500
2501         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2502
2503         /* Check mask bits */
2504         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2505         if (ret < 0)
2506                 return ret;
2507
2508         dev = &rte_eth_devices[port_id];
2509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2510         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2511 }
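
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * spread the whole redirection table round-robin over the configured Rx
 * queues; reta_size comes from rte_eth_dev_info_get() and nb_rx_queues is
 * the number of Rx queues the application configured (both assumed here,
 * with reta_size a multiple of RTE_RETA_GROUP_SIZE).
 *
 *        struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *        uint16_t i;
 *
 *        memset(reta, 0, sizeof(reta));
 *        for (i = 0; i < reta_size; i++) {
 *                reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                                UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *                reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                                i % nb_rx_queues;
 *        }
 *        rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */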
2512
2513 int
2514 rte_eth_dev_rss_hash_update(uint16_t port_id,
2515                             struct rte_eth_rss_conf *rss_conf)
2516 {
2517         struct rte_eth_dev *dev;
2518
2519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2520         dev = &rte_eth_devices[port_id];
2521         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2522         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2523 }
2524
2525 int
2526 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2527                               struct rte_eth_rss_conf *rss_conf)
2528 {
2529         struct rte_eth_dev *dev;
2530
2531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2532         dev = &rte_eth_devices[port_id];
2533         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2534         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2535 }
2536
2537 int
2538 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2539                                 struct rte_eth_udp_tunnel *udp_tunnel)
2540 {
2541         struct rte_eth_dev *dev;
2542
2543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2544         if (udp_tunnel == NULL) {
2545                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2546                 return -EINVAL;
2547         }
2548
2549         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2550                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2551                 return -EINVAL;
2552         }
2553
2554         dev = &rte_eth_devices[port_id];
2555         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2556         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2557 }
2558
2559 int
2560 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2561                                    struct rte_eth_udp_tunnel *udp_tunnel)
2562 {
2563         struct rte_eth_dev *dev;
2564
2565         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2566         dev = &rte_eth_devices[port_id];
2567
2568         if (udp_tunnel == NULL) {
2569                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2570                 return -EINVAL;
2571         }
2572
2573         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2574                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2575                 return -EINVAL;
2576         }
2577
2578         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2579         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2580 }
2581
2582 int
2583 rte_eth_led_on(uint16_t port_id)
2584 {
2585         struct rte_eth_dev *dev;
2586
2587         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2588         dev = &rte_eth_devices[port_id];
2589         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2590         return (*dev->dev_ops->dev_led_on)(dev);
2591 }
2592
2593 int
2594 rte_eth_led_off(uint16_t port_id)
2595 {
2596         struct rte_eth_dev *dev;
2597
2598         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2599         dev = &rte_eth_devices[port_id];
2600         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2601         return (*dev->dev_ops->dev_led_off)(dev);
2602 }
2603
2604 /*
2605  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2606  * an empty spot.
2607  */
2608 static int
2609 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2610 {
2611         struct rte_eth_dev_info dev_info;
2612         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2613         unsigned i;
2614
2615         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2616         rte_eth_dev_info_get(port_id, &dev_info);
2617
2618         for (i = 0; i < dev_info.max_mac_addrs; i++)
2619                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2620                         return i;
2621
2622         return -1;
2623 }
2624
2625 static const struct ether_addr null_mac_addr;
2626
2627 int
2628 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2629                         uint32_t pool)
2630 {
2631         struct rte_eth_dev *dev;
2632         int index;
2633         uint64_t pool_mask;
2634         int ret;
2635
2636         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2637         dev = &rte_eth_devices[port_id];
2638         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2639
2640         if (is_zero_ether_addr(addr)) {
2641                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2642                         port_id);
2643                 return -EINVAL;
2644         }
2645         if (pool >= ETH_64_POOLS) {
2646                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2647                 return -EINVAL;
2648         }
2649
2650         index = get_mac_addr_index(port_id, addr);
2651         if (index < 0) {
2652                 index = get_mac_addr_index(port_id, &null_mac_addr);
2653                 if (index < 0) {
2654                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2655                                 port_id);
2656                         return -ENOSPC;
2657                 }
2658         } else {
2659                 pool_mask = dev->data->mac_pool_sel[index];
2660
2661                 /* If both the MAC address and pool are already there, do nothing */
2662                 if (pool_mask & (1ULL << pool))
2663                         return 0;
2664         }
2665
2666         /* Update NIC */
2667         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2668
2669         if (ret == 0) {
2670                 /* Update address in NIC data structure */
2671                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2672
2673                 /* Update pool bitmap in NIC data structure */
2674                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2675         }
2676
2677         return ret;
2678 }
2679
2680 int
2681 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2682 {
2683         struct rte_eth_dev *dev;
2684         int index;
2685
2686         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2687         dev = &rte_eth_devices[port_id];
2688         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2689
2690         index = get_mac_addr_index(port_id, addr);
2691         if (index == 0) {
2692                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2693                 return -EADDRINUSE;
2694         } else if (index < 0)
2695                 return 0;  /* Do nothing if address wasn't found */
2696
2697         /* Update NIC */
2698         (*dev->dev_ops->mac_addr_remove)(dev, index);
2699
2700         /* Update address in NIC data structure */
2701         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2702
2703         /* reset pool bitmap */
2704         dev->data->mac_pool_sel[index] = 0;
2705
2706         return 0;
2707 }
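
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * add a secondary unicast address to pool 0 and remove it again; the
 * address value is only an example.
 *
 *        struct ether_addr extra = {
 *                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *        };
 *
 *        if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) == 0) {
 *                ... receive traffic sent to the extra address ...
 *                rte_eth_dev_mac_addr_remove(port_id, &extra);
 *        }
 */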
2708
2709 int
2710 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2711 {
2712         struct rte_eth_dev *dev;
2713
2714         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2715
2716         if (!is_valid_assigned_ether_addr(addr))
2717                 return -EINVAL;
2718
2719         dev = &rte_eth_devices[port_id];
2720         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2721
2722         /* Update default address in NIC data structure */
2723         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2724
2725         (*dev->dev_ops->mac_addr_set)(dev, addr);
2726
2727         return 0;
2728 }
2729
2730
2731 /*
2732  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2733  * an empty spot.
2734  */
2735 static int
2736 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2737 {
2738         struct rte_eth_dev_info dev_info;
2739         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2740         unsigned i;
2741
2742         rte_eth_dev_info_get(port_id, &dev_info);
2743         if (!dev->data->hash_mac_addrs)
2744                 return -1;
2745
2746         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2747                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2748                         ETHER_ADDR_LEN) == 0)
2749                         return i;
2750
2751         return -1;
2752 }
2753
2754 int
2755 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2756                                 uint8_t on)
2757 {
2758         int index;
2759         int ret;
2760         struct rte_eth_dev *dev;
2761
2762         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2763
2764         dev = &rte_eth_devices[port_id];
2765         if (is_zero_ether_addr(addr)) {
2766                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2767                         port_id);
2768                 return -EINVAL;
2769         }
2770
2771         index = get_hash_mac_addr_index(port_id, addr);
2772         /* Check if it's already there, and do nothing */
2773         if ((index >= 0) && on)
2774                 return 0;
2775
2776         if (index < 0) {
2777                 if (!on) {
2778                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2779                                 "set in UTA\n", port_id);
2780                         return -EINVAL;
2781                 }
2782
2783                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2784                 if (index < 0) {
2785                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2786                                         port_id);
2787                         return -ENOSPC;
2788                 }
2789         }
2790
2791         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2792         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2793         if (ret == 0) {
2794                 /* Update address in NIC data structure */
2795                 if (on)
2796                         ether_addr_copy(addr,
2797                                         &dev->data->hash_mac_addrs[index]);
2798                 else
2799                         ether_addr_copy(&null_mac_addr,
2800                                         &dev->data->hash_mac_addrs[index]);
2801         }
2802
2803         return ret;
2804 }
2805
2806 int
2807 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
2808 {
2809         struct rte_eth_dev *dev;
2810
2811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2812
2813         dev = &rte_eth_devices[port_id];
2814
2815         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2816         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2817 }
2818
2819 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2820                                         uint16_t tx_rate)
2821 {
2822         struct rte_eth_dev *dev;
2823         struct rte_eth_dev_info dev_info;
2824         struct rte_eth_link link;
2825
2826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2827
2828         dev = &rte_eth_devices[port_id];
2829         rte_eth_dev_info_get(port_id, &dev_info);
2830         link = dev->data->dev_link;
2831
2832         if (queue_idx > dev_info.max_tx_queues) {
2833                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2834                                 "invalid queue id=%d\n", port_id, queue_idx);
2835                 return -EINVAL;
2836         }
2837
2838         if (tx_rate > link.link_speed) {
2839                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2840                                 "bigger than link speed= %d\n",
2841                         tx_rate, link.link_speed);
2842                 return -EINVAL;
2843         }
2844
2845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2846         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2847 }
2848
2849 int
2850 rte_eth_mirror_rule_set(uint16_t port_id,
2851                         struct rte_eth_mirror_conf *mirror_conf,
2852                         uint8_t rule_id, uint8_t on)
2853 {
2854         struct rte_eth_dev *dev;
2855
2856         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857         if (mirror_conf->rule_type == 0) {
2858                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
2859                 return -EINVAL;
2860         }
2861
2862         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2863                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2864                                 ETH_64_POOLS - 1);
2865                 return -EINVAL;
2866         }
2867
2868         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2869              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2870             (mirror_conf->pool_mask == 0)) {
2871                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
2872                 return -EINVAL;
2873         }
2874
2875         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2876             mirror_conf->vlan.vlan_mask == 0) {
2877                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
2878                 return -EINVAL;
2879         }
2880
2881         dev = &rte_eth_devices[port_id];
2882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2883
2884         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2885 }
2886
2887 int
2888 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
2889 {
2890         struct rte_eth_dev *dev;
2891
2892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2893
2894         dev = &rte_eth_devices[port_id];
2895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2896
2897         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2898 }
2899
2900 RTE_INIT(eth_dev_init_cb_lists)
2901 {
2902         int i;
2903
2904         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
2905                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
2906 }
2907
2908 int
2909 rte_eth_dev_callback_register(uint16_t port_id,
2910                         enum rte_eth_event_type event,
2911                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2912 {
2913         struct rte_eth_dev *dev;
2914         struct rte_eth_dev_callback *user_cb;
2915         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
2916         uint16_t last_port;
2917
2918         if (!cb_fn)
2919                 return -EINVAL;
2920
2921         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
2922                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
2923                 return -EINVAL;
2924         }
2925
2926         if (port_id == RTE_ETH_ALL) {
2927                 next_port = 0;
2928                 last_port = RTE_MAX_ETHPORTS - 1;
2929         } else {
2930                 next_port = last_port = port_id;
2931         }
2932
2933         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2934
2935         do {
2936                 dev = &rte_eth_devices[next_port];
2937
2938                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2939                         if (user_cb->cb_fn == cb_fn &&
2940                                 user_cb->cb_arg == cb_arg &&
2941                                 user_cb->event == event) {
2942                                 break;
2943                         }
2944                 }
2945
2946                 /* create a new callback. */
2947                 if (user_cb == NULL) {
2948                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2949                                 sizeof(struct rte_eth_dev_callback), 0);
2950                         if (user_cb != NULL) {
2951                                 user_cb->cb_fn = cb_fn;
2952                                 user_cb->cb_arg = cb_arg;
2953                                 user_cb->event = event;
2954                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
2955                                                   user_cb, next);
2956                         } else {
2957                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2958                                 rte_eth_dev_callback_unregister(port_id, event,
2959                                                                 cb_fn, cb_arg);
2960                                 return -ENOMEM;
2961                         }
2962
2963                 }
2964         } while (++next_port <= last_port);
2965
2966         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2967         return 0;
2968 }
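
/*
 * Illustrative usage sketch (application-side code, not part of this file):
 * register a link-state-change callback on every port. "lsc_event_cb" is a
 * hypothetical application function matching rte_eth_dev_cb_fn; the register
 * call would normally run once during initialisation.
 *
 *        static int
 *        lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                        void *cb_arg, void *ret_param)
 *        {
 *                RTE_SET_USED(cb_arg);
 *                RTE_SET_USED(ret_param);
 *                printf("port %u: event %d\n", port_id, event);
 *                return 0;
 *        }
 *
 *        rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                        lsc_event_cb, NULL);
 */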
2969
2970 int
2971 rte_eth_dev_callback_unregister(uint16_t port_id,
2972                         enum rte_eth_event_type event,
2973                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2974 {
2975         int ret;
2976         struct rte_eth_dev *dev;
2977         struct rte_eth_dev_callback *cb, *next;
2978         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
2979         uint16_t last_port;
2980
2981         if (!cb_fn)
2982                 return -EINVAL;
2983
2984         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
2985                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
2986                 return -EINVAL;
2987         }
2988
2989         if (port_id == RTE_ETH_ALL) {
2990                 next_port = 0;
2991                 last_port = RTE_MAX_ETHPORTS - 1;
2992         } else {
2993                 next_port = last_port = port_id;
2994         }
2995
2996         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2997
2998         do {
2999                 dev = &rte_eth_devices[next_port];
3000                 ret = 0;
3001                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3002                      cb = next) {
3003
3004                         next = TAILQ_NEXT(cb, next);
3005
3006                         if (cb->cb_fn != cb_fn || cb->event != event ||
3007                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3008                                 continue;
3009
3010                         /*
3011                          * if this callback is not executing right now,
3012                          * then remove it.
3013                          */
3014                         if (cb->active == 0) {
3015                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3016                                 rte_free(cb);
3017                         } else {
3018                                 ret = -EAGAIN;
3019                         }
3020                 }
3021         } while (++next_port <= last_port);
3022
3023         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3024         return ret;
3025 }
3026
3027 int
3028 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3029         enum rte_eth_event_type event, void *ret_param)
3030 {
3031         struct rte_eth_dev_callback *cb_lst;
3032         struct rte_eth_dev_callback dev_cb;
3033         int rc = 0;
3034
3035         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3036         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3037                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3038                         continue;
3039                 dev_cb = *cb_lst;
3040                 cb_lst->active = 1;
3041                 if (ret_param != NULL)
3042                         dev_cb.ret_param = ret_param;
3043
3044                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3045                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3046                                 dev_cb.cb_arg, dev_cb.ret_param);
3047                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3048                 cb_lst->active = 0;
3049         }
3050         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3051         return rc;
3052 }
3053
3054 int
3055 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3056 {
3057         uint32_t vec;
3058         struct rte_eth_dev *dev;
3059         struct rte_intr_handle *intr_handle;
3060         uint16_t qid;
3061         int rc;
3062
3063         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3064
3065         dev = &rte_eth_devices[port_id];
3066
3067         if (!dev->intr_handle) {
3068                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3069                 return -ENOTSUP;
3070         }
3071
3072         intr_handle = dev->intr_handle;
3073         if (!intr_handle->intr_vec) {
3074                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3075                 return -EPERM;
3076         }
3077
3078         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3079                 vec = intr_handle->intr_vec[qid];
3080                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3081                 if (rc && rc != -EEXIST) {
3082                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3083                                         " op %d epfd %d vec %u\n",
3084                                         port_id, qid, op, epfd, vec);
3085                 }
3086         }
3087
3088         return 0;
3089 }
3090
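/*
 * Reserve (or reuse) the DMA memzone backing a queue ring.  The zone name
 * encodes driver name, ring name, port and queue id; if a memzone with that
 * name already exists it is returned as-is, without re-checking its size or
 * alignment against the new request.
 */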
3091 const struct rte_memzone *
3092 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3093                          uint16_t queue_id, size_t size, unsigned align,
3094                          int socket_id)
3095 {
3096         char z_name[RTE_MEMZONE_NAMESIZE];
3097         const struct rte_memzone *mz;
3098
3099         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3100                  dev->device->driver->name, ring_name,
3101                  dev->data->port_id, queue_id);
3102
3103         mz = rte_memzone_lookup(z_name);
3104         if (mz)
3105                 return mz;
3106
3107         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3108 }
3109
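/*
 * Illustrative only, not part of this file: a minimal sketch (Linux, port
 * configured with intr_conf.rxq = 1) of the per-queue interrupt loop an
 * application could build on the helpers below; port_id and queue_id are
 * placeholders supplied by the caller.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	for (;;) {
 *		rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		... drain the queue with rte_eth_rx_burst() ...
 *	}
 */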
3110 int
3111 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3112                           int epfd, int op, void *data)
3113 {
3114         uint32_t vec;
3115         struct rte_eth_dev *dev;
3116         struct rte_intr_handle *intr_handle;
3117         int rc;
3118
3119         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3120
3121         dev = &rte_eth_devices[port_id];
3122         if (queue_id >= dev->data->nb_rx_queues) {
3123                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3124                 return -EINVAL;
3125         }
3126
3127         if (!dev->intr_handle) {
3128                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3129                 return -ENOTSUP;
3130         }
3131
3132         intr_handle = dev->intr_handle;
3133         if (!intr_handle->intr_vec) {
3134                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3135                 return -EPERM;
3136         }
3137
3138         vec = intr_handle->intr_vec[queue_id];
3139         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3140         if (rc && rc != -EEXIST) {
3141                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3142                                 " op %d epfd %d vec %u\n",
3143                                 port_id, queue_id, op, epfd, vec);
3144                 return rc;
3145         }
3146
3147         return 0;
3148 }
3149
3150 int
3151 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3152                            uint16_t queue_id)
3153 {
3154         struct rte_eth_dev *dev;
3155
3156         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3157
3158         dev = &rte_eth_devices[port_id];
3159
3160         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3161         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
3162 }
3163
3164 int
3165 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3166                             uint16_t queue_id)
3167 {
3168         struct rte_eth_dev *dev;
3169
3170         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3171
3172         dev = &rte_eth_devices[port_id];
3173
3174         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3175         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
3176 }
3177
3178
3179 int
3180 rte_eth_dev_filter_supported(uint16_t port_id,
3181                              enum rte_filter_type filter_type)
3182 {
3183         struct rte_eth_dev *dev;
3184
3185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3186
3187         dev = &rte_eth_devices[port_id];
3188         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3189         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3190                                 RTE_ETH_FILTER_NOP, NULL);
3191 }
3192
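/*
 * ABI compatibility shims for rte_eth_dev_filter_ctrl.  The _v22 variant
 * below keeps the old DPDK 2.2 layout of the fdir/hash info structures
 * (32-bit mask fields) for applications bound to that symbol version, while
 * _v1802 further down is the default binding from release 18.02 onwards.
 */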
3193 int
3194 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3195                             enum rte_filter_type filter_type,
3196                             enum rte_filter_op filter_op, void *arg);
3197
3198 int
3199 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3200                             enum rte_filter_type filter_type,
3201                             enum rte_filter_op filter_op, void *arg)
3202 {
3203         struct rte_eth_fdir_info_v22 {
3204                 enum rte_fdir_mode mode;
3205                 struct rte_eth_fdir_masks mask;
3206                 struct rte_eth_fdir_flex_conf flex_conf;
3207                 uint32_t guarant_spc;
3208                 uint32_t best_spc;
3209                 uint32_t flow_types_mask[1];
3210                 uint32_t max_flexpayload;
3211                 uint32_t flex_payload_unit;
3212                 uint32_t max_flex_payload_segment_num;
3213                 uint16_t flex_payload_limit;
3214                 uint32_t flex_bitmask_unit;
3215                 uint32_t max_flex_bitmask_num;
3216         };
3217
3218         struct rte_eth_hash_global_conf_v22 {
3219                 enum rte_eth_hash_function hash_func;
3220                 uint32_t sym_hash_enable_mask[1];
3221                 uint32_t valid_bit_mask[1];
3222         };
3223
3224         struct rte_eth_hash_filter_info_v22 {
3225                 enum rte_eth_hash_filter_info_type info_type;
3226                 union {
3227                         uint8_t enable;
3228                         struct rte_eth_hash_global_conf_v22 global_conf;
3229                         struct rte_eth_input_set_conf input_set_conf;
3230                 } info;
3231         };
3232
3233         struct rte_eth_dev *dev;
3234
3235         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3236
3237         dev = &rte_eth_devices[port_id];
3238         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3239         if (filter_op == RTE_ETH_FILTER_INFO) {
3240                 int retval;
3241                 struct rte_eth_fdir_info_v22 *fdir_info_v22;
3242                 struct rte_eth_fdir_info fdir_info;
3243
3244                 fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
3245
3246                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3247                           filter_op, (void *)&fdir_info);
3248                 fdir_info_v22->mode = fdir_info.mode;
3249                 fdir_info_v22->mask = fdir_info.mask;
3250                 fdir_info_v22->flex_conf = fdir_info.flex_conf;
3251                 fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
3252                 fdir_info_v22->best_spc = fdir_info.best_spc;
3253                 fdir_info_v22->flow_types_mask[0] =
3254                         (uint32_t)fdir_info.flow_types_mask[0];
3255                 fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
3256                 fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
3257                 fdir_info_v22->max_flex_payload_segment_num =
3258                         fdir_info.max_flex_payload_segment_num;
3259                 fdir_info_v22->flex_payload_limit =
3260                         fdir_info.flex_payload_limit;
3261                 fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
3262                 fdir_info_v22->max_flex_bitmask_num =
3263                         fdir_info.max_flex_bitmask_num;
3264                 return retval;
3265         } else if (filter_op == RTE_ETH_FILTER_GET) {
3266                 int retval;
3267                 struct rte_eth_hash_filter_info f_info;
3268                 struct rte_eth_hash_filter_info_v22 *f_info_v22 =
3269                         (struct rte_eth_hash_filter_info_v22 *)arg;
3270
3271                 f_info.info_type = f_info_v22->info_type;
3272                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3273                           filter_op, (void *)&f_info);
3274
3275                 switch (f_info_v22->info_type) {
3276                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3277                         f_info_v22->info.enable = f_info.info.enable;
3278                         break;
3279                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3280                         f_info_v22->info.global_conf.hash_func =
3281                                 f_info.info.global_conf.hash_func;
3282                         f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
3283                                 (uint32_t)
3284                                 f_info.info.global_conf.sym_hash_enable_mask[0];
3285                         f_info_v22->info.global_conf.valid_bit_mask[0] =
3286                                 (uint32_t)
3287                                 f_info.info.global_conf.valid_bit_mask[0];
3288                         break;
3289                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3290                         f_info_v22->info.input_set_conf =
3291                                 f_info.info.input_set_conf;
3292                         break;
3293                 default:
3294                         break;
3295                 }
3296                 return retval;
3297         } else if (filter_op == RTE_ETH_FILTER_SET) {
3298                 struct rte_eth_hash_filter_info f_info;
3299                 struct rte_eth_hash_filter_info_v22 *f_v22 =
3300                         (struct rte_eth_hash_filter_info_v22 *)arg;
3301
3302                 f_info.info_type = f_v22->info_type;
3303                 switch (f_v22->info_type) {
3304                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3305                         f_info.info.enable = f_v22->info.enable;
3306                         break;
3307                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3308                         f_info.info.global_conf.hash_func =
3309                                 f_v22->info.global_conf.hash_func;
3310                         f_info.info.global_conf.sym_hash_enable_mask[0] =
3311                                 (uint32_t)
3312                                 f_v22->info.global_conf.sym_hash_enable_mask[0];
3313                         f_info.info.global_conf.valid_bit_mask[0] =
3314                                 (uint32_t)
3315                                 f_v22->info.global_conf.valid_bit_mask[0];
3316                         break;
3317                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3318                         f_info.info.input_set_conf =
3319                                 f_v22->info.input_set_conf;
3320                         break;
3321                 default:
3322                         break;
3323                 }
3324                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3325                                                     (void *)&f_info);
3326         } else
3327                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3328                                                     arg);
3329 }
3330 VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
3331
3332 int
3333 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3334                               enum rte_filter_type filter_type,
3335                               enum rte_filter_op filter_op, void *arg);
3336
3337 int
3338 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3339                               enum rte_filter_type filter_type,
3340                               enum rte_filter_op filter_op, void *arg)
3341 {
3342         struct rte_eth_dev *dev;
3343
3344         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3345
3346         dev = &rte_eth_devices[port_id];
3347         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3348         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3349 }
3350 BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
3351 MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
3352                   enum rte_filter_type filter_type,
3353                   enum rte_filter_op filter_op, void *arg),
3354                   rte_eth_dev_filter_ctrl_v1802);
3355
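/*
 * Illustrative only, not part of this file: a hypothetical Rx callback an
 * application could attach with rte_eth_add_rx_callback() below (the build
 * must have RTE_ETHDEV_RXTX_CALLBACKS enabled).  count_rx_cb and counter
 * are placeholder names.
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
 *		    struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
 *		    uint16_t max_pkts __rte_unused, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	... rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, &counter);
 */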
3356 void *
3357 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3358                 rte_rx_callback_fn fn, void *user_param)
3359 {
3360 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3361         rte_errno = ENOTSUP;
3362         return NULL;
3363 #endif
3364         /* check input parameters */
3365         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3366                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3367                 rte_errno = EINVAL;
3368                 return NULL;
3369         }
3370         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3371
3372         if (cb == NULL) {
3373                 rte_errno = ENOMEM;
3374                 return NULL;
3375         }
3376
3377         cb->fn.rx = fn;
3378         cb->param = user_param;
3379
3380         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3381         /* Add the callbacks in FIFO order. */
3382         struct rte_eth_rxtx_callback *tail =
3383                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3384
3385         if (!tail) {
3386                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3387
3388         } else {
3389                 while (tail->next)
3390                         tail = tail->next;
3391                 tail->next = cb;
3392         }
3393         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3394
3395         return cb;
3396 }
3397
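/*
 * Same as rte_eth_add_rx_callback() but inserts at the head of the list.
 * The write barrier below ensures the new entry is fully initialised before
 * its pointer is published to data-plane threads, which walk the list
 * without taking the lock.
 */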
3398 void *
3399 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3400                 rte_rx_callback_fn fn, void *user_param)
3401 {
3402 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3403         rte_errno = ENOTSUP;
3404         return NULL;
3405 #endif
3406         /* check input parameters */
3407         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3408                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3409                 rte_errno = EINVAL;
3410                 return NULL;
3411         }
3412
3413         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3414
3415         if (cb == NULL) {
3416                 rte_errno = ENOMEM;
3417                 return NULL;
3418         }
3419
3420         cb->fn.rx = fn;
3421         cb->param = user_param;
3422
3423         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3424         /* Add the callback at the first position. */
3425         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3426         rte_smp_wmb();
3427         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3428         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3429
3430         return cb;
3431 }
3432
3433 void *
3434 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3435                 rte_tx_callback_fn fn, void *user_param)
3436 {
3437 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3438         rte_errno = ENOTSUP;
3439         return NULL;
3440 #endif
3441         /* check input parameters */
3442         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3443                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3444                 rte_errno = EINVAL;
3445                 return NULL;
3446         }
3447
3448         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3449
3450         if (cb == NULL) {
3451                 rte_errno = ENOMEM;
3452                 return NULL;
3453         }
3454
3455         cb->fn.tx = fn;
3456         cb->param = user_param;
3457
3458         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3459         /* Add the callbacks in FIFO order. */
3460         struct rte_eth_rxtx_callback *tail =
3461                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3462
3463         if (!tail) {
3464                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3465
3466         } else {
3467                 while (tail->next)
3468                         tail = tail->next;
3469                 tail->next = cb;
3470         }
3471         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3472
3473         return cb;
3474 }
3475
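/*
 * The removal helpers below only unlink the callback from the per-queue
 * list; the callback memory is not freed here, and a data-plane thread may
 * still be executing it when the call returns, so releasing it safely is
 * left to the caller.
 */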
3476 int
3477 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3478                 struct rte_eth_rxtx_callback *user_cb)
3479 {
3480 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3481         return -ENOTSUP;
3482 #endif
3483         /* Check input parameters. */
3484         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3485         if (user_cb == NULL ||
3486                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3487                 return -EINVAL;
3488
3489         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3490         struct rte_eth_rxtx_callback *cb;
3491         struct rte_eth_rxtx_callback **prev_cb;
3492         int ret = -EINVAL;
3493
3494         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3495         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3496         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3497                 cb = *prev_cb;
3498                 if (cb == user_cb) {
3499                         /* Remove the user cb from the callback list. */
3500                         *prev_cb = cb->next;
3501                         ret = 0;
3502                         break;
3503                 }
3504         }
3505         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3506
3507         return ret;
3508 }
3509
3510 int
3511 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3512                 struct rte_eth_rxtx_callback *user_cb)
3513 {
3514 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3515         return -ENOTSUP;
3516 #endif
3517         /* Check input parameters. */
3518         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3519         if (user_cb == NULL ||
3520                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3521                 return -EINVAL;
3522
3523         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3524         int ret = -EINVAL;
3525         struct rte_eth_rxtx_callback *cb;
3526         struct rte_eth_rxtx_callback **prev_cb;
3527
3528         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3529         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3530         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3531                 cb = *prev_cb;
3532                 if (cb == user_cb) {
3533                         /* Remove the user cb from the callback list. */
3534                         *prev_cb = cb->next;
3535                         ret = 0;
3536                         break;
3537                 }
3538         }
3539         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3540
3541         return ret;
3542 }
3543
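/*
 * The queue info helpers below zero the caller's structure before handing
 * it to the PMD, so any field a driver does not fill in reads back as 0.
 */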
3544 int
3545 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3546         struct rte_eth_rxq_info *qinfo)
3547 {
3548         struct rte_eth_dev *dev;
3549
3550         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3551
3552         if (qinfo == NULL)
3553                 return -EINVAL;
3554
3555         dev = &rte_eth_devices[port_id];
3556         if (queue_id >= dev->data->nb_rx_queues) {
3557                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3558                 return -EINVAL;
3559         }
3560
3561         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3562
3563         memset(qinfo, 0, sizeof(*qinfo));
3564         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3565         return 0;
3566 }
3567
3568 int
3569 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3570         struct rte_eth_txq_info *qinfo)
3571 {
3572         struct rte_eth_dev *dev;
3573
3574         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3575
3576         if (qinfo == NULL)
3577                 return -EINVAL;
3578
3579         dev = &rte_eth_devices[port_id];
3580         if (queue_id >= dev->data->nb_tx_queues) {
3581                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3582                 return -EINVAL;
3583         }
3584
3585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3586
3587         memset(qinfo, 0, sizeof(*qinfo));
3588         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3589         return 0;
3590 }
3591
3592 int
3593 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3594                              struct ether_addr *mc_addr_set,
3595                              uint32_t nb_mc_addr)
3596 {
3597         struct rte_eth_dev *dev;
3598
3599         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3600
3601         dev = &rte_eth_devices[port_id];
3602         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3603         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3604 }
3605
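/*
 * IEEE 1588 timesync wrappers: each call below only validates the port and
 * forwards to the matching PMD op, returning -ENOTSUP when the driver does
 * not implement it.
 */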
3606 int
3607 rte_eth_timesync_enable(uint16_t port_id)
3608 {
3609         struct rte_eth_dev *dev;
3610
3611         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3612         dev = &rte_eth_devices[port_id];
3613
3614         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3615         return (*dev->dev_ops->timesync_enable)(dev);
3616 }
3617
3618 int
3619 rte_eth_timesync_disable(uint16_t port_id)
3620 {
3621         struct rte_eth_dev *dev;
3622
3623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3624         dev = &rte_eth_devices[port_id];
3625
3626         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3627         return (*dev->dev_ops->timesync_disable)(dev);
3628 }
3629
3630 int
3631 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3632                                    uint32_t flags)
3633 {
3634         struct rte_eth_dev *dev;
3635
3636         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3637         dev = &rte_eth_devices[port_id];
3638
3639         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3640         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3641 }
3642
3643 int
3644 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3645                                    struct timespec *timestamp)
3646 {
3647         struct rte_eth_dev *dev;
3648
3649         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3650         dev = &rte_eth_devices[port_id];
3651
3652         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3653         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3654 }
3655
3656 int
3657 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3658 {
3659         struct rte_eth_dev *dev;
3660
3661         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3662         dev = &rte_eth_devices[port_id];
3663
3664         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3665         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3666 }
3667
3668 int
3669 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3670 {
3671         struct rte_eth_dev *dev;
3672
3673         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3674         dev = &rte_eth_devices[port_id];
3675
3676         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3677         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3678 }
3679
3680 int
3681 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3682 {
3683         struct rte_eth_dev *dev;
3684
3685         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3686         dev = &rte_eth_devices[port_id];
3687
3688         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3689         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3690 }
3691
3692 int
3693 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3694 {
3695         struct rte_eth_dev *dev;
3696
3697         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3698
3699         dev = &rte_eth_devices[port_id];
3700         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3701         return (*dev->dev_ops->get_reg)(dev, info);
3702 }
3703
3704 int
3705 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3706 {
3707         struct rte_eth_dev *dev;
3708
3709         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3710
3711         dev = &rte_eth_devices[port_id];
3712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3713         return (*dev->dev_ops->get_eeprom_length)(dev);
3714 }
3715
3716 int
3717 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3718 {
3719         struct rte_eth_dev *dev;
3720
3721         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3722
3723         dev = &rte_eth_devices[port_id];
3724         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3725         return (*dev->dev_ops->get_eeprom)(dev, info);
3726 }
3727
3728 int
3729 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3730 {
3731         struct rte_eth_dev *dev;
3732
3733         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3734
3735         dev = &rte_eth_devices[port_id];
3736         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3737         return (*dev->dev_ops->set_eeprom)(dev, info);
3738 }
3739
3740 int
3741 rte_eth_dev_get_dcb_info(uint16_t port_id,
3742                              struct rte_eth_dcb_info *dcb_info)
3743 {
3744         struct rte_eth_dev *dev;
3745
3746         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3747
3748         dev = &rte_eth_devices[port_id];
3749         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3750
3751         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3752         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3753 }
3754
3755 int
3756 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3757                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3758 {
3759         struct rte_eth_dev *dev;
3760
3761         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3762         if (l2_tunnel == NULL) {
3763                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3764                 return -EINVAL;
3765         }
3766
3767         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3768                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3769                 return -EINVAL;
3770         }
3771
3772         dev = &rte_eth_devices[port_id];
3773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3774                                 -ENOTSUP);
3775         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3776 }
3777
3778 int
3779 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3780                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3781                                   uint32_t mask,
3782                                   uint8_t en)
3783 {
3784         struct rte_eth_dev *dev;
3785
3786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3787
3788         if (l2_tunnel == NULL) {
3789                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3790                 return -EINVAL;
3791         }
3792
3793         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3794                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3795                 return -EINVAL;
3796         }
3797
3798         if (mask == 0) {
3799                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3800                 return -EINVAL;
3801         }
3802
3803         dev = &rte_eth_devices[port_id];
3804         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3805                                 -ENOTSUP);
3806         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3807 }
3808
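/*
 * Clamp a requested descriptor count to the PMD limits: round up to the
 * required alignment, cap at nb_max (when advertised) and finally raise the
 * result to at least nb_min.
 */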
3809 static void
3810 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3811                            const struct rte_eth_desc_lim *desc_lim)
3812 {
3813         if (desc_lim->nb_align != 0)
3814                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3815
3816         if (desc_lim->nb_max != 0)
3817                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3818
3819         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3820 }
3821
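/*
 * Illustrative only, not part of this file: the usual setup-time sequence,
 * clamping the requested ring sizes to the PMD limits before the queues are
 * created.  nb_rxd, nb_txd, socket_id and mb_pool are application-side
 * placeholders.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */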
3822 int
3823 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3824                                  uint16_t *nb_rx_desc,
3825                                  uint16_t *nb_tx_desc)
3826 {
3827         struct rte_eth_dev *dev;
3828         struct rte_eth_dev_info dev_info;
3829
3830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3831
3832         dev = &rte_eth_devices[port_id];
3833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3834
3835         rte_eth_dev_info_get(port_id, &dev_info);
3836
3837         if (nb_rx_desc != NULL)
3838                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3839
3840         if (nb_tx_desc != NULL)
3841                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3842
3843         return 0;
3844 }
3845
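/*
 * A PMD that does not implement pool_ops_supported is assumed to work with
 * any mempool handler, hence the unconditional 1 below; otherwise the
 * decision is delegated to the driver.
 */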
3846 int
3847 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3848 {
3849         struct rte_eth_dev *dev;
3850
3851         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3852
3853         if (pool == NULL)
3854                 return -EINVAL;
3855
3856         dev = &rte_eth_devices[port_id];
3857
3858         if (*dev->dev_ops->pool_ops_supported == NULL)
3859                 return 1; /* all pools are supported */
3860
3861         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3862 }