ethdev: add mbuf RSS update as an offload
[dpdk.git] / lib / librte_ethdev / rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>

#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() is not yet supporting the new syntax,
         * that's why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
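
/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * file. The three iterator functions above are normally driven together,
 * as done by the RTE_ETH_FOREACH_MATCHING_DEV() macro in rte_ethdev.h.
 * A minimal open-coded loop (the devargs string is an arbitrary example):
 *
 *      struct rte_dev_iterator iterator;
 *      uint16_t port_id;
 *
 *      if (rte_eth_iterator_init(&iterator,
 *                      "class=eth,mac=00:11:22:33:44:55") == 0) {
 *              for (port_id = rte_eth_iterator_next(&iterator);
 *                   port_id != RTE_MAX_ETHPORTS;
 *                   port_id = rte_eth_iterator_next(&iterator))
 *                      printf("matched port %u\n", port_id);
 *      }
 *
 * rte_eth_iterator_next() calls rte_eth_iterator_cleanup() itself once
 * iteration is exhausted; a caller breaking out early must call it
 * explicitly.
 */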

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                _rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}
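
/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * file. A component wanting exclusive control of a port allocates an
 * owner id and then claims the port; ports with an owner are skipped by
 * RTE_ETH_FOREACH_DEV() in other components. The owner name "my_app" is
 * an arbitrary example and error handling is omitted:
 *
 *      struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *      rte_eth_dev_owner_new(&owner.id);
 *      rte_eth_dev_owner_set(port_id, &owner);
 *      ...
 *      rte_eth_dev_owner_unset(port_id, owner.id);
 */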

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}
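
/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * file. The two lookups above convert between port id and device name;
 * the PCI address below is an arbitrary example:
 *
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *      uint16_t port_id;
 *
 *      if (rte_eth_dev_get_port_by_name("0000:03:00.0", &port_id) == 0)
 *              rte_eth_dev_get_name_by_port(port_id, name);
 */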

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
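
/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * file. rte_eth_speed_bitflag() builds one bit of the link_speeds mask
 * in struct rte_eth_conf; ETH_LINK_SPEED_FIXED may be OR'ed in to
 * disable autonegotiation:
 *
 *      uint32_t speeds = ETH_LINK_SPEED_FIXED |
 *              rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */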

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
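
/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * file. Both lookup functions above take a single offload bit, so a
 * capability mask (e.g. rx_offload_capa from rte_eth_dev_info_get()) is
 * decoded one bit at a time; unknown bits print as "UNKNOWN":
 *
 *      uint64_t capa = dev_info.rx_offload_capa;
 *      uint64_t bit;
 *
 *      for (bit = 1; bit != 0; bit <<= 1)
 *              if (capa & bit)
 *                      printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 */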

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, so copy it before
         * calling rte_eth_dev_info_get().
         */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned int)RTE_ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        RTE_ETHER_MAX_LEN;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
             dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
             dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }

        dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
        if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
            (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
                        port_id,
                        rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                ret = diag;
                goto rollback;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                ret = diag;
                goto rollback;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        return 0;

rollback:
        memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

        return ret;
}
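
/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * file. A minimal configuration requesting the DEV_RX_OFFLOAD_RSS_HASH
 * offload introduced by this patch; per the check above it is only
 * accepted together with an RSS multi-queue Rx mode, and it must also
 * appear in the port's rx_offload_capa. Queue counts are arbitrary:
 *
 *      struct rte_eth_conf conf = {
 *              .rxmode = {
 *                      .mq_mode = ETH_MQ_RX_RSS,
 *                      .offloads = DEV_RX_OFFLOAD_RSS_HASH,
 *              },
 *      };
 *
 *      ret = rte_eth_dev_configure(port_id, 4, 4, &conf);
 */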

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
{
        struct rte_ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info->max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (rte_is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }
}

static int
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
                           struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
        int ret;

        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                rte_eth_dev_mac_restore(dev, dev_info);

        /* replay promiscuous configuration */
        /*
         * use callbacks directly since we don't need port_id check and
         * would like to bypass the same value set
         */
        if (rte_eth_promiscuous_get(port_id) == 1 &&
            *dev->dev_ops->promiscuous_enable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_enable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to enable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        } else if (rte_eth_promiscuous_get(port_id) == 0 &&
                   *dev->dev_ops->promiscuous_disable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_disable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to disable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        }

        /* replay all multicast configuration */
        /*
         * use callbacks directly since we don't need port_id check and
         * would like to bypass the same value set
         */
        if (rte_eth_allmulticast_get(port_id) == 1 &&
            *dev->dev_ops->allmulticast_enable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->allmulticast_enable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to enable allmulticast mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        } else if (rte_eth_allmulticast_get(port_id) == 0 &&
                   *dev->dev_ops->allmulticast_disable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->allmulticast_disable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to disable allmulticast mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        }

        return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

1515         /* Let's restore the MAC now if the device does not support live change */
1516         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1517                 rte_eth_dev_mac_restore(dev, &dev_info);
1518
1519         diag = (*dev->dev_ops->dev_start)(dev);
1520         if (diag == 0)
1521                 dev->data->dev_started = 1;
1522         else
1523                 return eth_err(port_id, diag);
1524
1525         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1526         if (ret != 0) {
1527                 RTE_ETHDEV_LOG(ERR,
1528                         "Error during restoring configuration for device (port %u): %s\n",
1529                         port_id, rte_strerror(-ret));
1530                 rte_eth_dev_stop(port_id);
1531                 return ret;
1532         }
1533
1534         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1535                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1536                 (*dev->dev_ops->link_update)(dev, 0);
1537         }
1538         return 0;
1539 }
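
/*
 * Usage sketch (editor's illustration, not part of the library): the
 * bring-up order this file enforces is configure, queue setup, start.
 * "mbuf_pool" is an assumed, already-created pktmbuf pool and error
 * handling is abbreviated.
 *
 *	static int port_bringup(uint16_t port_id, struct rte_mempool *mbuf_pool)
 *	{
 *		struct rte_eth_conf conf;
 *		int ret;
 *
 *		memset(&conf, 0, sizeof(conf));
 *		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *		if (ret != 0)
 *			return ret;
 *		ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *				rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *		if (ret < 0)
 *			return ret;
 *		ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *				rte_eth_dev_socket_id(port_id), NULL);
 *		if (ret < 0)
 *			return ret;
 *		return rte_eth_dev_start(port_id);
 *	}
 */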
1540
1541 void
1542 rte_eth_dev_stop(uint16_t port_id)
1543 {
1544         struct rte_eth_dev *dev;
1545
1546         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1547         dev = &rte_eth_devices[port_id];
1548
1549         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1550
1551         if (dev->data->dev_started == 0) {
1552                 RTE_ETHDEV_LOG(INFO,
1553                         "Device with port_id=%"PRIu16" already stopped\n",
1554                         port_id);
1555                 return;
1556         }
1557
1558         dev->data->dev_started = 0;
1559         (*dev->dev_ops->dev_stop)(dev);
1560 }
1561
1562 int
1563 rte_eth_dev_set_link_up(uint16_t port_id)
1564 {
1565         struct rte_eth_dev *dev;
1566
1567         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1568
1569         dev = &rte_eth_devices[port_id];
1570
1571         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1572         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1573 }
1574
1575 int
1576 rte_eth_dev_set_link_down(uint16_t port_id)
1577 {
1578         struct rte_eth_dev *dev;
1579
1580         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1581
1582         dev = &rte_eth_devices[port_id];
1583
1584         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1585         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1586 }
1587
1588 void
1589 rte_eth_dev_close(uint16_t port_id)
1590 {
1591         struct rte_eth_dev *dev;
1592
1593         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1594         dev = &rte_eth_devices[port_id];
1595
1596         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1597         dev->data->dev_started = 0;
1598         (*dev->dev_ops->dev_close)(dev);
1599
1600         /* check behaviour flag - temporary for PMD migration */
1601         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1602                 /* new behaviour: send event + reset state + free all data */
1603                 rte_eth_dev_release_port(dev);
1604                 return;
1605         }
1606         RTE_ETHDEV_LOG(DEBUG, "Port closing is using the old behaviour.\n"
1607                         "The driver %s should migrate to the new behaviour.\n",
1608                         dev->device->driver->name);
1609         /* old behaviour: only free queue arrays */
1610         dev->data->nb_rx_queues = 0;
1611         rte_free(dev->data->rx_queues);
1612         dev->data->rx_queues = NULL;
1613         dev->data->nb_tx_queues = 0;
1614         rte_free(dev->data->tx_queues);
1615         dev->data->tx_queues = NULL;
1616 }
1617
1618 int
1619 rte_eth_dev_reset(uint16_t port_id)
1620 {
1621         struct rte_eth_dev *dev;
1622         int ret;
1623
1624         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1625         dev = &rte_eth_devices[port_id];
1626
1627         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1628
1629         rte_eth_dev_stop(port_id);
1630         ret = dev->dev_ops->dev_reset(dev);
1631
1632         return eth_err(port_id, ret);
1633 }
1634
1635 int
1636 rte_eth_dev_is_removed(uint16_t port_id)
1637 {
1638         struct rte_eth_dev *dev;
1639         int ret;
1640
1641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1642
1643         dev = &rte_eth_devices[port_id];
1644
1645         if (dev->state == RTE_ETH_DEV_REMOVED)
1646                 return 1;
1647
1648         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1649
1650         ret = dev->dev_ops->is_removed(dev);
1651         if (ret != 0)
1652                 /* Device is physically removed. */
1653                 dev->state = RTE_ETH_DEV_REMOVED;
1654
1655         return ret;
1656 }
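
/*
 * Usage sketch (editor's illustration): polling for hot-unplug. The
 * datapath must stop using the port before it is closed;
 * stop_datapath() is an assumed application hook.
 *
 *	if (rte_eth_dev_is_removed(port_id)) {
 *		stop_datapath(port_id);
 *		rte_eth_dev_close(port_id);
 *	}
 */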
1657
1658 int
1659 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1660                        uint16_t nb_rx_desc, unsigned int socket_id,
1661                        const struct rte_eth_rxconf *rx_conf,
1662                        struct rte_mempool *mp)
1663 {
1664         int ret;
1665         uint32_t mbp_buf_size;
1666         struct rte_eth_dev *dev;
1667         struct rte_eth_dev_info dev_info;
1668         struct rte_eth_rxconf local_conf;
1669         void **rxq;
1670
1671         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1672
1673         dev = &rte_eth_devices[port_id];
1674         if (rx_queue_id >= dev->data->nb_rx_queues) {
1675                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1676                 return -EINVAL;
1677         }
1678
1679         if (mp == NULL) {
1680                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1681                 return -EINVAL;
1682         }
1683
1684         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1685
1686         /*
1687          * Check the size of the mbuf data buffer.
1688          * This value must be provided in the private data of the memory pool.
1689          * First check that the memory pool has valid private data.
1690          */
1691         ret = rte_eth_dev_info_get(port_id, &dev_info);
1692         if (ret != 0)
1693                 return ret;
1694
1695         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1696                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1697                         mp->name, (int)mp->private_data_size,
1698                         (int)sizeof(struct rte_pktmbuf_pool_private));
1699                 return -ENOSPC;
1700         }
1701         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1702
1703         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1704                 RTE_ETHDEV_LOG(ERR,
1705                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1706                         mp->name, (int)mbp_buf_size,
1707                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1708                         (int)RTE_PKTMBUF_HEADROOM,
1709                         (int)dev_info.min_rx_bufsize);
1710                 return -EINVAL;
1711         }
1712
1713         /* Use default specified by driver, if nb_rx_desc is zero */
1714         if (nb_rx_desc == 0) {
1715                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1716                 /* If driver default is also zero, fall back on EAL default */
1717                 if (nb_rx_desc == 0)
1718                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1719         }
1720
1721         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1722                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1723                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1724
1725                 RTE_ETHDEV_LOG(ERR,
1726                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1727                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1728                         dev_info.rx_desc_lim.nb_min,
1729                         dev_info.rx_desc_lim.nb_align);
1730                 return -EINVAL;
1731         }
1732
1733         if (dev->data->dev_started &&
1734                 !(dev_info.dev_capa &
1735                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1736                 return -EBUSY;
1737
1738         if (dev->data->dev_started &&
1739                 (dev->data->rx_queue_state[rx_queue_id] !=
1740                         RTE_ETH_QUEUE_STATE_STOPPED))
1741                 return -EBUSY;
1742
1743         rxq = dev->data->rx_queues;
1744         if (rxq[rx_queue_id]) {
1745                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1746                                         -ENOTSUP);
1747                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1748                 rxq[rx_queue_id] = NULL;
1749         }
1750
1751         if (rx_conf == NULL)
1752                 rx_conf = &dev_info.default_rxconf;
1753
1754         local_conf = *rx_conf;
1755
1756         /*
1757          * If an offload has already been enabled in
1758          * rte_eth_dev_configure(), it has been enabled on all queues,
1759          * so there is no need to enable it on this queue again.
1760          * The local_conf.offloads input to the underlying PMD carries
1761          * only those offloads which are enabled on this queue but
1762          * not enabled on all queues.
1763          */
1764         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1765
1766         /*
1767          * Offloads newly added for this queue are those not enabled in
1768          * rte_eth_dev_configure() and they must be of a per-queue type.
1769          * A pure per-port offload can't be enabled on a queue while
1770          * disabled on another queue, and it can't be newly added on
1771          * any queue if it hasn't been enabled in
1772          * rte_eth_dev_configure().
1773          */
1774         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1775              local_conf.offloads) {
1776                 RTE_ETHDEV_LOG(ERR,
1777                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1778                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1779                         port_id, rx_queue_id, local_conf.offloads,
1780                         dev_info.rx_queue_offload_capa,
1781                         __func__);
1782                 return -EINVAL;
1783         }
1784
1785         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1786                                               socket_id, &local_conf, mp);
1787         if (!ret) {
1788                 if (!dev->data->min_rx_buf_size ||
1789                     dev->data->min_rx_buf_size > mbp_buf_size)
1790                         dev->data->min_rx_buf_size = mbp_buf_size;
1791         }
1792
1793         return eth_err(port_id, ret);
1794 }
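
/*
 * Usage sketch (editor's illustration): enabling an offload on a single
 * queue. Per the checks above, anything added here beyond the port-wide
 * configuration must appear in rx_queue_offload_capa. "pool" is an
 * assumed pktmbuf pool; error handling is elided.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_rxconf rxconf;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	rxconf = info.default_rxconf;
 *	if (info.rx_queue_offload_capa & DEV_RX_OFFLOAD_SCATTER)
 *		rxconf.offloads |= DEV_RX_OFFLOAD_SCATTER;
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &rxconf, pool);
 */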
1795
1796 int
1797 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1798                                uint16_t nb_rx_desc,
1799                                const struct rte_eth_hairpin_conf *conf)
1800 {
1801         int ret;
1802         struct rte_eth_dev *dev;
1803         struct rte_eth_hairpin_cap cap;
1804         void **rxq;
1805         int i;
1806         int count;
1807
1808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1809
1810         dev = &rte_eth_devices[port_id];
1811         if (rx_queue_id >= dev->data->nb_rx_queues) {
1812                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1813                 return -EINVAL;
1814         }
1815         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1816         if (ret != 0)
1817                 return ret;
1818         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1819                                 -ENOTSUP);
1820         /* if nb_rx_desc is zero use max number of desc from the driver. */
1821         if (nb_rx_desc == 0)
1822                 nb_rx_desc = cap.max_nb_desc;
1823         if (nb_rx_desc > cap.max_nb_desc) {
1824                 RTE_ETHDEV_LOG(ERR,
1825                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1826                         nb_rx_desc, cap.max_nb_desc);
1827                 return -EINVAL;
1828         }
1829         if (conf->peer_count > cap.max_rx_2_tx) {
1830                 RTE_ETHDEV_LOG(ERR,
1831                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1832                         conf->peer_count, cap.max_rx_2_tx);
1833                 return -EINVAL;
1834         }
1835         if (conf->peer_count == 0) {
1836                 RTE_ETHDEV_LOG(ERR,
1837                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
1838                         conf->peer_count);
1839                 return -EINVAL;
1840         }
1841         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1842              cap.max_nb_queues != UINT16_MAX; i++) {
1843                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1844                         count++;
1845         }
1846         if (count > cap.max_nb_queues) {
1847                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1848                                 cap.max_nb_queues);
1849                 return -EINVAL;
1850         }
1851         if (dev->data->dev_started)
1852                 return -EBUSY;
1853         rxq = dev->data->rx_queues;
1854         if (rxq[rx_queue_id] != NULL) {
1855                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1856                                         -ENOTSUP);
1857                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1858                 rxq[rx_queue_id] = NULL;
1859         }
1860         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1861                                                       nb_rx_desc, conf);
1862         if (ret == 0)
1863                 dev->data->rx_queue_state[rx_queue_id] =
1864                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1865         return eth_err(port_id, ret);
1866 }
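
/*
 * Usage sketch (editor's illustration): binding queue 1 of a port to
 * itself as a hairpin pair, letting nb_desc=0 pick the driver maximum.
 * The peer of the Rx queue is the Tx queue and vice versa; the queue
 * counts passed to rte_eth_dev_configure() must already include the
 * hairpin queues.
 *
 *	struct rte_eth_hairpin_conf hconf;
 *
 *	memset(&hconf, 0, sizeof(hconf));
 *	hconf.peer_count = 1;
 *	hconf.peers[0].port = port_id;
 *	hconf.peers[0].queue = 1;
 *	rte_eth_rx_hairpin_queue_setup(port_id, 1, 0, &hconf);
 *	rte_eth_tx_hairpin_queue_setup(port_id, 1, 0, &hconf);
 */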
1867
1868 int
1869 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1870                        uint16_t nb_tx_desc, unsigned int socket_id,
1871                        const struct rte_eth_txconf *tx_conf)
1872 {
1873         struct rte_eth_dev *dev;
1874         struct rte_eth_dev_info dev_info;
1875         struct rte_eth_txconf local_conf;
1876         void **txq;
1877         int ret;
1878
1879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1880
1881         dev = &rte_eth_devices[port_id];
1882         if (tx_queue_id >= dev->data->nb_tx_queues) {
1883                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1884                 return -EINVAL;
1885         }
1886
1887         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1888
1889         ret = rte_eth_dev_info_get(port_id, &dev_info);
1890         if (ret != 0)
1891                 return ret;
1892
1893         /* Use default specified by driver, if nb_tx_desc is zero */
1894         if (nb_tx_desc == 0) {
1895                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1896                 /* If driver default is zero, fall back on EAL default */
1897                 if (nb_tx_desc == 0)
1898                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1899         }
1900         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1901             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1902             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1903                 RTE_ETHDEV_LOG(ERR,
1904                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1905                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1906                         dev_info.tx_desc_lim.nb_min,
1907                         dev_info.tx_desc_lim.nb_align);
1908                 return -EINVAL;
1909         }
1910
1911         if (dev->data->dev_started &&
1912                 !(dev_info.dev_capa &
1913                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1914                 return -EBUSY;
1915
1916         if (dev->data->dev_started &&
1917                 (dev->data->tx_queue_state[tx_queue_id] !=
1918                         RTE_ETH_QUEUE_STATE_STOPPED))
1919                 return -EBUSY;
1920
1921         txq = dev->data->tx_queues;
1922         if (txq[tx_queue_id]) {
1923                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1924                                         -ENOTSUP);
1925                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1926                 txq[tx_queue_id] = NULL;
1927         }
1928
1929         if (tx_conf == NULL)
1930                 tx_conf = &dev_info.default_txconf;
1931
1932         local_conf = *tx_conf;
1933
1934         /*
1935          * If an offload has already been enabled in
1936          * rte_eth_dev_configure(), it has been enabled on all queues,
1937          * so there is no need to enable it on this queue again.
1938          * The local_conf.offloads input to the underlying PMD carries
1939          * only those offloads which are enabled on this queue but
1940          * not enabled on all queues.
1941          */
1942         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1943
1944         /*
1945          * Offloads newly added for this queue are those not enabled in
1946          * rte_eth_dev_configure() and they must be of a per-queue type.
1947          * A pure per-port offload can't be enabled on a queue while
1948          * disabled on another queue, and it can't be newly added on
1949          * any queue if it hasn't been enabled in
1950          * rte_eth_dev_configure().
1951          */
1952         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1953              local_conf.offloads) {
1954                 RTE_ETHDEV_LOG(ERR,
1955                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1956                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1957                         port_id, tx_queue_id, local_conf.offloads,
1958                         dev_info.tx_queue_offload_capa,
1959                         __func__);
1960                 return -EINVAL;
1961         }
1962
1963         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1964                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1965 }
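
/*
 * Usage sketch (editor's illustration): the Tx analogue of the per-queue
 * offload rule above, adding DEV_TX_OFFLOAD_MBUF_FAST_FREE on one queue
 * only when the device reports it as a per-queue capability. Error
 * handling is elided.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	txconf = info.default_txconf;
 *	if (info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
 *		txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &txconf);
 */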
1966
1967 int
1968 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1969                                uint16_t nb_tx_desc,
1970                                const struct rte_eth_hairpin_conf *conf)
1971 {
1972         struct rte_eth_dev *dev;
1973         struct rte_eth_hairpin_cap cap;
1974         void **txq;
1975         int i;
1976         int count;
1977         int ret;
1978
1979         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1980         dev = &rte_eth_devices[port_id];
1981         if (tx_queue_id >= dev->data->nb_tx_queues) {
1982                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1983                 return -EINVAL;
1984         }
1985         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1986         if (ret != 0)
1987                 return ret;
1988         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
1989                                 -ENOTSUP);
1990         /* if nb_tx_desc is zero use max number of desc from the driver. */
1991         if (nb_tx_desc == 0)
1992                 nb_tx_desc = cap.max_nb_desc;
1993         if (nb_tx_desc > cap.max_nb_desc) {
1994                 RTE_ETHDEV_LOG(ERR,
1995                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
1996                         nb_tx_desc, cap.max_nb_desc);
1997                 return -EINVAL;
1998         }
1999         if (conf->peer_count > cap.max_tx_2_rx) {
2000                 RTE_ETHDEV_LOG(ERR,
2001                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2002                         conf->peer_count, cap.max_tx_2_rx);
2003                 return -EINVAL;
2004         }
2005         if (conf->peer_count == 0) {
2006                 RTE_ETHDEV_LOG(ERR,
2007                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2008                         conf->peer_count);
2009                 return -EINVAL;
2010         }
2011         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2012              cap.max_nb_queues != UINT16_MAX; i++) {
2013                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2014                         count++;
2015         }
2016         if (count > cap.max_nb_queues) {
2017                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2018                                 cap.max_nb_queues);
2019                 return -EINVAL;
2020         }
2021         if (dev->data->dev_started)
2022                 return -EBUSY;
2023         txq = dev->data->tx_queues;
2024         if (txq[tx_queue_id] != NULL) {
2025                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2026                                         -ENOTSUP);
2027                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2028                 txq[tx_queue_id] = NULL;
2029         }
2030         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2031                 (dev, tx_queue_id, nb_tx_desc, conf);
2032         if (ret == 0)
2033                 dev->data->tx_queue_state[tx_queue_id] =
2034                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2035         return eth_err(port_id, ret);
2036 }
2037
2038 void
2039 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2040                 void *userdata __rte_unused)
2041 {
2042         unsigned i;
2043
2044         for (i = 0; i < unsent; i++)
2045                 rte_pktmbuf_free(pkts[i]);
2046 }
2047
2048 void
2049 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2050                 void *userdata)
2051 {
2052         uint64_t *count = userdata;
2053         unsigned i;
2054
2055         for (i = 0; i < unsent; i++)
2056                 rte_pktmbuf_free(pkts[i]);
2057
2058         *count += unsent;
2059 }
2060
2061 int
2062 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2063                 buffer_tx_error_fn cbfn, void *userdata)
2064 {
2065         buffer->error_callback = cbfn;
2066         buffer->error_userdata = userdata;
2067         return 0;
2068 }
2069
2070 int
2071 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2072 {
2073         int ret = 0;
2074
2075         if (buffer == NULL)
2076                 return -EINVAL;
2077
2078         buffer->size = size;
2079         if (buffer->error_callback == NULL) {
2080                 ret = rte_eth_tx_buffer_set_err_callback(
2081                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2082         }
2083
2084         return ret;
2085 }
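
/*
 * Usage sketch (editor's illustration): a Tx buffer that counts rather
 * than silently drops unsent packets. "dropped" must outlive the buffer,
 * "pkt" is an assumed mbuf, and allocation failures are not handled.
 *
 *	static uint64_t dropped;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32),
 *			0, rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 *
 *	rte_eth_tx_buffer(port_id, 0, buf, pkt);
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */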
2086
2087 int
2088 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2089 {
2090         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2091         int ret;
2092
2093         /* Validate Input Data. Bail if not valid or not supported. */
2094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2095         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

             if (queue_id >= dev->data->nb_tx_queues) {
                     RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                     return -EINVAL;
             }

2097         /* Call driver to free pending mbufs. */
2098         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2099                                                free_cnt);
2100         return eth_err(port_id, ret);
2101 }
2102
2103 int
2104 rte_eth_promiscuous_enable(uint16_t port_id)
2105 {
2106         struct rte_eth_dev *dev;
2107         int diag = 0;
2108
2109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2110         dev = &rte_eth_devices[port_id];
2111
2112         if (dev->data->promiscuous == 1)
2113                 return 0;
2114
2115         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2116
2117         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2118         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2119
2120         return eth_err(port_id, diag);
2121 }
2122
2123 int
2124 rte_eth_promiscuous_disable(uint16_t port_id)
2125 {
2126         struct rte_eth_dev *dev;
2127         int diag = 0;
2128
2129         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2130         dev = &rte_eth_devices[port_id];
2131
2132         if (dev->data->promiscuous == 0)
2133                 return 0;
2134
2135         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2136
2137         dev->data->promiscuous = 0;
2138         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2139         if (diag != 0)
2140                 dev->data->promiscuous = 1;
2141
2142         return eth_err(port_id, diag);
2143 }
2144
2145 int
2146 rte_eth_promiscuous_get(uint16_t port_id)
2147 {
2148         struct rte_eth_dev *dev;
2149
2150         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2151
2152         dev = &rte_eth_devices[port_id];
2153         return dev->data->promiscuous;
2154 }
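
/*
 * Usage sketch (editor's illustration): as in the config-restore path
 * above, -ENOTSUP from the promiscuous calls can be treated as
 * non-fatal by applications that merely prefer the mode.
 *
 *	int ret = rte_eth_promiscuous_enable(port_id);
 *
 *	if (ret != 0 && ret != -ENOTSUP)
 *		printf("port %u: promiscuous enable failed: %s\n",
 *				port_id, rte_strerror(-ret));
 */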
2155
2156 int
2157 rte_eth_allmulticast_enable(uint16_t port_id)
2158 {
2159         struct rte_eth_dev *dev;
2160         int diag;
2161
2162         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2163         dev = &rte_eth_devices[port_id];
2164
2165         if (dev->data->all_multicast == 1)
2166                 return 0;
2167
2168         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2169         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2170         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2171
2172         return eth_err(port_id, diag);
2173 }
2174
2175 int
2176 rte_eth_allmulticast_disable(uint16_t port_id)
2177 {
2178         struct rte_eth_dev *dev;
2179         int diag;
2180
2181         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2182         dev = &rte_eth_devices[port_id];
2183
2184         if (dev->data->all_multicast == 0)
2185                 return 0;
2186
2187         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2188         dev->data->all_multicast = 0;
2189         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2190         if (diag != 0)
2191                 dev->data->all_multicast = 1;
2192
2193         return eth_err(port_id, diag);
2194 }
2195
2196 int
2197 rte_eth_allmulticast_get(uint16_t port_id)
2198 {
2199         struct rte_eth_dev *dev;
2200
2201         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2202
2203         dev = &rte_eth_devices[port_id];
2204         return dev->data->all_multicast;
2205 }
2206
2207 int
2208 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2209 {
2210         struct rte_eth_dev *dev;
2211
2212         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2213         dev = &rte_eth_devices[port_id];
2214
2215         if (dev->data->dev_conf.intr_conf.lsc &&
2216             dev->data->dev_started)
2217                 rte_eth_linkstatus_get(dev, eth_link);
2218         else {
2219                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2220                 (*dev->dev_ops->link_update)(dev, 1);
2221                 *eth_link = dev->data->dev_link;
2222         }
2223
2224         return 0;
2225 }
2226
2227 int
2228 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2229 {
2230         struct rte_eth_dev *dev;
2231
2232         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2233         dev = &rte_eth_devices[port_id];
2234
2235         if (dev->data->dev_conf.intr_conf.lsc &&
2236             dev->data->dev_started)
2237                 rte_eth_linkstatus_get(dev, eth_link);
2238         else {
2239                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2240                 (*dev->dev_ops->link_update)(dev, 0);
2241                 *eth_link = dev->data->dev_link;
2242         }
2243
2244         return 0;
2245 }
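
/*
 * Usage sketch (editor's illustration): a non-blocking link poll.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *			link.link_status == ETH_LINK_UP)
 *		printf("port %u up, %u Mbps, %s-duplex\n", port_id,
 *				link.link_speed,
 *				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 *				"full" : "half");
 */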
2246
2247 int
2248 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2249 {
2250         struct rte_eth_dev *dev;
2251
2252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2253
2254         dev = &rte_eth_devices[port_id];
2255         memset(stats, 0, sizeof(*stats));
2256
2257         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2258         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2259         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2260 }
2261
2262 int
2263 rte_eth_stats_reset(uint16_t port_id)
2264 {
2265         struct rte_eth_dev *dev;
2266         int ret;
2267
2268         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2269         dev = &rte_eth_devices[port_id];
2270
2271         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2272         ret = (*dev->dev_ops->stats_reset)(dev);
2273         if (ret != 0)
2274                 return eth_err(port_id, ret);
2275
2276         dev->data->rx_mbuf_alloc_failed = 0;
2277
2278         return 0;
2279 }
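
/*
 * Usage sketch (editor's illustration): measuring over an interval by
 * resetting first; run_traffic() is an assumed application hook.
 *
 *	struct rte_eth_stats st;
 *
 *	rte_eth_stats_reset(port_id);
 *	run_traffic();
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx %"PRIu64" pkts, %"PRIu64" missed\n",
 *				st.ipackets, st.imissed);
 */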
2280
2281 static inline int
2282 get_xstats_basic_count(struct rte_eth_dev *dev)
2283 {
2284         uint16_t nb_rxqs, nb_txqs;
2285         int count;
2286
2287         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2288         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2289
2290         count = RTE_NB_STATS;
2291         count += nb_rxqs * RTE_NB_RXQ_STATS;
2292         count += nb_txqs * RTE_NB_TXQ_STATS;
2293
2294         return count;
2295 }
2296
2297 static int
2298 get_xstats_count(uint16_t port_id)
2299 {
2300         struct rte_eth_dev *dev;
2301         int count;
2302
2303         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2304         dev = &rte_eth_devices[port_id];
2305         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2306                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2307                                 NULL, 0);
2308                 if (count < 0)
2309                         return eth_err(port_id, count);
2310         }
2311         if (dev->dev_ops->xstats_get_names != NULL) {
2312                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2313                 if (count < 0)
2314                         return eth_err(port_id, count);
2315         } else
2316                 count = 0;
2317
2319         count += get_xstats_basic_count(dev);
2320
2321         return count;
2322 }
2323
2324 int
2325 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2326                 uint64_t *id)
2327 {
2328         int cnt_xstats, idx_xstat;
2329
2330         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2331
2332         if (!id) {
2333                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2334                 return -ENOMEM;
2335         }
2336
2337         if (!xstat_name) {
2338                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2339                 return -ENOMEM;
2340         }
2341
2342         /* Get count */
2343         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2344         if (cnt_xstats  < 0) {
2345                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2346                 return -ENODEV;
2347         }
2348
2349         /* Get id-name lookup table */
2350         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2351
2352         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2353                         port_id, xstats_names, cnt_xstats, NULL)) {
2354                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2355                 return -1;
2356         }
2357
2358         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2359                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2360                         *id = idx_xstat;
2361                         return 0;
2362                 }
2363         }
2364
2365         return -EINVAL;
2366 }
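
/*
 * Usage sketch (editor's illustration): reading a single counter by
 * name with the id lookup above plus rte_eth_xstats_get_by_id().
 * "rx_good_packets" is one of the basic stats defined in this file.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */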
2367
2368 /* retrieve basic stats names */
2369 static int
2370 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2371         struct rte_eth_xstat_name *xstats_names)
2372 {
2373         int cnt_used_entries = 0;
2374         uint32_t idx, id_queue;
2375         uint16_t num_q;
2376
2377         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2378                 strlcpy(xstats_names[cnt_used_entries].name,
2379                         rte_stats_strings[idx].name,
2380                         sizeof(xstats_names[0].name));
2381                 cnt_used_entries++;
2382         }
2383         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2384         for (id_queue = 0; id_queue < num_q; id_queue++) {
2385                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2386                         snprintf(xstats_names[cnt_used_entries].name,
2387                                 sizeof(xstats_names[0].name),
2388                                 "rx_q%u%s",
2389                                 id_queue, rte_rxq_stats_strings[idx].name);
2390                         cnt_used_entries++;
2391                 }
2392
2393         }
2394         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2395         for (id_queue = 0; id_queue < num_q; id_queue++) {
2396                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2397                         snprintf(xstats_names[cnt_used_entries].name,
2398                                 sizeof(xstats_names[0].name),
2399                                 "tx_q%u%s",
2400                                 id_queue, rte_txq_stats_strings[idx].name);
2401                         cnt_used_entries++;
2402                 }
2403         }
2404         return cnt_used_entries;
2405 }
2406
2407 /* retrieve ethdev extended statistics names */
2408 int
2409 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2410         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2411         uint64_t *ids)
2412 {
2413         struct rte_eth_xstat_name *xstats_names_copy;
2414         unsigned int no_basic_stat_requested = 1;
2415         unsigned int no_ext_stat_requested = 1;
2416         unsigned int expected_entries;
2417         unsigned int basic_count;
2418         struct rte_eth_dev *dev;
2419         unsigned int i;
2420         int ret;
2421
2422         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2423         dev = &rte_eth_devices[port_id];
2424
2425         basic_count = get_xstats_basic_count(dev);
2426         ret = get_xstats_count(port_id);
2427         if (ret < 0)
2428                 return ret;
2429         expected_entries = (unsigned int)ret;
2430
2431         /* Return max number of stats if no ids given */
2432         if (!ids) {
2433                 if (!xstats_names)
2434                         return expected_entries;
2435                 else if (size < expected_entries)
2436                         return expected_entries;
2437         }
2438
2439         if (ids && !xstats_names)
2440                 return -EINVAL;
2441
2442         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2443                 uint64_t ids_copy[size];
2444
2445                 for (i = 0; i < size; i++) {
2446                         if (ids[i] < basic_count) {
2447                                 no_basic_stat_requested = 0;
2448                                 break;
2449                         }
2450
2451                         /*
2452                          * Convert ids to xstats ids that PMD knows.
2453                          * ids known by user are basic + extended stats.
2454                          */
2455                         ids_copy[i] = ids[i] - basic_count;
2456                 }
2457
2458                 if (no_basic_stat_requested)
2459                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2460                                         xstats_names, ids_copy, size);
2461         }
2462
2463         /* Retrieve all stats */
2464         if (!ids) {
2465                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2466                                 expected_entries);
2467                 if (num_stats < 0 || num_stats > (int)expected_entries)
2468                         return num_stats;
2469                 else
2470                         return expected_entries;
2471         }
2472
2473         xstats_names_copy = calloc(expected_entries,
2474                 sizeof(struct rte_eth_xstat_name));
2475
2476         if (!xstats_names_copy) {
2477                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2478                 return -ENOMEM;
2479         }
2480
2481         if (ids) {
2482                 for (i = 0; i < size; i++) {
2483                         if (ids[i] >= basic_count) {
2484                                 no_ext_stat_requested = 0;
2485                                 break;
2486                         }
2487                 }
2488         }
2489
2490         /* Fill xstats_names_copy structure */
2491         if (ids && no_ext_stat_requested) {
2492                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2493         } else {
2494                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2495                         expected_entries);
2496                 if (ret < 0) {
2497                         free(xstats_names_copy);
2498                         return ret;
2499                 }
2500         }
2501
2502         /* Filter stats */
2503         for (i = 0; i < size; i++) {
2504                 if (ids[i] >= expected_entries) {
2505                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2506                         free(xstats_names_copy);
2507                         return -1;
2508                 }
2509                 xstats_names[i] = xstats_names_copy[ids[i]];
2510         }
2511
2512         free(xstats_names_copy);
2513         return size;
2514 }
2515
2516 int
2517 rte_eth_xstats_get_names(uint16_t port_id,
2518         struct rte_eth_xstat_name *xstats_names,
2519         unsigned int size)
2520 {
2521         struct rte_eth_dev *dev;
2522         int cnt_used_entries;
2523         int cnt_expected_entries;
2524         int cnt_driver_entries;
2525
2526         cnt_expected_entries = get_xstats_count(port_id);
2527         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2528                         (int)size < cnt_expected_entries)
2529                 return cnt_expected_entries;
2530
2531         /* port_id checked in get_xstats_count() */
2532         dev = &rte_eth_devices[port_id];
2533
2534         cnt_used_entries = rte_eth_basic_stats_get_names(
2535                 dev, xstats_names);
2536
2537         if (dev->dev_ops->xstats_get_names != NULL) {
2538                 /* If there are any driver-specific xstats, append them
2539                  * to end of list.
2540                  */
2541                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2542                         dev,
2543                         xstats_names + cnt_used_entries,
2544                         size - cnt_used_entries);
2545                 if (cnt_driver_entries < 0)
2546                         return eth_err(port_id, cnt_driver_entries);
2547                 cnt_used_entries += cnt_driver_entries;
2548         }
2549
2550         return cnt_used_entries;
2551 }
2552
2553
2554 static int
2555 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2556 {
2557         struct rte_eth_dev *dev;
2558         struct rte_eth_stats eth_stats;
2559         unsigned int count = 0, i, q;
2560         uint64_t val, *stats_ptr;
2561         uint16_t nb_rxqs, nb_txqs;
2562         int ret;
2563
2564         ret = rte_eth_stats_get(port_id, &eth_stats);
2565         if (ret < 0)
2566                 return ret;
2567
2568         dev = &rte_eth_devices[port_id];
2569
2570         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2571         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2572
2573         /* global stats */
2574         for (i = 0; i < RTE_NB_STATS; i++) {
2575                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2576                                         rte_stats_strings[i].offset);
2577                 val = *stats_ptr;
2578                 xstats[count++].value = val;
2579         }
2580
2581         /* per-rxq stats */
2582         for (q = 0; q < nb_rxqs; q++) {
2583                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2584                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2585                                         rte_rxq_stats_strings[i].offset +
2586                                         q * sizeof(uint64_t));
2587                         val = *stats_ptr;
2588                         xstats[count++].value = val;
2589                 }
2590         }
2591
2592         /* per-txq stats */
2593         for (q = 0; q < nb_txqs; q++) {
2594                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2595                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2596                                         rte_txq_stats_strings[i].offset +
2597                                         q * sizeof(uint64_t));
2598                         val = *stats_ptr;
2599                         xstats[count++].value = val;
2600                 }
2601         }
2602         return count;
2603 }
2604
2605 /* retrieve ethdev extended statistics */
2606 int
2607 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2608                          uint64_t *values, unsigned int size)
2609 {
2610         unsigned int no_basic_stat_requested = 1;
2611         unsigned int no_ext_stat_requested = 1;
2612         unsigned int num_xstats_filled;
2613         unsigned int basic_count;
2614         uint16_t expected_entries;
2615         struct rte_eth_dev *dev;
2616         unsigned int i;
2617         int ret;
2618
2619         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2620         ret = get_xstats_count(port_id);
2621         if (ret < 0)
2622                 return ret;
2623         expected_entries = (uint16_t)ret;
2624         struct rte_eth_xstat xstats[expected_entries];
2625         dev = &rte_eth_devices[port_id];
2626         basic_count = get_xstats_basic_count(dev);
2627
2628         /* Return max number of stats if no ids given */
2629         if (!ids) {
2630                 if (!values)
2631                         return expected_entries;
2632                 else if (size < expected_entries)
2633                         return expected_entries;
2634         }
2635
2636         if (ids && !values)
2637                 return -EINVAL;
2638
2639         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2640                 uint64_t ids_copy[size];
2642
2643                 for (i = 0; i < size; i++) {
2644                         if (ids[i] < basic_count) {
2645                                 no_basic_stat_requested = 0;
2646                                 break;
2647                         }
2648
2649                         /*
2650                          * Convert ids to xstats ids that PMD knows.
2651                          * ids known by user are basic + extended stats.
2652                          */
2653                         ids_copy[i] = ids[i] - basic_count;
2654                 }
2655
2656                 if (no_basic_stat_requested)
2657                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2658                                         values, size);
2659         }
2660
2661         if (ids) {
2662                 for (i = 0; i < size; i++) {
2663                         if (ids[i] >= basic_count) {
2664                                 no_ext_stat_requested = 0;
2665                                 break;
2666                         }
2667                 }
2668         }
2669
2670         /* Fill the xstats structure */
2671         if (ids && no_ext_stat_requested)
2672                 ret = rte_eth_basic_stats_get(port_id, xstats);
2673         else
2674                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2675
2676         if (ret < 0)
2677                 return ret;
2678         num_xstats_filled = (unsigned int)ret;
2679
2680         /* Return all stats */
2681         if (!ids) {
2682                 for (i = 0; i < num_xstats_filled; i++)
2683                         values[i] = xstats[i].value;
2684                 return expected_entries;
2685         }
2686
2687         /* Filter stats */
2688         for (i = 0; i < size; i++) {
2689                 if (ids[i] >= expected_entries) {
2690                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2691                         return -1;
2692                 }
2693                 values[i] = xstats[ids[i]].value;
2694         }
2695         return size;
2696 }
2697
2698 int
2699 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2700         unsigned int n)
2701 {
2702         struct rte_eth_dev *dev;
2703         unsigned int count = 0, i;
2704         signed int xcount = 0;
2705         uint16_t nb_rxqs, nb_txqs;
2706         int ret;
2707
2708         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2709
2710         dev = &rte_eth_devices[port_id];
2711
2712         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2713         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2714
2715         /* Return generic statistics */
2716         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2717                 (nb_txqs * RTE_NB_TXQ_STATS);
2718
2719         /* implemented by the driver */
2720         if (dev->dev_ops->xstats_get != NULL) {
2721                 /* Retrieve the xstats from the driver at the end of the
2722                  * xstats struct.
2723                  */
2724                 xcount = (*dev->dev_ops->xstats_get)(dev,
2725                                      xstats ? xstats + count : NULL,
2726                                      (n > count) ? n - count : 0);
2727
2728                 if (xcount < 0)
2729                         return eth_err(port_id, xcount);
2730         }
2731
2732         if (n < count + xcount || xstats == NULL)
2733                 return count + xcount;
2734
2735         /* now fill the xstats structure */
2736         ret = rte_eth_basic_stats_get(port_id, xstats);
2737         if (ret < 0)
2738                 return ret;
2739         count = ret;
2740
2741         for (i = 0; i < count; i++)
2742                 xstats[i].id = i;
2743         /* add an offset to driver-specific stats */
2744         for ( ; i < count + xcount; i++)
2745                 xstats[i].id += count;
2746
2747         return count + xcount;
2748 }
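
/*
 * Usage sketch (editor's illustration): the usual two-call pattern. A
 * first call with a NULL array returns the required count, then names
 * and values are fetched and matched through the id field.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *	if (vals != NULL && names != NULL &&
 *	    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, vals, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %"PRIu64"\n",
 *					names[vals[i].id].name, vals[i].value);
 *	free(vals);
 *	free(names);
 */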
2749
2750 /* reset ethdev extended statistics */
2751 int
2752 rte_eth_xstats_reset(uint16_t port_id)
2753 {
2754         struct rte_eth_dev *dev;
2755
2756         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2757         dev = &rte_eth_devices[port_id];
2758
2759         /* implemented by the driver */
2760         if (dev->dev_ops->xstats_reset != NULL)
2761                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2762
2763         /* fallback to default */
2764         return rte_eth_stats_reset(port_id);
2765 }
2766
2767 static int
2768 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2769                 uint8_t is_rx)
2770 {
2771         struct rte_eth_dev *dev;
2772
2773         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2774
2775         dev = &rte_eth_devices[port_id];
2776
2777         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2778
2779         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2780                 return -EINVAL;
2781
2782         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2783                 return -EINVAL;
2784
2785         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2786                 return -EINVAL;
2787
2788         return (*dev->dev_ops->queue_stats_mapping_set)
2789                         (dev, queue_id, stat_idx, is_rx);
2790 }
2791
2792
2793 int
2794 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2795                 uint8_t stat_idx)
2796 {
2797         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2798                                                 stat_idx, STAT_QMAP_TX));
2799 }
2800
2801
2802 int
2803 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2804                 uint8_t stat_idx)
2805 {
2806         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2807                                                 stat_idx, STAT_QMAP_RX));
2808 }
2809
2810 int
2811 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2812 {
2813         struct rte_eth_dev *dev;
2814
2815         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2816         dev = &rte_eth_devices[port_id];
2817
2818         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2819         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2820                                                         fw_version, fw_size));
2821 }
2822
2823 int
2824 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2825 {
2826         struct rte_eth_dev *dev;
2827         const struct rte_eth_desc_lim lim = {
2828                 .nb_max = UINT16_MAX,
2829                 .nb_min = 0,
2830                 .nb_align = 1,
2831                 .nb_seg_max = UINT16_MAX,
2832                 .nb_mtu_seg_max = UINT16_MAX,
2833         };
2834         int diag;
2835
2836         /*
2837          * Init dev_info before the port_id check so that a caller that
2838          * ignores the return status still sees zeroed contents on failure.
2839          */
2840         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2841
2842         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2843         dev = &rte_eth_devices[port_id];
2844
2845         dev_info->rx_desc_lim = lim;
2846         dev_info->tx_desc_lim = lim;
2847         dev_info->device = dev->device;
2848         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2849         dev_info->max_mtu = UINT16_MAX;
2850
2851         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2852         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2853         if (diag != 0) {
2854                 /* Cleanup already filled in device information */
2855                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2856                 return eth_err(port_id, diag);
2857         }
2858
2859         dev_info->driver_name = dev->device->driver->name;
2860         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2861         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2862
2863         dev_info->dev_flags = &dev->data->dev_flags;
2864
2865         return 0;
2866 }
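
/*
 * Usage sketch (editor's illustration): clamping a requested queue
 * count to the device limits before rte_eth_dev_configure().
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxq = 4;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *			nb_rxq > info.max_rx_queues)
 *		nb_rxq = info.max_rx_queues;
 */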
2867
2868 int
2869 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2870                                  uint32_t *ptypes, int num)
2871 {
2872         int i, j;
2873         struct rte_eth_dev *dev;
2874         const uint32_t *all_ptypes;
2875
2876         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2877         dev = &rte_eth_devices[port_id];
2878         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2879         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2880
2881         if (!all_ptypes)
2882                 return 0;
2883
2884         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2885                 if (all_ptypes[i] & ptype_mask) {
2886                         if (j < num)
2887                                 ptypes[j] = all_ptypes[i];
2888                         j++;
2889                 }
2890
2891         return j;
2892 }
2893
2894 int
2895 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
2896                                  uint32_t *set_ptypes, unsigned int num)
2897 {
2898         const uint32_t valid_ptype_masks[] = {
2899                 RTE_PTYPE_L2_MASK,
2900                 RTE_PTYPE_L3_MASK,
2901                 RTE_PTYPE_L4_MASK,
2902                 RTE_PTYPE_TUNNEL_MASK,
2903                 RTE_PTYPE_INNER_L2_MASK,
2904                 RTE_PTYPE_INNER_L3_MASK,
2905                 RTE_PTYPE_INNER_L4_MASK,
2906         };
2907         const uint32_t *all_ptypes;
2908         struct rte_eth_dev *dev;
2909         uint32_t unused_mask;
2910         unsigned int i, j;
2911         int ret;
2912
2913         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2914         dev = &rte_eth_devices[port_id];
2915
2916         if (num > 0 && set_ptypes == NULL)
2917                 return -EINVAL;
2918
2919         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
2920                         *dev->dev_ops->dev_ptypes_set == NULL) {
2921                 ret = 0;
2922                 goto ptype_unknown;
2923         }
2924
2925         if (ptype_mask == 0) {
2926                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
2927                                 ptype_mask);
2928                 goto ptype_unknown;
2929         }
2930
2931         unused_mask = ptype_mask;
2932         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
2933                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
2934                 if (mask && mask != valid_ptype_masks[i]) {
2935                         ret = -EINVAL;
2936                         goto ptype_unknown;
2937                 }
2938                 unused_mask &= ~valid_ptype_masks[i];
2939         }
2940
2941         if (unused_mask) {
2942                 ret = -EINVAL;
2943                 goto ptype_unknown;
2944         }
2945
2946         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2947         if (all_ptypes == NULL) {
2948                 ret = 0;
2949                 goto ptype_unknown;
2950         }
2951
2952         /*
2953          * Accommodate as many set_ptypes as possible. If the supplied
2954          * set_ptypes array is insufficient, fill it partially.
2955          */
2956         for (i = 0, j = 0; set_ptypes != NULL &&
2957                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
2958                 if (ptype_mask & all_ptypes[i]) {
2959                         if (j + 1 < num) {
2960                                 set_ptypes[j] = all_ptypes[i];
2961                                 j++;
2962                                 continue;
2963                         }
2964                         break;
2965                 }
2966         }
2967
2968         if (set_ptypes != NULL && j < num)
2969                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
2970
2971         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
2972
2973 ptype_unknown:
2974         if (num > 0)
2975                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
2976
2977         return ret;
2978 }
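
/*
 * Illustrative sketch, assuming port 0: ask the PMD to classify no
 * deeper than L3, which some drivers can exploit to cheapen their
 * receive path. The kept[] size is an example choice; the array
 * reports which ptypes remain enabled.
 */
static __rte_unused void
example_restrict_ptype_parsing(void)
{
	uint32_t kept[8];
	int ret;

	ret = rte_eth_dev_set_ptypes(0, RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK,
				     kept, RTE_DIM(kept));
	if (ret < 0)
		printf("ptype restriction failed: %d\n", ret);
}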
2979
2980 int
2981 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
2982 {
2983         struct rte_eth_dev *dev;
2984
2985         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2986         dev = &rte_eth_devices[port_id];
2987         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2988
2989         return 0;
2990 }
2991
2992 int
2993 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2994 {
2995         struct rte_eth_dev *dev;
2996
2997         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2998
2999         dev = &rte_eth_devices[port_id];
3000         *mtu = dev->data->mtu;
3001         return 0;
3002 }
3003
3004 int
3005 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3006 {
3007         int ret;
3008         struct rte_eth_dev_info dev_info;
3009         struct rte_eth_dev *dev;
3010
3011         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3012         dev = &rte_eth_devices[port_id];
3013         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3014
3015         /*
3016          * Check if the device supports dev_infos_get; if it does not,
3017          * skip the min_mtu/max_mtu validation here, as it requires values
3018          * that are populated within the call to rte_eth_dev_info_get(),
3019          * which relies on dev->dev_ops->dev_infos_get.
3020          */
3021         if (*dev->dev_ops->dev_infos_get != NULL) {
3022                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3023                 if (ret != 0)
3024                         return ret;
3025
3026                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3027                         return -EINVAL;
3028         }
3029
3030         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3031         if (!ret)
3032                 dev->data->mtu = mtu;
3033
3034         return eth_err(port_id, ret);
3035 }
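
/*
 * Illustrative sketch: try a jumbo MTU and fall back to the device
 * maximum on -EINVAL. Port 0 and the 9000-byte value are example
 * assumptions, not requirements of the API.
 */
static __rte_unused int
example_set_jumbo_mtu(void)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_set_mtu(0, 9000);
	if (ret == -EINVAL) {
		ret = rte_eth_dev_info_get(0, &dev_info);
		if (ret == 0)
			ret = rte_eth_dev_set_mtu(0, dev_info.max_mtu);
	}
	return ret;
}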
3036
3037 int
3038 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3039 {
3040         struct rte_eth_dev *dev;
3041         int ret;
3042
3043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3044         dev = &rte_eth_devices[port_id];
3045         if (!(dev->data->dev_conf.rxmode.offloads &
3046               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3047                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3048                         port_id);
3049                 return -ENOSYS;
3050         }
3051
3052         if (vlan_id > 4095) {
3053                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3054                         port_id, vlan_id);
3055                 return -EINVAL;
3056         }
3057         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3058
3059         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3060         if (ret == 0) {
3061                 struct rte_vlan_filter_conf *vfc;
3062                 int vidx;
3063                 int vbit;
3064
3065                 vfc = &dev->data->vlan_filter_conf;
3066                 vidx = vlan_id / 64;
3067                 vbit = vlan_id % 64;
3068
3069                 if (on)
3070                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3071                 else
3072                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3073         }
3074
3075         return eth_err(port_id, ret);
3076 }
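
/*
 * Illustrative sketch: admit VLAN 100 on port 0. This only succeeds if
 * DEV_RX_OFFLOAD_VLAN_FILTER was enabled when the port was configured,
 * as enforced above; both ids are example values.
 */
static __rte_unused int
example_admit_vlan_100(void)
{
	return rte_eth_dev_vlan_filter(0, 100, 1);
}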
3077
3078 int
3079 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3080                                     int on)
3081 {
3082         struct rte_eth_dev *dev;
3083
3084         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3085         dev = &rte_eth_devices[port_id];
3086         if (rx_queue_id >= dev->data->nb_rx_queues) {
3087                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3088                 return -EINVAL;
3089         }
3090
3091         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3092         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3093
3094         return 0;
3095 }
3096
3097 int
3098 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3099                                 enum rte_vlan_type vlan_type,
3100                                 uint16_t tpid)
3101 {
3102         struct rte_eth_dev *dev;
3103
3104         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3105         dev = &rte_eth_devices[port_id];
3106         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3107
3108         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3109                                                                tpid));
3110 }
3111
3112 int
3113 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3114 {
3115         struct rte_eth_dev *dev;
3116         int ret = 0;
3117         int mask = 0;
3118         int cur, org = 0;
3119         uint64_t orig_offloads;
3120         uint64_t *dev_offloads;
3121
3122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3123         dev = &rte_eth_devices[port_id];
3124
3125         /* save original values in case of failure */
3126         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3127         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3128
3129         /* check which options were changed by the application */
3130         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3131         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3132         if (cur != org) {
3133                 if (cur)
3134                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3135                 else
3136                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3137                 mask |= ETH_VLAN_STRIP_MASK;
3138         }
3139
3140         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3141         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3142         if (cur != org) {
3143                 if (cur)
3144                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3145                 else
3146                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3147                 mask |= ETH_VLAN_FILTER_MASK;
3148         }
3149
3150         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3151         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3152         if (cur != org) {
3153                 if (cur)
3154                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3155                 else
3156                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3157                 mask |= ETH_VLAN_EXTEND_MASK;
3158         }
3159
3160         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3161         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3162         if (cur != org) {
3163                 if (cur)
3164                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3165                 else
3166                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3167                 mask |= ETH_QINQ_STRIP_MASK;
3168         }
3169
3170         /* no change */
3171         if (mask == 0)
3172                 return ret;
3173
3174         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3175         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3176         if (ret) {
3177                 /* hit an error, restore original values */
3178                 *dev_offloads = orig_offloads;
3179         }
3180
3181         return eth_err(port_id, ret);
3182 }
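
/*
 * Illustrative sketch: enable VLAN stripping at runtime without
 * disturbing the other VLAN offload bits, using the read-modify-write
 * pattern the get/set pair above supports. Port 0 is an example
 * assumption.
 */
static __rte_unused int
example_enable_vlan_strip(void)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(0);
	if (mask < 0)
		return mask;
	return rte_eth_dev_set_vlan_offload(0, mask | ETH_VLAN_STRIP_OFFLOAD);
}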
3183
3184 int
3185 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3186 {
3187         struct rte_eth_dev *dev;
3188         uint64_t *dev_offloads;
3189         int ret = 0;
3190
3191         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3192         dev = &rte_eth_devices[port_id];
3193         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3194
3195         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3196                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3197
3198         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3199                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3200
3201         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3202                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3203
3204         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3205                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3206
3207         return ret;
3208 }
3209
3210 int
3211 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3212 {
3213         struct rte_eth_dev *dev;
3214
3215         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3216         dev = &rte_eth_devices[port_id];
3217         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3218
3219         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3220 }
3221
3222 int
3223 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3224 {
3225         struct rte_eth_dev *dev;
3226
3227         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3228         dev = &rte_eth_devices[port_id];
3229         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3230         memset(fc_conf, 0, sizeof(*fc_conf));
3231         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3232 }
3233
3234 int
3235 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3236 {
3237         struct rte_eth_dev *dev;
3238
3239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3240         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3241                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3242                 return -EINVAL;
3243         }
3244
3245         dev = &rte_eth_devices[port_id];
3246         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3247         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3248 }
3249
3250 int
3251 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3252                                    struct rte_eth_pfc_conf *pfc_conf)
3253 {
3254         struct rte_eth_dev *dev;
3255
3256         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3257         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3258                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3259                 return -EINVAL;
3260         }
3261
3262         dev = &rte_eth_devices[port_id];
3263         /* High water / low water validation is device specific */
3264         if (*dev->dev_ops->priority_flow_ctrl_set)
3265                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3266                                         (dev, pfc_conf));
3267         return -ENOTSUP;
3268 }
3269
3270 static int
3271 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3272                         uint16_t reta_size)
3273 {
3274         uint16_t i, num;
3275
3276         if (!reta_conf)
3277                 return -EINVAL;
3278
3279         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3280         for (i = 0; i < num; i++) {
3281                 if (reta_conf[i].mask)
3282                         return 0;
3283         }
3284
3285         return -EINVAL;
3286 }
3287
3288 static int
3289 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3290                          uint16_t reta_size,
3291                          uint16_t max_rxq)
3292 {
3293         uint16_t i, idx, shift;
3294
3295         if (!reta_conf)
3296                 return -EINVAL;
3297
3298         if (max_rxq == 0) {
3299                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3300                 return -EINVAL;
3301         }
3302
3303         for (i = 0; i < reta_size; i++) {
3304                 idx = i / RTE_RETA_GROUP_SIZE;
3305                 shift = i % RTE_RETA_GROUP_SIZE;
3306                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3307                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3308                         RTE_ETHDEV_LOG(ERR,
3309                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3310                                 idx, shift,
3311                                 reta_conf[idx].reta[shift], max_rxq);
3312                         return -EINVAL;
3313                 }
3314         }
3315
3316         return 0;
3317 }
3318
3319 int
3320 rte_eth_dev_rss_reta_update(uint16_t port_id,
3321                             struct rte_eth_rss_reta_entry64 *reta_conf,
3322                             uint16_t reta_size)
3323 {
3324         struct rte_eth_dev *dev;
3325         int ret;
3326
3327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3328         /* Check mask bits */
3329         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3330         if (ret < 0)
3331                 return ret;
3332
3333         dev = &rte_eth_devices[port_id];
3334
3335         /* Check entry value */
3336         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3337                                 dev->data->nb_rx_queues);
3338         if (ret < 0)
3339                 return ret;
3340
3341         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3342         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3343                                                              reta_size));
3344 }
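
/*
 * Illustrative sketch: spread a 128-entry redirection table round-robin
 * over four Rx queues. In real code the table size must equal
 * dev_info.reta_size; 128, four queues and port 0 are example
 * assumptions.
 */
static __rte_unused int
example_spread_reta(void)
{
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < 128; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % 4;
	}
	return rte_eth_dev_rss_reta_update(0, reta_conf, 128);
}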
3345
3346 int
3347 rte_eth_dev_rss_reta_query(uint16_t port_id,
3348                            struct rte_eth_rss_reta_entry64 *reta_conf,
3349                            uint16_t reta_size)
3350 {
3351         struct rte_eth_dev *dev;
3352         int ret;
3353
3354         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3355
3356         /* Check mask bits */
3357         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3358         if (ret < 0)
3359                 return ret;
3360
3361         dev = &rte_eth_devices[port_id];
3362         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3363         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3364                                                             reta_size));
3365 }
3366
3367 int
3368 rte_eth_dev_rss_hash_update(uint16_t port_id,
3369                             struct rte_eth_rss_conf *rss_conf)
3370 {
3371         struct rte_eth_dev *dev;
3372         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3373         int ret;
3374
3375         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3376
3377         ret = rte_eth_dev_info_get(port_id, &dev_info);
3378         if (ret != 0)
3379                 return ret;
3380
3381         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3382
3383         dev = &rte_eth_devices[port_id];
3384         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3385             dev_info.flow_type_rss_offloads) {
3386                 RTE_ETHDEV_LOG(ERR,
3387                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3388                         port_id, rss_conf->rss_hf,
3389                         dev_info.flow_type_rss_offloads);
3390                 return -EINVAL;
3391         }
3392         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3393         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3394                                                                  rss_conf));
3395 }
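
/*
 * Illustrative sketch: restrict the RSS hash to IP traffic on port 0.
 * A NULL rss_key conventionally leaves the current key unchanged;
 * ETH_RSS_IP and the port id are example choices.
 */
static __rte_unused int
example_hash_on_ip_only(void)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,
		.rss_hf = ETH_RSS_IP,
	};

	return rte_eth_dev_rss_hash_update(0, &rss_conf);
}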
3396
3397 int
3398 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3399                               struct rte_eth_rss_conf *rss_conf)
3400 {
3401         struct rte_eth_dev *dev;
3402
3403         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3404         dev = &rte_eth_devices[port_id];
3405         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3406         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3407                                                                    rss_conf));
3408 }
3409
3410 int
3411 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3412                                 struct rte_eth_udp_tunnel *udp_tunnel)
3413 {
3414         struct rte_eth_dev *dev;
3415
3416         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3417         if (udp_tunnel == NULL) {
3418                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3419                 return -EINVAL;
3420         }
3421
3422         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3423                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3424                 return -EINVAL;
3425         }
3426
3427         dev = &rte_eth_devices[port_id];
3428         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3429         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3430                                                                 udp_tunnel));
3431 }
3432
3433 int
3434 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3435                                    struct rte_eth_udp_tunnel *udp_tunnel)
3436 {
3437         struct rte_eth_dev *dev;
3438
3439         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3440         dev = &rte_eth_devices[port_id];
3441
3442         if (udp_tunnel == NULL) {
3443                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3444                 return -EINVAL;
3445         }
3446
3447         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3448                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3449                 return -EINVAL;
3450         }
3451
3452         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3453         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3454                                                                 udp_tunnel));
3455 }
3456
3457 int
3458 rte_eth_led_on(uint16_t port_id)
3459 {
3460         struct rte_eth_dev *dev;
3461
3462         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3463         dev = &rte_eth_devices[port_id];
3464         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3465         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3466 }
3467
3468 int
3469 rte_eth_led_off(uint16_t port_id)
3470 {
3471         struct rte_eth_dev *dev;
3472
3473         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3474         dev = &rte_eth_devices[port_id];
3475         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3476         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3477 }
3478
3479 /*
3480  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3481  * an empty spot.
3482  */
3483 static int
3484 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3485 {
3486         struct rte_eth_dev_info dev_info;
3487         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3488         unsigned i;
3489         int ret;
3490
3491         ret = rte_eth_dev_info_get(port_id, &dev_info);
3492         if (ret != 0)
3493                 return -1;
3494
3495         for (i = 0; i < dev_info.max_mac_addrs; i++)
3496                 if (memcmp(addr, &dev->data->mac_addrs[i],
3497                                 RTE_ETHER_ADDR_LEN) == 0)
3498                         return i;
3499
3500         return -1;
3501 }
3502
3503 static const struct rte_ether_addr null_mac_addr;
3504
3505 int
3506 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3507                         uint32_t pool)
3508 {
3509         struct rte_eth_dev *dev;
3510         int index;
3511         uint64_t pool_mask;
3512         int ret;
3513
3514         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3515         dev = &rte_eth_devices[port_id];
3516         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3517
3518         if (rte_is_zero_ether_addr(addr)) {
3519                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3520                         port_id);
3521                 return -EINVAL;
3522         }
3523         if (pool >= ETH_64_POOLS) {
3524                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3525                 return -EINVAL;
3526         }
3527
3528         index = get_mac_addr_index(port_id, addr);
3529         if (index < 0) {
3530                 index = get_mac_addr_index(port_id, &null_mac_addr);
3531                 if (index < 0) {
3532                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3533                                 port_id);
3534                         return -ENOSPC;
3535                 }
3536         } else {
3537                 pool_mask = dev->data->mac_pool_sel[index];
3538
3539                 /* If both the MAC address and pool are already there, do nothing */
3540                 if (pool_mask & (1ULL << pool))
3541                         return 0;
3542         }
3543
3544         /* Update NIC */
3545         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3546
3547         if (ret == 0) {
3548                 /* Update address in NIC data structure */
3549                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3550
3551                 /* Update pool bitmap in NIC data structure */
3552                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3553         }
3554
3555         return eth_err(port_id, ret);
3556 }
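
/*
 * Illustrative sketch: accept one extra unicast address on pool 0 of
 * port 0. The locally administered address bytes are example values.
 */
static __rte_unused int
example_add_secondary_mac(void)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_mac_addr_add(0, &addr, 0);
}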
3557
3558 int
3559 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3560 {
3561         struct rte_eth_dev *dev;
3562         int index;
3563
3564         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3565         dev = &rte_eth_devices[port_id];
3566         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3567
3568         index = get_mac_addr_index(port_id, addr);
3569         if (index == 0) {
3570                 RTE_ETHDEV_LOG(ERR,
3571                         "Port %u: Cannot remove default MAC address\n",
3572                         port_id);
3573                 return -EADDRINUSE;
3574         } else if (index < 0)
3575                 return 0;  /* Do nothing if address wasn't found */
3576
3577         /* Update NIC */
3578         (*dev->dev_ops->mac_addr_remove)(dev, index);
3579
3580         /* Update address in NIC data structure */
3581         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3582
3583         /* reset pool bitmap */
3584         dev->data->mac_pool_sel[index] = 0;
3585
3586         return 0;
3587 }
3588
3589 int
3590 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3591 {
3592         struct rte_eth_dev *dev;
3593         int ret;
3594
3595         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3596
3597         if (!rte_is_valid_assigned_ether_addr(addr))
3598                 return -EINVAL;
3599
3600         dev = &rte_eth_devices[port_id];
3601         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3602
3603         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3604         if (ret < 0)
3605                 return ret;
3606
3607         /* Update default address in NIC data structure */
3608         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3609
3610         return 0;
3611 }
3612
3613
3614 /*
3615  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3616  * an empty spot.
3617  */
3618 static int
3619 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3620 {
3621         struct rte_eth_dev_info dev_info;
3622         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3623         unsigned i;
3624         int ret;
3625
3626         ret = rte_eth_dev_info_get(port_id, &dev_info);
3627         if (ret != 0)
3628                 return -1;
3629
3630         if (!dev->data->hash_mac_addrs)
3631                 return -1;
3632
3633         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3634                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3635                         RTE_ETHER_ADDR_LEN) == 0)
3636                         return i;
3637
3638         return -1;
3639 }
3640
3641 int
3642 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3643                                 uint8_t on)
3644 {
3645         int index;
3646         int ret;
3647         struct rte_eth_dev *dev;
3648
3649         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3650
3651         dev = &rte_eth_devices[port_id];
3652         if (rte_is_zero_ether_addr(addr)) {
3653                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3654                         port_id);
3655                 return -EINVAL;
3656         }
3657
3658         index = get_hash_mac_addr_index(port_id, addr);
3659         /* Check if it's already there, and do nothing */
3660         if ((index >= 0) && on)
3661                 return 0;
3662
3663         if (index < 0) {
3664                 if (!on) {
3665                         RTE_ETHDEV_LOG(ERR,
3666                                 "Port %u: the MAC address was not set in UTA\n",
3667                                 port_id);
3668                         return -EINVAL;
3669                 }
3670
3671                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3672                 if (index < 0) {
3673                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3674                                 port_id);
3675                         return -ENOSPC;
3676                 }
3677         }
3678
3679         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3680         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3681         if (ret == 0) {
3682                 /* Update address in NIC data structure */
3683                 if (on)
3684                         rte_ether_addr_copy(addr,
3685                                         &dev->data->hash_mac_addrs[index]);
3686                 else
3687                         rte_ether_addr_copy(&null_mac_addr,
3688                                         &dev->data->hash_mac_addrs[index]);
3689         }
3690
3691         return eth_err(port_id, ret);
3692 }
3693
3694 int
3695 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3696 {
3697         struct rte_eth_dev *dev;
3698
3699         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3700
3701         dev = &rte_eth_devices[port_id];
3702
3703         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3704         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3705                                                                        on));
3706 }
3707
3708 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3709                                         uint16_t tx_rate)
3710 {
3711         struct rte_eth_dev *dev;
3712         struct rte_eth_dev_info dev_info;
3713         struct rte_eth_link link;
3714         int ret;
3715
3716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3717
3718         ret = rte_eth_dev_info_get(port_id, &dev_info);
3719         if (ret != 0)
3720                 return ret;
3721
3722         dev = &rte_eth_devices[port_id];
3723         link = dev->data->dev_link;
3724
3725         if (queue_idx >= dev_info.max_tx_queues) {
3726                 RTE_ETHDEV_LOG(ERR,
3727                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3728                         port_id, queue_idx);
3729                 return -EINVAL;
3730         }
3731
3732         if (tx_rate > link.link_speed) {
3733                 RTE_ETHDEV_LOG(ERR,
3734                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3735                         tx_rate, link.link_speed);
3736                 return -EINVAL;
3737         }
3738
3739         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3740         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3741                                                         queue_idx, tx_rate));
3742 }
3743
3744 int
3745 rte_eth_mirror_rule_set(uint16_t port_id,
3746                         struct rte_eth_mirror_conf *mirror_conf,
3747                         uint8_t rule_id, uint8_t on)
3748 {
3749         struct rte_eth_dev *dev;
3750
3751         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3752         if (mirror_conf->rule_type == 0) {
3753                 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
3754                 return -EINVAL;
3755         }
3756
3757         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3758                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3759                         ETH_64_POOLS - 1);
3760                 return -EINVAL;
3761         }
3762
3763         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3764              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3765             (mirror_conf->pool_mask == 0)) {
3766                 RTE_ETHDEV_LOG(ERR,
3767                         "Invalid mirror pool, pool mask can not be 0\n");
3768                 return -EINVAL;
3769         }
3770
3771         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3772             mirror_conf->vlan.vlan_mask == 0) {
3773                 RTE_ETHDEV_LOG(ERR,
3774                         "Invalid vlan mask, vlan mask can not be 0\n");
3775                 return -EINVAL;
3776         }
3777
3778         dev = &rte_eth_devices[port_id];
3779         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3780
3781         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3782                                                 mirror_conf, rule_id, on));
3783 }
3784
3785 int
3786 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3787 {
3788         struct rte_eth_dev *dev;
3789
3790         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3791
3792         dev = &rte_eth_devices[port_id];
3793         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3794
3795         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3796                                                                    rule_id));
3797 }
3798
3799 RTE_INIT(eth_dev_init_cb_lists)
3800 {
3801         int i;
3802
3803         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3804                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3805 }
3806
3807 int
3808 rte_eth_dev_callback_register(uint16_t port_id,
3809                         enum rte_eth_event_type event,
3810                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3811 {
3812         struct rte_eth_dev *dev;
3813         struct rte_eth_dev_callback *user_cb;
3814         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3815         uint16_t last_port;
3816
3817         if (!cb_fn)
3818                 return -EINVAL;
3819
3820         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3821                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3822                 return -EINVAL;
3823         }
3824
3825         if (port_id == RTE_ETH_ALL) {
3826                 next_port = 0;
3827                 last_port = RTE_MAX_ETHPORTS - 1;
3828         } else {
3829                 next_port = last_port = port_id;
3830         }
3831
3832         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3833
3834         do {
3835                 dev = &rte_eth_devices[next_port];
3836
3837                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3838                         if (user_cb->cb_fn == cb_fn &&
3839                                 user_cb->cb_arg == cb_arg &&
3840                                 user_cb->event == event) {
3841                                 break;
3842                         }
3843                 }
3844
3845                 /* create a new callback. */
3846                 if (user_cb == NULL) {
3847                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3848                                 sizeof(struct rte_eth_dev_callback), 0);
3849                         if (user_cb != NULL) {
3850                                 user_cb->cb_fn = cb_fn;
3851                                 user_cb->cb_arg = cb_arg;
3852                                 user_cb->event = event;
3853                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3854                                                   user_cb, next);
3855                         } else {
3856                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3857                                 rte_eth_dev_callback_unregister(port_id, event,
3858                                                                 cb_fn, cb_arg);
3859                                 return -ENOMEM;
3860                         }
3861
3862                 }
3863         } while (++next_port <= last_port);
3864
3865         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3866         return 0;
3867 }
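
/*
 * Illustrative sketch: log link-state changes on every port by
 * registering one callback with RTE_ETH_ALL. The callback body is an
 * example, not a prescribed pattern.
 */
static __rte_unused int
example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
	       void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("port %u: link state changed\n", port_id);
	return 0;
}

static __rte_unused int
example_watch_all_links(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_cb, NULL);
}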
3868
3869 int
3870 rte_eth_dev_callback_unregister(uint16_t port_id,
3871                         enum rte_eth_event_type event,
3872                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3873 {
3874         int ret;
3875         struct rte_eth_dev *dev;
3876         struct rte_eth_dev_callback *cb, *next;
3877         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3878         uint16_t last_port;
3879
3880         if (!cb_fn)
3881                 return -EINVAL;
3882
3883         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3884                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3885                 return -EINVAL;
3886         }
3887
3888         if (port_id == RTE_ETH_ALL) {
3889                 next_port = 0;
3890                 last_port = RTE_MAX_ETHPORTS - 1;
3891         } else {
3892                 next_port = last_port = port_id;
3893         }
3894
3895         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3896
3897         do {
3898                 dev = &rte_eth_devices[next_port];
3899                 ret = 0;
3900                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3901                      cb = next) {
3902
3903                         next = TAILQ_NEXT(cb, next);
3904
3905                         if (cb->cb_fn != cb_fn || cb->event != event ||
3906                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3907                                 continue;
3908
3909                         /*
3910                          * if this callback is not executing right now,
3911                          * then remove it.
3912                          */
3913                         if (cb->active == 0) {
3914                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3915                                 rte_free(cb);
3916                         } else {
3917                                 ret = -EAGAIN;
3918                         }
3919                 }
3920         } while (++next_port <= last_port);
3921
3922         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3923         return ret;
3924 }
3925
3926 int
3927 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3928         enum rte_eth_event_type event, void *ret_param)
3929 {
3930         struct rte_eth_dev_callback *cb_lst;
3931         struct rte_eth_dev_callback dev_cb;
3932         int rc = 0;
3933
3934         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3935         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3936                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3937                         continue;
3938                 dev_cb = *cb_lst;
3939                 cb_lst->active = 1;
3940                 if (ret_param != NULL)
3941                         dev_cb.ret_param = ret_param;
3942
3943                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3944                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3945                                 dev_cb.cb_arg, dev_cb.ret_param);
3946                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3947                 cb_lst->active = 0;
3948         }
3949         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3950         return rc;
3951 }
3952
3953 void
3954 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3955 {
3956         if (dev == NULL)
3957                 return;
3958
3959         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3960
3961         dev->state = RTE_ETH_DEV_ATTACHED;
3962 }
3963
3964 int
3965 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3966 {
3967         uint32_t vec;
3968         struct rte_eth_dev *dev;
3969         struct rte_intr_handle *intr_handle;
3970         uint16_t qid;
3971         int rc;
3972
3973         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3974
3975         dev = &rte_eth_devices[port_id];
3976
3977         if (!dev->intr_handle) {
3978                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3979                 return -ENOTSUP;
3980         }
3981
3982         intr_handle = dev->intr_handle;
3983         if (!intr_handle->intr_vec) {
3984                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3985                 return -EPERM;
3986         }
3987
3988         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3989                 vec = intr_handle->intr_vec[qid];
3990                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3991                 if (rc && rc != -EEXIST) {
3992                         RTE_ETHDEV_LOG(ERR,
3993                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3994                                 port_id, qid, op, epfd, vec);
3995                 }
3996         }
3997
3998         return 0;
3999 }
4000
4001 int
4002 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4003 {
4004         struct rte_intr_handle *intr_handle;
4005         struct rte_eth_dev *dev;
4006         unsigned int efd_idx;
4007         uint32_t vec;
4008         int fd;
4009
4010         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4011
4012         dev = &rte_eth_devices[port_id];
4013
4014         if (queue_id >= dev->data->nb_rx_queues) {
4015                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4016                 return -1;
4017         }
4018
4019         if (!dev->intr_handle) {
4020                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4021                 return -1;
4022         }
4023
4024         intr_handle = dev->intr_handle;
4025         if (!intr_handle->intr_vec) {
4026                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4027                 return -1;
4028         }
4029
4030         vec = intr_handle->intr_vec[queue_id];
4031         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4032                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4033         fd = intr_handle->efds[efd_idx];
4034
4035         return fd;
4036 }
4037
4038 const struct rte_memzone *
4039 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4040                          uint16_t queue_id, size_t size, unsigned align,
4041                          int socket_id)
4042 {
4043         char z_name[RTE_MEMZONE_NAMESIZE];
4044         const struct rte_memzone *mz;
4045         int rc;
4046
4047         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
4048                       dev->data->port_id, queue_id, ring_name);
4049         if (rc >= RTE_MEMZONE_NAMESIZE) {
4050                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4051                 rte_errno = ENAMETOOLONG;
4052                 return NULL;
4053         }
4054
4055         mz = rte_memzone_lookup(z_name);
4056         if (mz)
4057                 return mz;
4058
4059         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4060                         RTE_MEMZONE_IOVA_CONTIG, align);
4061 }
4062
4063 int
4064 rte_eth_dev_create(struct rte_device *device, const char *name,
4065         size_t priv_data_size,
4066         ethdev_bus_specific_init ethdev_bus_specific_init,
4067         void *bus_init_params,
4068         ethdev_init_t ethdev_init, void *init_params)
4069 {
4070         struct rte_eth_dev *ethdev;
4071         int retval;
4072
4073         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4074
4075         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4076                 ethdev = rte_eth_dev_allocate(name);
4077                 if (!ethdev)
4078                         return -ENODEV;
4079
4080                 if (priv_data_size) {
4081                         ethdev->data->dev_private = rte_zmalloc_socket(
4082                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4083                                 device->numa_node);
4084
4085                         if (!ethdev->data->dev_private) {
4086                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4087                                 retval = -ENOMEM;
4088                                 goto probe_failed;
4089                         }
4090                 }
4091         } else {
4092                 ethdev = rte_eth_dev_attach_secondary(name);
4093                 if (!ethdev) {
4094                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4095                                 "ethdev doesn't exist\n");
4096                         return -ENODEV;
4097                 }
4098         }
4099
4100         ethdev->device = device;
4101
4102         if (ethdev_bus_specific_init) {
4103                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4104                 if (retval) {
4105                         RTE_LOG(ERR, EAL,
4106                                 "ethdev bus specific initialisation failed\n");
4107                         goto probe_failed;
4108                 }
4109         }
4110
4111         retval = ethdev_init(ethdev, init_params);
4112         if (retval) {
4113                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
4114                 goto probe_failed;
4115         }
4116
4117         rte_eth_dev_probing_finish(ethdev);
4118
4119         return retval;
4120
4121 probe_failed:
4122         rte_eth_dev_release_port(ethdev);
4123         return retval;
4124 }
4125
4126 int
4127 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4128         ethdev_uninit_t ethdev_uninit)
4129 {
4130         int ret;
4131
4132         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4133         if (!ethdev)
4134                 return -ENODEV;
4135
4136         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4137
4138         ret = ethdev_uninit(ethdev);
4139         if (ret)
4140                 return ret;
4141
4142         return rte_eth_dev_release_port(ethdev);
4143 }
4144
4145 int
4146 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4147                           int epfd, int op, void *data)
4148 {
4149         uint32_t vec;
4150         struct rte_eth_dev *dev;
4151         struct rte_intr_handle *intr_handle;
4152         int rc;
4153
4154         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4155
4156         dev = &rte_eth_devices[port_id];
4157         if (queue_id >= dev->data->nb_rx_queues) {
4158                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4159                 return -EINVAL;
4160         }
4161
4162         if (!dev->intr_handle) {
4163                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4164                 return -ENOTSUP;
4165         }
4166
4167         intr_handle = dev->intr_handle;
4168         if (!intr_handle->intr_vec) {
4169                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4170                 return -EPERM;
4171         }
4172
4173         vec = intr_handle->intr_vec[queue_id];
4174         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4175         if (rc && rc != -EEXIST) {
4176                 RTE_ETHDEV_LOG(ERR,
4177                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4178                         port_id, queue_id, op, epfd, vec);
4179                 return rc;
4180         }
4181
4182         return 0;
4183 }
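
/*
 * Illustrative sketch: register Rx queue 0 of port 0 with the calling
 * thread's epoll set and enable its interrupt, so a polling loop can
 * sleep in rte_epoll_wait() while the queue is idle. The ids and the
 * NULL event data are example assumptions.
 */
static __rte_unused int
example_sleep_on_rxq(void)
{
	int ret;

	ret = rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
					RTE_INTR_EVENT_ADD, NULL);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_intr_enable(0, 0);
}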
4184
4185 int
4186 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4187                            uint16_t queue_id)
4188 {
4189         struct rte_eth_dev *dev;
4190
4191         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4192
4193         dev = &rte_eth_devices[port_id];
4194
4195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4196         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4197                                                                 queue_id));
4198 }
4199
4200 int
4201 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4202                             uint16_t queue_id)
4203 {
4204         struct rte_eth_dev *dev;
4205
4206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4207
4208         dev = &rte_eth_devices[port_id];
4209
4210         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4211         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4212                                                                 queue_id));
4213 }
4214
4215
4216 int
4217 rte_eth_dev_filter_supported(uint16_t port_id,
4218                              enum rte_filter_type filter_type)
4219 {
4220         struct rte_eth_dev *dev;
4221
4222         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4223
4224         dev = &rte_eth_devices[port_id];
4225         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4226         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4227                                 RTE_ETH_FILTER_NOP, NULL);
4228 }
4229
4230 int
4231 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4232                         enum rte_filter_op filter_op, void *arg)
4233 {
4234         struct rte_eth_dev *dev;
4235
4236         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4237
4238         dev = &rte_eth_devices[port_id];
4239         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4240         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4241                                                              filter_op, arg));
4242 }
4243
4244 const struct rte_eth_rxtx_callback *
4245 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4246                 rte_rx_callback_fn fn, void *user_param)
4247 {
4248 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4249         rte_errno = ENOTSUP;
4250         return NULL;
4251 #endif
4252         struct rte_eth_dev *dev;
4253
4254         /* check input parameters */
4255         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4256                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4257                 rte_errno = EINVAL;
4258                 return NULL;
4259         }
4260         dev = &rte_eth_devices[port_id];
4261         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4262                 rte_errno = EINVAL;
4263                 return NULL;
4264         }
4265         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4266
4267         if (cb == NULL) {
4268                 rte_errno = ENOMEM;
4269                 return NULL;
4270         }
4271
4272         cb->fn.rx = fn;
4273         cb->param = user_param;
4274
4275         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4276         /* Add the callbacks in FIFO order. */
4277         struct rte_eth_rxtx_callback *tail =
4278                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4279
4280         if (!tail) {
4281                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4282
4283         } else {
4284                 while (tail->next)
4285                         tail = tail->next;
4286                 tail->next = cb;
4287         }
4288         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4289
4290         return cb;
4291 }
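
/*
 * Illustrative sketch: a post-Rx callback that counts received packets,
 * installed on queue 0 of port 0. The counter and all ids are example
 * assumptions; callbacks should be installed before the data path runs.
 */
static __rte_unused uint64_t example_rx_pkts;

static __rte_unused uint16_t
example_count_cb(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf *pkts[], uint16_t nb_pkts,
		 uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);
	example_rx_pkts += nb_pkts;
	return nb_pkts;
}

static __rte_unused const struct rte_eth_rxtx_callback *
example_install_rx_counter(void)
{
	return rte_eth_add_rx_callback(0, 0, example_count_cb, NULL);
}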
4292
4293 const struct rte_eth_rxtx_callback *
4294 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4295                 rte_rx_callback_fn fn, void *user_param)
4296 {
4297 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4298         rte_errno = ENOTSUP;
4299         return NULL;
4300 #endif
4301         /* check input parameters */
4302         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4303                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4304                 rte_errno = EINVAL;
4305                 return NULL;
4306         }
4307
4308         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4309
4310         if (cb == NULL) {
4311                 rte_errno = ENOMEM;
4312                 return NULL;
4313         }
4314
4315         cb->fn.rx = fn;
4316         cb->param = user_param;
4317
4318         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4319         /* Add the callbacks at first position */
4320         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4321         rte_smp_wmb();
4322         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4323         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4324
4325         return cb;
4326 }
4327
4328 const struct rte_eth_rxtx_callback *
4329 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4330                 rte_tx_callback_fn fn, void *user_param)
4331 {
4332 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4333         rte_errno = ENOTSUP;
4334         return NULL;
4335 #endif
4336         struct rte_eth_dev *dev;
4337
4338         /* check input parameters */
4339         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4340                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4341                 rte_errno = EINVAL;
4342                 return NULL;
4343         }
4344
4345         dev = &rte_eth_devices[port_id];
4346         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4347                 rte_errno = EINVAL;
4348                 return NULL;
4349         }
4350
4351         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4352
4353         if (cb == NULL) {
4354                 rte_errno = ENOMEM;
4355                 return NULL;
4356         }
4357
4358         cb->fn.tx = fn;
4359         cb->param = user_param;
4360
4361         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4362         /* Add the callbacks in FIFO order. */
4363         struct rte_eth_rxtx_callback *tail =
4364                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4365
4366         if (!tail) {
4367                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4368
4369         } else {
4370                 while (tail->next)
4371                         tail = tail->next;
4372                 tail->next = cb;
4373         }
4374         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4375
4376         return cb;
4377 }
4378
4379 int
4380 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4381                 const struct rte_eth_rxtx_callback *user_cb)
4382 {
4383 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4384         return -ENOTSUP;
4385 #endif
4386         /* Check input parameters. */
4387         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4388         if (user_cb == NULL ||
4389                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4390                 return -EINVAL;
4391
4392         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4393         struct rte_eth_rxtx_callback *cb;
4394         struct rte_eth_rxtx_callback **prev_cb;
4395         int ret = -EINVAL;
4396
4397         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4398         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4399         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4400                 cb = *prev_cb;
4401                 if (cb == user_cb) {
4402                         /* Remove the user cb from the callback list. */
4403                         *prev_cb = cb->next;
4404                         ret = 0;
4405                         break;
4406                 }
4407         }
4408         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4409
4410         return ret;
4411 }
4412
4413 int
4414 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4415                 const struct rte_eth_rxtx_callback *user_cb)
4416 {
4417 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4418         return -ENOTSUP;
4419 #endif
4420         /* Check input parameters. */
4421         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4422         if (user_cb == NULL ||
4423                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4424                 return -EINVAL;
4425
4426         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4427         int ret = -EINVAL;
4428         struct rte_eth_rxtx_callback *cb;
4429         struct rte_eth_rxtx_callback **prev_cb;
4430
4431         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4432         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4433         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4434                 cb = *prev_cb;
4435                 if (cb == user_cb) {
4436                         /* Remove the user cb from the callback list. */
4437                         *prev_cb = cb->next;
4438                         ret = 0;
4439                         break;
4440                 }
4441         }
4442         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4443
4444         return ret;
4445 }
4446
4447 int
4448 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4449         struct rte_eth_rxq_info *qinfo)
4450 {
4451         struct rte_eth_dev *dev;
4452
4453         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4454
4455         if (qinfo == NULL)
4456                 return -EINVAL;
4457
4458         dev = &rte_eth_devices[port_id];
4459         if (queue_id >= dev->data->nb_rx_queues) {
4460                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4461                 return -EINVAL;
4462         }
4463
4464         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4465                 RTE_ETHDEV_LOG(INFO,
4466                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4467                         queue_id, port_id);
4468                 return -EINVAL;
4469         }
4470
4471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4472
4473         memset(qinfo, 0, sizeof(*qinfo));
4474         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4475         return 0;
4476 }
4477
4478 int
4479 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4480         struct rte_eth_txq_info *qinfo)
4481 {
4482         struct rte_eth_dev *dev;
4483
4484         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4485
4486         if (qinfo == NULL)
4487                 return -EINVAL;
4488
4489         dev = &rte_eth_devices[port_id];
4490         if (queue_id >= dev->data->nb_tx_queues) {
4491                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4492                 return -EINVAL;
4493         }
4494
4495         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4496                 RTE_ETHDEV_LOG(INFO,
4497                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4498                         queue_id, port_id);
4499                 return -EINVAL;
4500         }
4501
4502         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4503
4504         memset(qinfo, 0, sizeof(*qinfo));
4505         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4506
4507         return 0;
4508 }

int
rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
			  struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (mode == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
	memset(mode, 0, sizeof(*mode));
	return eth_err(port_id,
		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
}
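
/*
 * Illustrative sketch, not part of the library: burst mode introspection is
 * an optional driver hook, so callers should treat -ENOTSUP as a normal
 * outcome rather than a failure. The helper below only reports whether the
 * query succeeded; interpreting the returned mode is left to the caller.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_probe_rx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	int ret;

	ret = rte_eth_rx_burst_mode_get(port_id, queue_id, &mode);
	if (ret == -ENOTSUP)
		return 0; /* PMD does not describe its burst functions */
	if (ret < 0)
		return ret; /* invalid port/queue or other failure */
	/* mode now describes the Rx burst function the PMD selected */
	return 1;
}
#endif /* ETHDEV_DOC_EXAMPLE */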

int
rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
			  struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (mode == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
	memset(mode, 0, sizeof(*mode));
	return eth_err(port_id,
		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
}

int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
			     struct rte_ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
						mc_addr_set, nb_mc_addr));
}
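
/*
 * Illustrative sketch, not part of the library: the multicast list is set
 * as a whole, so each call replaces the previous set and a count of zero
 * clears it. The address below maps the IPv4 all-hosts group 224.0.0.1.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_set_mc_list(uint16_t port_id)
{
	struct rte_ether_addr mc[1] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
	};
	int ret;

	ret = rte_eth_dev_set_mc_addr_list(port_id, mc, 1);
	if (ret != 0)
		return ret;
	/* ... later, drop all multicast filters again ... */
	return rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
}
#endif /* ETHDEV_DOC_EXAMPLE */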

int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
				(dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
				(dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								timestamp));
}
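
/*
 * Illustrative sketch, not part of the library: a minimal IEEE 1588-style
 * sequence over the timesync hooks above. The flags argument of the Rx
 * timestamp read is PMD-specific (some NICs use it to select a timestamp
 * register), so 0 below is only a placeholder, and the clock correction is
 * assumed to be computed externally by the caller's PTP servo.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_timesync_step(uint16_t port_id, int64_t correction_ns)
{
	struct timespec ts;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;
	/* ... receive a PTP event packet, then fetch its HW timestamp ... */
	ret = rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
	if (ret != 0)
		return ret;
	/* nudge the NIC clock by the externally computed offset */
	return rte_eth_timesync_adjust_time(port_id, correction_ns);
}
#endif /* ETHDEV_DOC_EXAMPLE */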

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								timestamp));
}

int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
}
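
/*
 * Illustrative sketch, not part of the library: rte_eth_read_clock() returns
 * the raw free-running device clock, whose frequency is PMD-specific, so
 * only deltas between two reads on the same port are meaningful without an
 * externally derived cycles-per-second estimate.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_clock_delta(uint16_t port_id, uint64_t *delta)
{
	uint64_t start, end;
	int ret;

	ret = rte_eth_read_clock(port_id, &start);
	if (ret != 0)
		return ret; /* -ENOTSUP if the PMD has no read_clock hook */
	/* ... run the workload being measured ... */
	ret = rte_eth_read_clock(port_id, &end);
	if (ret != 0)
		return ret;
	*delta = end - start;
	return 0;
}
#endif /* ETHDEV_DOC_EXAMPLE */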

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}
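
/*
 * Illustrative sketch, not part of the library: the usual EEPROM read
 * pattern is to size the buffer from rte_eth_dev_get_eeprom_length() and
 * then fill a rte_dev_eeprom_info request covering the whole range. The
 * magic field is device-specific and is left zero here.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_dump_eeprom(uint16_t port_id)
{
	struct rte_dev_eeprom_info info;
	int len, ret;

	len = rte_eth_dev_get_eeprom_length(port_id);
	if (len <= 0)
		return len; /* error, or nothing to read */

	memset(&info, 0, sizeof(info));
	info.data = malloc(len);
	if (info.data == NULL)
		return -ENOMEM;
	info.offset = 0;
	info.length = len;

	ret = rte_eth_dev_get_eeprom(port_id, &info);
	/* ... use info.data on success ... */
	free(info.data);
	return ret;
}
#endif /* ETHDEV_DOC_EXAMPLE */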

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}

int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
	return (*dev->dev_ops->get_module_info)(dev, modinfo);
}

int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_module_eeprom)(dev, info);
}
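
/*
 * Illustrative sketch, not part of the library: plugin module (SFP/QSFP)
 * EEPROM is read in two steps, first asking the PMD for the module type
 * and EEPROM size, then requesting that many bytes.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_read_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&info, 0, sizeof(info));
	info.data = malloc(modinfo.eeprom_len);
	if (info.data == NULL)
		return -ENOMEM;
	info.length = modinfo.eeprom_len;

	ret = rte_eth_dev_get_module_eeprom(port_id, &info);
	/* ... decode per modinfo.type (e.g. RTE_ETH_MODULE_SFF_8079) ... */
	free(info.data);
	return ret;
}
#endif /* ETHDEV_DOC_EXAMPLE */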

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			     struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (dcb_info == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}
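
/*
 * Illustrative sketch, not part of the library (same hypothetical guard as
 * the earlier sketches): reads back the DCB traffic-class layout a PMD
 * reports after DCB has been configured on the port.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_show_dcb_tcs(uint16_t port_id)
{
	struct rte_eth_dcb_info dcb;
	int ret;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb);
	if (ret != 0)
		return ret; /* -ENOTSUP on PMDs without DCB support */
	printf("port %u: %u traffic classes\n",
	       (unsigned int)port_id, (unsigned int)dcb.nb_tcs);
	return 0;
}
#endif /* ETHDEV_DOC_EXAMPLE */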

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
								l2_tunnel));
}

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
							l2_tunnel, mask, en));
}

static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}
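
/*
 * Worked example for the helper above (illustrative numbers): with
 * desc_lim = { .nb_max = 4096, .nb_min = 64, .nb_align = 32 }, a request
 * for 1000 descriptors is first rounded up to 1024 by RTE_ALIGN_CEIL,
 * stays below nb_max, and exceeds nb_min, so 1024 is kept. A request of 0
 * survives the align and max steps unchanged and is then raised to the
 * nb_min floor of 64.
 */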

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}
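
/*
 * Illustrative sketch, not part of the library: applications typically call
 * rte_eth_dev_adjust_nb_rx_tx_desc() on their preferred ring sizes before
 * queue setup, so that out-of-range requests are clamped instead of making
 * rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() fail.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_pick_ring_sizes(uint16_t port_id,
		uint16_t *nb_rxd, uint16_t *nb_txd)
{
	*nb_rxd = 1024;
	*nb_txd = 1024;
	/* both values may be adjusted in place to fit device limits */
	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}
#endif /* ETHDEV_DOC_EXAMPLE */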

int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (cap == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
	memset(cap, 0, sizeof(*cap));
	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
}
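
/*
 * Illustrative sketch, not part of the library: probes whether the port can
 * do hairpin forwarding at all before any hairpin queue is configured.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_hairpin_supported(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;

	if (rte_eth_dev_hairpin_capability_get(port_id, &cap) != 0)
		return 0; /* no support or invalid port */
	return cap.max_nb_queues > 0;
}
#endif /* ETHDEV_DOC_EXAMPLE */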

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	return dev->data->rx_queue_state[queue_id] ==
	       RTE_ETH_QUEUE_STATE_HAIRPIN;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	return dev->data->tx_queue_state[queue_id] ==
	       RTE_ETH_QUEUE_STATE_HAIRPIN;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
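
/*
 * Illustrative sketch, not part of the library: checks whether a port can
 * work with mbufs from a pool built on the "ring_mp_mc" mempool ops (the
 * default software ring driver). A missing driver hook means every pool is
 * acceptable, hence the unconditional positive return above.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static bool
example_pool_ok(uint16_t port_id)
{
	return rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") > 0;
}
#endif /* ETHDEV_DOC_EXAMPLE */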

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} rte_eth_switch_domains[RTE_MAX_ETHPORTS];

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	unsigned int i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
		i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			rte_eth_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (rte_eth_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
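
/*
 * Illustrative sketch, not part of the library: a PF driver typically
 * allocates one switch domain, stamps it into the switch info of every
 * representor it creates, then releases the domain on teardown.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_switch_domain_lifecycle(void)
{
	uint16_t domain_id;
	int ret;

	ret = rte_eth_switch_domain_alloc(&domain_id);
	if (ret != 0)
		return ret; /* -ENOSPC once all slots are allocated */
	/* ... create representor ports sharing domain_id ... */
	return rte_eth_switch_domain_free(domain_id);
}
#endif /* ETHDEV_DOC_EXAMPLE */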

static int
rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fall-thru */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}
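
/*
 * Worked example for the tokeniser above (illustrative input): the string
 * "representor=[0,2-4],driver=x" walks state 0->1 while scanning each key,
 * 1->2 at '=', 2->3 and back at '[' / ']' so that commas inside the bracket
 * list do not split the pair, and back to 0 at the comma after ']'. The
 * result is two pairs: ("representor", "[0,2-4]") and ("driver", "x").
 */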

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = rte_eth_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			result = rte_eth_devargs_parse_list(pair->value,
				rte_eth_devargs_parse_representor_ports,
				eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	if (args.str)
		free(args.str);

	return result;
}
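
/*
 * Illustrative sketch, not part of the library: parses the kind of devargs
 * string a PMD receives when probed with a representor list, e.g.
 * "representor=[0-3]". The field names in the comment below reflect the
 * rte_eth_devargs layout assumed by this sketch.
 */
#ifdef ETHDEV_DOC_EXAMPLE
static int
example_parse_representors(void)
{
	struct rte_eth_devargs da;
	int ret;

	ret = rte_eth_devargs_parse("representor=[0-3]", &da);
	if (ret < 0)
		return ret;
	/* expect da.nb_representor_ports == 4, ports 0,1,2,3 */
	return 0;
}
#endif /* ETHDEV_DOC_EXAMPLE */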

RTE_INIT(ethdev_init_log)
{
	rte_eth_dev_logtype = rte_log_register("lib.ethdev");
	if (rte_eth_dev_logtype >= 0)
		rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
}