ethdev: add hairpin queue
[dpdk.git] / lib / librte_ethdev / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <inttypes.h>
16 #include <netinet/in.h>
17
18 #include <rte_byteorder.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_common.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_errno.h>
36 #include <rte_spinlock.h>
37 #include <rte_string_fns.h>
38 #include <rte_kvargs.h>
39 #include <rte_class.h>
40 #include <rte_ether.h>
41
42 #include "rte_ethdev.h"
43 #include "rte_ethdev_driver.h"
44 #include "ethdev_profile.h"
45 #include "ethdev_private.h"
46
47 int rte_eth_dev_logtype;
48
49 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
50 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
51
52 /* spinlock for eth device callbacks */
53 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
54
55 /* spinlock for add/remove rx callbacks */
56 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
57
58 /* spinlock for add/remove tx callbacks */
59 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
60
61 /* spinlock for shared data allocation */
62 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
63
64 /* store statistics names and its offset in stats structure  */
65 struct rte_eth_xstats_name_off {
66         char name[RTE_ETH_XSTATS_NAME_SIZE];
67         unsigned offset;
68 };
69
70 /* Shared memory between primary and secondary processes. */
71 static struct {
72         uint64_t next_owner_id;
73         rte_spinlock_t ownership_lock;
74         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
75 } *rte_eth_dev_shared_data;
76
77 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
78         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
79         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
80         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
81         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
82         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
83         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
84         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
85         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
86                 rx_nombuf)},
87 };
88
89 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
90
91 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
92         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
93         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
94         {"errors", offsetof(struct rte_eth_stats, q_errors)},
95 };
96
97 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
98                 sizeof(rte_rxq_stats_strings[0]))
99
100 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
101         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
102         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
103 };
104 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
105                 sizeof(rte_txq_stats_strings[0]))
106
107 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
108         { DEV_RX_OFFLOAD_##_name, #_name }
109
110 static const struct {
111         uint64_t offload;
112         const char *name;
113 } rte_rx_offload_names[] = {
114         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
127         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
128         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
129         RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
130         RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
131         RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
132 };
133
134 #undef RTE_RX_OFFLOAD_BIT2STR
135
136 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
137         { DEV_TX_OFFLOAD_##_name, #_name }
138
139 static const struct {
140         uint64_t offload;
141         const char *name;
142 } rte_tx_offload_names[] = {
143         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
144         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
147         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
148         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
150         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
151         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
152         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
155         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
156         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
157         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
158         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
159         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
160         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
161         RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
162         RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
163         RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
164         RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
165 };
166
167 #undef RTE_TX_OFFLOAD_BIT2STR
168
169 static const struct {
170         uint64_t option;
171         const char *name;
172 } rte_burst_option_names[] = {
173         { RTE_ETH_BURST_SCALAR, "Scalar" },
174         { RTE_ETH_BURST_VECTOR, "Vector" },
175
176         { RTE_ETH_BURST_ALTIVEC, "AltiVec" },
177         { RTE_ETH_BURST_NEON, "Neon" },
178         { RTE_ETH_BURST_SSE, "SSE" },
179         { RTE_ETH_BURST_AVX2, "AVX2" },
180         { RTE_ETH_BURST_AVX512, "AVX512" },
181
182         { RTE_ETH_BURST_SCATTERED, "Scattered" },
183         { RTE_ETH_BURST_BULK_ALLOC, "Bulk Alloc" },
184         { RTE_ETH_BURST_SIMPLE, "Simple" },
185         { RTE_ETH_BURST_PER_QUEUE, "Per Queue" },
186 };
187
188 /**
189  * The user application callback description.
190  *
191  * It contains callback address to be registered by user application,
192  * the pointer to the parameters for callback, and the event type.
193  */
194 struct rte_eth_dev_callback {
195         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
196         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
197         void *cb_arg;                           /**< Parameter for callback */
198         void *ret_param;                        /**< Return parameter */
199         enum rte_eth_event_type event;          /**< Interrupt event type */
200         uint32_t active;                        /**< Callback is executing */
201 };
202
203 enum {
204         STAT_QMAP_TX = 0,
205         STAT_QMAP_RX
206 };
207
208 int
209 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
210 {
211         int ret;
212         struct rte_devargs devargs = {.args = NULL};
213         const char *bus_param_key;
214         char *bus_str = NULL;
215         char *cls_str = NULL;
216         int str_size;
217
218         memset(iter, 0, sizeof(*iter));
219
220         /*
221          * The devargs string may use various syntaxes:
222          *   - 0000:08:00.0,representor=[1-3]
223          *   - pci:0000:06:00.0,representor=[0,5]
224          *   - class=eth,mac=00:11:22:33:44:55
225          * A new syntax is in development (not yet supported):
226          *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
227          */
228
229         /*
230          * Handle pure class filter (i.e. without any bus-level argument),
231          * from future new syntax.
232          * rte_devargs_parse() is not yet supporting the new syntax,
233          * that's why this simple case is temporarily parsed here.
234          */
235 #define iter_anybus_str "class=eth,"
236         if (strncmp(devargs_str, iter_anybus_str,
237                         strlen(iter_anybus_str)) == 0) {
238                 iter->cls_str = devargs_str + strlen(iter_anybus_str);
239                 goto end;
240         }
241
242         /* Split bus, device and parameters. */
243         ret = rte_devargs_parse(&devargs, devargs_str);
244         if (ret != 0)
245                 goto error;
246
247         /*
248          * Assume parameters of old syntax can match only at ethdev level.
249          * Extra parameters will be ignored, thanks to "+" prefix.
250          */
251         str_size = strlen(devargs.args) + 2;
252         cls_str = malloc(str_size);
253         if (cls_str == NULL) {
254                 ret = -ENOMEM;
255                 goto error;
256         }
257         ret = snprintf(cls_str, str_size, "+%s", devargs.args);
258         if (ret != str_size - 1) {
259                 ret = -EINVAL;
260                 goto error;
261         }
262         iter->cls_str = cls_str;
263         free(devargs.args); /* allocated by rte_devargs_parse() */
264         devargs.args = NULL;
265
266         iter->bus = devargs.bus;
267         if (iter->bus->dev_iterate == NULL) {
268                 ret = -ENOTSUP;
269                 goto error;
270         }
271
272         /* Convert bus args to new syntax for use with new API dev_iterate. */
273         if (strcmp(iter->bus->name, "vdev") == 0) {
274                 bus_param_key = "name";
275         } else if (strcmp(iter->bus->name, "pci") == 0) {
276                 bus_param_key = "addr";
277         } else {
278                 ret = -ENOTSUP;
279                 goto error;
280         }
281         str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
282         bus_str = malloc(str_size);
283         if (bus_str == NULL) {
284                 ret = -ENOMEM;
285                 goto error;
286         }
287         ret = snprintf(bus_str, str_size, "%s=%s",
288                         bus_param_key, devargs.name);
289         if (ret != str_size - 1) {
290                 ret = -EINVAL;
291                 goto error;
292         }
293         iter->bus_str = bus_str;
294
295 end:
296         iter->cls = rte_class_find_by_name("eth");
297         return 0;
298
299 error:
300         if (ret == -ENOTSUP)
301                 RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
302                                 iter->bus->name);
303         free(devargs.args);
304         free(bus_str);
305         free(cls_str);
306         return ret;
307 }
308
309 uint16_t
310 rte_eth_iterator_next(struct rte_dev_iterator *iter)
311 {
312         if (iter->cls == NULL) /* invalid ethdev iterator */
313                 return RTE_MAX_ETHPORTS;
314
315         do { /* loop to try all matching rte_device */
316                 /* If not pure ethdev filter and */
317                 if (iter->bus != NULL &&
318                                 /* not in middle of rte_eth_dev iteration, */
319                                 iter->class_device == NULL) {
320                         /* get next rte_device to try. */
321                         iter->device = iter->bus->dev_iterate(
322                                         iter->device, iter->bus_str, iter);
323                         if (iter->device == NULL)
324                                 break; /* no more rte_device candidate */
325                 }
326                 /* A device is matching bus part, need to check ethdev part. */
327                 iter->class_device = iter->cls->dev_iterate(
328                                 iter->class_device, iter->cls_str, iter);
329                 if (iter->class_device != NULL)
330                         return eth_dev_to_id(iter->class_device); /* match */
331         } while (iter->bus != NULL); /* need to try next rte_device */
332
333         /* No more ethdev port to iterate. */
334         rte_eth_iterator_cleanup(iter);
335         return RTE_MAX_ETHPORTS;
336 }
337
338 void
339 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
340 {
341         if (iter->bus_str == NULL)
342                 return; /* nothing to free in pure class filter */
343         free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
344         free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
345         memset(iter, 0, sizeof(*iter));
346 }
347
348 uint16_t
349 rte_eth_find_next(uint16_t port_id)
350 {
351         while (port_id < RTE_MAX_ETHPORTS &&
352                         rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
353                 port_id++;
354
355         if (port_id >= RTE_MAX_ETHPORTS)
356                 return RTE_MAX_ETHPORTS;
357
358         return port_id;
359 }
360
361 /*
362  * Macro to iterate over all valid ports for internal usage.
363  * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports.
364  */
365 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
366         for (port_id = rte_eth_find_next(0); \
367              port_id < RTE_MAX_ETHPORTS; \
368              port_id = rte_eth_find_next(port_id + 1))
369
370 uint16_t
371 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
372 {
373         port_id = rte_eth_find_next(port_id);
374         while (port_id < RTE_MAX_ETHPORTS &&
375                         rte_eth_devices[port_id].device != parent)
376                 port_id = rte_eth_find_next(port_id + 1);
377
378         return port_id;
379 }
380
381 uint16_t
382 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
383 {
384         RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
385         return rte_eth_find_next_of(port_id,
386                         rte_eth_devices[ref_port_id].device);
387 }
388
389 static void
390 rte_eth_dev_shared_data_prepare(void)
391 {
392         const unsigned flags = 0;
393         const struct rte_memzone *mz;
394
395         rte_spinlock_lock(&rte_eth_shared_data_lock);
396
397         if (rte_eth_dev_shared_data == NULL) {
398                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
399                         /* Allocate port data and ownership shared memory. */
400                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
401                                         sizeof(*rte_eth_dev_shared_data),
402                                         rte_socket_id(), flags);
403                 } else
404                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
405                 if (mz == NULL)
406                         rte_panic("Cannot allocate ethdev shared data\n");
407
408                 rte_eth_dev_shared_data = mz->addr;
409                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
410                         rte_eth_dev_shared_data->next_owner_id =
411                                         RTE_ETH_DEV_NO_OWNER + 1;
412                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
413                         memset(rte_eth_dev_shared_data->data, 0,
414                                sizeof(rte_eth_dev_shared_data->data));
415                 }
416         }
417
418         rte_spinlock_unlock(&rte_eth_shared_data_lock);
419 }
420
421 static bool
422 is_allocated(const struct rte_eth_dev *ethdev)
423 {
424         return ethdev->data->name[0] != '\0';
425 }
426
427 static struct rte_eth_dev *
428 _rte_eth_dev_allocated(const char *name)
429 {
430         unsigned i;
431
432         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
433                 if (rte_eth_devices[i].data != NULL &&
434                     strcmp(rte_eth_devices[i].data->name, name) == 0)
435                         return &rte_eth_devices[i];
436         }
437         return NULL;
438 }
439
440 struct rte_eth_dev *
441 rte_eth_dev_allocated(const char *name)
442 {
443         struct rte_eth_dev *ethdev;
444
445         rte_eth_dev_shared_data_prepare();
446
447         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
448
449         ethdev = _rte_eth_dev_allocated(name);
450
451         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
452
453         return ethdev;
454 }
455
456 static uint16_t
457 rte_eth_dev_find_free_port(void)
458 {
459         unsigned i;
460
461         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
462                 /* Using shared name field to find a free port. */
463                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
464                         RTE_ASSERT(rte_eth_devices[i].state ==
465                                    RTE_ETH_DEV_UNUSED);
466                         return i;
467                 }
468         }
469         return RTE_MAX_ETHPORTS;
470 }
471
472 static struct rte_eth_dev *
473 eth_dev_get(uint16_t port_id)
474 {
475         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
476
477         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
478
479         return eth_dev;
480 }
481
482 struct rte_eth_dev *
483 rte_eth_dev_allocate(const char *name)
484 {
485         uint16_t port_id;
486         struct rte_eth_dev *eth_dev = NULL;
487         size_t name_len;
488
489         name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
490         if (name_len == 0) {
491                 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
492                 return NULL;
493         }
494
495         if (name_len >= RTE_ETH_NAME_MAX_LEN) {
496                 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
497                 return NULL;
498         }
499
500         rte_eth_dev_shared_data_prepare();
501
502         /* Synchronize port creation between primary and secondary threads. */
503         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
504
505         if (_rte_eth_dev_allocated(name) != NULL) {
506                 RTE_ETHDEV_LOG(ERR,
507                         "Ethernet device with name %s already allocated\n",
508                         name);
509                 goto unlock;
510         }
511
512         port_id = rte_eth_dev_find_free_port();
513         if (port_id == RTE_MAX_ETHPORTS) {
514                 RTE_ETHDEV_LOG(ERR,
515                         "Reached maximum number of Ethernet ports\n");
516                 goto unlock;
517         }
518
519         eth_dev = eth_dev_get(port_id);
520         strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
521         eth_dev->data->port_id = port_id;
522         eth_dev->data->mtu = RTE_ETHER_MTU;
523
524 unlock:
525         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
526
527         return eth_dev;
528 }
529
530 /*
531  * Attach to a port already registered by the primary process, which
532  * makes sure that the same device would have the same port id both
533  * in the primary and secondary process.
534  */
535 struct rte_eth_dev *
536 rte_eth_dev_attach_secondary(const char *name)
537 {
538         uint16_t i;
539         struct rte_eth_dev *eth_dev = NULL;
540
541         rte_eth_dev_shared_data_prepare();
542
543         /* Synchronize port attachment to primary port creation and release. */
544         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
545
546         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
547                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
548                         break;
549         }
550         if (i == RTE_MAX_ETHPORTS) {
551                 RTE_ETHDEV_LOG(ERR,
552                         "Device %s is not driven by the primary process\n",
553                         name);
554         } else {
555                 eth_dev = eth_dev_get(i);
556                 RTE_ASSERT(eth_dev->data->port_id == i);
557         }
558
559         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
560         return eth_dev;
561 }
562
563 int
564 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
565 {
566         if (eth_dev == NULL)
567                 return -EINVAL;
568
569         rte_eth_dev_shared_data_prepare();
570
571         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
572                 _rte_eth_dev_callback_process(eth_dev,
573                                 RTE_ETH_EVENT_DESTROY, NULL);
574
575         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
576
577         eth_dev->state = RTE_ETH_DEV_UNUSED;
578
579         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
580                 rte_free(eth_dev->data->rx_queues);
581                 rte_free(eth_dev->data->tx_queues);
582                 rte_free(eth_dev->data->mac_addrs);
583                 rte_free(eth_dev->data->hash_mac_addrs);
584                 rte_free(eth_dev->data->dev_private);
585                 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
586         }
587
588         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
589
590         return 0;
591 }
592
593 int
594 rte_eth_dev_is_valid_port(uint16_t port_id)
595 {
596         if (port_id >= RTE_MAX_ETHPORTS ||
597             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
598                 return 0;
599         else
600                 return 1;
601 }
602
603 static int
604 rte_eth_is_valid_owner_id(uint64_t owner_id)
605 {
606         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
607             rte_eth_dev_shared_data->next_owner_id <= owner_id)
608                 return 0;
609         return 1;
610 }
611
612 uint64_t
613 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
614 {
615         port_id = rte_eth_find_next(port_id);
616         while (port_id < RTE_MAX_ETHPORTS &&
617                         rte_eth_devices[port_id].data->owner.id != owner_id)
618                 port_id = rte_eth_find_next(port_id + 1);
619
620         return port_id;
621 }
622
623 int
624 rte_eth_dev_owner_new(uint64_t *owner_id)
625 {
626         rte_eth_dev_shared_data_prepare();
627
628         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
629
630         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
631
632         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
633         return 0;
634 }
635
636 static int
637 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
638                        const struct rte_eth_dev_owner *new_owner)
639 {
640         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
641         struct rte_eth_dev_owner *port_owner;
642
643         if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
644                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
645                         port_id);
646                 return -ENODEV;
647         }
648
649         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
650             !rte_eth_is_valid_owner_id(old_owner_id)) {
651                 RTE_ETHDEV_LOG(ERR,
652                         "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
653                        old_owner_id, new_owner->id);
654                 return -EINVAL;
655         }
656
657         port_owner = &rte_eth_devices[port_id].data->owner;
658         if (port_owner->id != old_owner_id) {
659                 RTE_ETHDEV_LOG(ERR,
660                         "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
661                         port_id, port_owner->name, port_owner->id);
662                 return -EPERM;
663         }
664
665         /* can not truncate (same structure) */
666         strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
667
668         port_owner->id = new_owner->id;
669
670         RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
671                 port_id, new_owner->name, new_owner->id);
672
673         return 0;
674 }
675
676 int
677 rte_eth_dev_owner_set(const uint16_t port_id,
678                       const struct rte_eth_dev_owner *owner)
679 {
680         int ret;
681
682         rte_eth_dev_shared_data_prepare();
683
684         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
685
686         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
687
688         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
689         return ret;
690 }
691
692 int
693 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
694 {
695         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
696                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
697         int ret;
698
699         rte_eth_dev_shared_data_prepare();
700
701         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
702
703         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
704
705         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
706         return ret;
707 }
708
709 int
710 rte_eth_dev_owner_delete(const uint64_t owner_id)
711 {
712         uint16_t port_id;
713         int ret = 0;
714
715         rte_eth_dev_shared_data_prepare();
716
717         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
718
719         if (rte_eth_is_valid_owner_id(owner_id)) {
720                 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
721                         if (rte_eth_devices[port_id].data->owner.id == owner_id)
722                                 memset(&rte_eth_devices[port_id].data->owner, 0,
723                                        sizeof(struct rte_eth_dev_owner));
724                 RTE_ETHDEV_LOG(NOTICE,
725                         "All port owners owned by %016"PRIx64" identifier have removed\n",
726                         owner_id);
727         } else {
728                 RTE_ETHDEV_LOG(ERR,
729                                "Invalid owner id=%016"PRIx64"\n",
730                                owner_id);
731                 ret = -EINVAL;
732         }
733
734         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
735
736         return ret;
737 }
738
739 int
740 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
741 {
742         int ret = 0;
743         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
744
745         rte_eth_dev_shared_data_prepare();
746
747         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
748
749         if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
750                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
751                         port_id);
752                 ret = -ENODEV;
753         } else {
754                 rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
755         }
756
757         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
758         return ret;
759 }
760
761 int
762 rte_eth_dev_socket_id(uint16_t port_id)
763 {
764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
765         return rte_eth_devices[port_id].data->numa_node;
766 }
767
768 void *
769 rte_eth_dev_get_sec_ctx(uint16_t port_id)
770 {
771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
772         return rte_eth_devices[port_id].security_ctx;
773 }
774
775 uint16_t
776 rte_eth_dev_count(void)
777 {
778         return rte_eth_dev_count_avail();
779 }
780
781 uint16_t
782 rte_eth_dev_count_avail(void)
783 {
784         uint16_t p;
785         uint16_t count;
786
787         count = 0;
788
789         RTE_ETH_FOREACH_DEV(p)
790                 count++;
791
792         return count;
793 }
794
795 uint16_t
796 rte_eth_dev_count_total(void)
797 {
798         uint16_t port, count = 0;
799
800         RTE_ETH_FOREACH_VALID_DEV(port)
801                 count++;
802
803         return count;
804 }
805
806 int
807 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
808 {
809         char *tmp;
810
811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
812
813         if (name == NULL) {
814                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
815                 return -EINVAL;
816         }
817
818         /* shouldn't check 'rte_eth_devices[i].data',
819          * because it might be overwritten by VDEV PMD */
820         tmp = rte_eth_dev_shared_data->data[port_id].name;
821         strcpy(name, tmp);
822         return 0;
823 }
824
825 int
826 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
827 {
828         uint32_t pid;
829
830         if (name == NULL) {
831                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
832                 return -EINVAL;
833         }
834
835         RTE_ETH_FOREACH_VALID_DEV(pid)
836                 if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
837                         *port_id = pid;
838                         return 0;
839                 }
840
841         return -ENODEV;
842 }
843
844 static int
845 eth_err(uint16_t port_id, int ret)
846 {
847         if (ret == 0)
848                 return 0;
849         if (rte_eth_dev_is_removed(port_id))
850                 return -EIO;
851         return ret;
852 }
853
854 static int
855 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
856 {
857         uint16_t old_nb_queues = dev->data->nb_rx_queues;
858         void **rxq;
859         unsigned i;
860
861         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
862                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
863                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
864                                 RTE_CACHE_LINE_SIZE);
865                 if (dev->data->rx_queues == NULL) {
866                         dev->data->nb_rx_queues = 0;
867                         return -(ENOMEM);
868                 }
869         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
870                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
871
872                 rxq = dev->data->rx_queues;
873
874                 for (i = nb_queues; i < old_nb_queues; i++)
875                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
876                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
877                                 RTE_CACHE_LINE_SIZE);
878                 if (rxq == NULL)
879                         return -(ENOMEM);
880                 if (nb_queues > old_nb_queues) {
881                         uint16_t new_qs = nb_queues - old_nb_queues;
882
883                         memset(rxq + old_nb_queues, 0,
884                                 sizeof(rxq[0]) * new_qs);
885                 }
886
887                 dev->data->rx_queues = rxq;
888
889         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
890                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
891
892                 rxq = dev->data->rx_queues;
893
894                 for (i = nb_queues; i < old_nb_queues; i++)
895                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
896
897                 rte_free(dev->data->rx_queues);
898                 dev->data->rx_queues = NULL;
899         }
900         dev->data->nb_rx_queues = nb_queues;
901         return 0;
902 }
903
904 int
905 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
906 {
907         struct rte_eth_dev *dev;
908
909         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
910
911         dev = &rte_eth_devices[port_id];
912         if (!dev->data->dev_started) {
913                 RTE_ETHDEV_LOG(ERR,
914                         "Port %u must be started before start any queue\n",
915                         port_id);
916                 return -EINVAL;
917         }
918
919         if (rx_queue_id >= dev->data->nb_rx_queues) {
920                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
921                 return -EINVAL;
922         }
923
924         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
925
926         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
927                 RTE_ETHDEV_LOG(INFO,
928                         "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
929                         rx_queue_id, port_id);
930                 return -EINVAL;
931         }
932
933         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
934                 RTE_ETHDEV_LOG(INFO,
935                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
936                         rx_queue_id, port_id);
937                 return 0;
938         }
939
940         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
941                                                              rx_queue_id));
942
943 }
944
945 int
946 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
947 {
948         struct rte_eth_dev *dev;
949
950         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
951
952         dev = &rte_eth_devices[port_id];
953         if (rx_queue_id >= dev->data->nb_rx_queues) {
954                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
955                 return -EINVAL;
956         }
957
958         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
959
960         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
961                 RTE_ETHDEV_LOG(INFO,
962                         "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
963                         rx_queue_id, port_id);
964                 return -EINVAL;
965         }
966
967         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
968                 RTE_ETHDEV_LOG(INFO,
969                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
970                         rx_queue_id, port_id);
971                 return 0;
972         }
973
974         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
975
976 }
977
978 int
979 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
980 {
981         struct rte_eth_dev *dev;
982
983         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
984
985         dev = &rte_eth_devices[port_id];
986         if (!dev->data->dev_started) {
987                 RTE_ETHDEV_LOG(ERR,
988                         "Port %u must be started before start any queue\n",
989                         port_id);
990                 return -EINVAL;
991         }
992
993         if (tx_queue_id >= dev->data->nb_tx_queues) {
994                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
995                 return -EINVAL;
996         }
997
998         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
999
1000         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1001                 RTE_ETHDEV_LOG(INFO,
1002                         "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1003                         tx_queue_id, port_id);
1004                 return -EINVAL;
1005         }
1006
1007         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
1008                 RTE_ETHDEV_LOG(INFO,
1009                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
1010                         tx_queue_id, port_id);
1011                 return 0;
1012         }
1013
1014         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
1015 }
1016
1017 int
1018 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
1019 {
1020         struct rte_eth_dev *dev;
1021
1022         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1023
1024         dev = &rte_eth_devices[port_id];
1025         if (tx_queue_id >= dev->data->nb_tx_queues) {
1026                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1027                 return -EINVAL;
1028         }
1029
1030         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
1031
1032         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1033                 RTE_ETHDEV_LOG(INFO,
1034                         "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1035                         tx_queue_id, port_id);
1036                 return -EINVAL;
1037         }
1038
1039         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1040                 RTE_ETHDEV_LOG(INFO,
1041                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1042                         tx_queue_id, port_id);
1043                 return 0;
1044         }
1045
1046         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
1047
1048 }
1049
1050 static int
1051 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
1052 {
1053         uint16_t old_nb_queues = dev->data->nb_tx_queues;
1054         void **txq;
1055         unsigned i;
1056
1057         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
1058                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1059                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
1060                                                    RTE_CACHE_LINE_SIZE);
1061                 if (dev->data->tx_queues == NULL) {
1062                         dev->data->nb_tx_queues = 0;
1063                         return -(ENOMEM);
1064                 }
1065         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
1066                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1067
1068                 txq = dev->data->tx_queues;
1069
1070                 for (i = nb_queues; i < old_nb_queues; i++)
1071                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1072                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1073                                   RTE_CACHE_LINE_SIZE);
1074                 if (txq == NULL)
1075                         return -ENOMEM;
1076                 if (nb_queues > old_nb_queues) {
1077                         uint16_t new_qs = nb_queues - old_nb_queues;
1078
1079                         memset(txq + old_nb_queues, 0,
1080                                sizeof(txq[0]) * new_qs);
1081                 }
1082
1083                 dev->data->tx_queues = txq;
1084
1085         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
1086                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1087
1088                 txq = dev->data->tx_queues;
1089
1090                 for (i = nb_queues; i < old_nb_queues; i++)
1091                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1092
1093                 rte_free(dev->data->tx_queues);
1094                 dev->data->tx_queues = NULL;
1095         }
1096         dev->data->nb_tx_queues = nb_queues;
1097         return 0;
1098 }
1099
1100 uint32_t
1101 rte_eth_speed_bitflag(uint32_t speed, int duplex)
1102 {
1103         switch (speed) {
1104         case ETH_SPEED_NUM_10M:
1105                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1106         case ETH_SPEED_NUM_100M:
1107                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1108         case ETH_SPEED_NUM_1G:
1109                 return ETH_LINK_SPEED_1G;
1110         case ETH_SPEED_NUM_2_5G:
1111                 return ETH_LINK_SPEED_2_5G;
1112         case ETH_SPEED_NUM_5G:
1113                 return ETH_LINK_SPEED_5G;
1114         case ETH_SPEED_NUM_10G:
1115                 return ETH_LINK_SPEED_10G;
1116         case ETH_SPEED_NUM_20G:
1117                 return ETH_LINK_SPEED_20G;
1118         case ETH_SPEED_NUM_25G:
1119                 return ETH_LINK_SPEED_25G;
1120         case ETH_SPEED_NUM_40G:
1121                 return ETH_LINK_SPEED_40G;
1122         case ETH_SPEED_NUM_50G:
1123                 return ETH_LINK_SPEED_50G;
1124         case ETH_SPEED_NUM_56G:
1125                 return ETH_LINK_SPEED_56G;
1126         case ETH_SPEED_NUM_100G:
1127                 return ETH_LINK_SPEED_100G;
1128         default:
1129                 return 0;
1130         }
1131 }
1132
1133 const char *
1134 rte_eth_dev_rx_offload_name(uint64_t offload)
1135 {
1136         const char *name = "UNKNOWN";
1137         unsigned int i;
1138
1139         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1140                 if (offload == rte_rx_offload_names[i].offload) {
1141                         name = rte_rx_offload_names[i].name;
1142                         break;
1143                 }
1144         }
1145
1146         return name;
1147 }
1148
1149 const char *
1150 rte_eth_dev_tx_offload_name(uint64_t offload)
1151 {
1152         const char *name = "UNKNOWN";
1153         unsigned int i;
1154
1155         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1156                 if (offload == rte_tx_offload_names[i].offload) {
1157                         name = rte_tx_offload_names[i].name;
1158                         break;
1159                 }
1160         }
1161
1162         return name;
1163 }
1164
1165 int
1166 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1167                       const struct rte_eth_conf *dev_conf)
1168 {
1169         struct rte_eth_dev *dev;
1170         struct rte_eth_dev_info dev_info;
1171         struct rte_eth_conf orig_conf;
1172         int diag;
1173         int ret;
1174
1175         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1176
1177         dev = &rte_eth_devices[port_id];
1178
1179         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1180
1181         if (dev->data->dev_started) {
1182                 RTE_ETHDEV_LOG(ERR,
1183                         "Port %u must be stopped to allow configuration\n",
1184                         port_id);
1185                 return -EBUSY;
1186         }
1187
1188          /* Store original config, as rollback required on failure */
1189         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1190
1191         /*
1192          * Copy the dev_conf parameter into the dev structure.
1193          * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1194          */
1195         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
1196
1197         ret = rte_eth_dev_info_get(port_id, &dev_info);
1198         if (ret != 0)
1199                 goto rollback;
1200
1201         /* If number of queues specified by application for both Rx and Tx is
1202          * zero, use driver preferred values. This cannot be done individually
1203          * as it is valid for either Tx or Rx (but not both) to be zero.
1204          * If driver does not provide any preferred valued, fall back on
1205          * EAL defaults.
1206          */
1207         if (nb_rx_q == 0 && nb_tx_q == 0) {
1208                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1209                 if (nb_rx_q == 0)
1210                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1211                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1212                 if (nb_tx_q == 0)
1213                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1214         }
1215
1216         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1217                 RTE_ETHDEV_LOG(ERR,
1218                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1219                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1220                 ret = -EINVAL;
1221                 goto rollback;
1222         }
1223
1224         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1225                 RTE_ETHDEV_LOG(ERR,
1226                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1227                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1228                 ret = -EINVAL;
1229                 goto rollback;
1230         }
1231
1232         /*
1233          * Check that the numbers of RX and TX queues are not greater
1234          * than the maximum number of RX and TX queues supported by the
1235          * configured device.
1236          */
1237         if (nb_rx_q > dev_info.max_rx_queues) {
1238                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1239                         port_id, nb_rx_q, dev_info.max_rx_queues);
1240                 ret = -EINVAL;
1241                 goto rollback;
1242         }
1243
1244         if (nb_tx_q > dev_info.max_tx_queues) {
1245                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1246                         port_id, nb_tx_q, dev_info.max_tx_queues);
1247                 ret = -EINVAL;
1248                 goto rollback;
1249         }
1250
1251         /* Check that the device supports requested interrupts */
1252         if ((dev_conf->intr_conf.lsc == 1) &&
1253                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1254                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1255                         dev->device->driver->name);
1256                 ret = -EINVAL;
1257                 goto rollback;
1258         }
1259         if ((dev_conf->intr_conf.rmv == 1) &&
1260                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1261                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1262                         dev->device->driver->name);
1263                 ret = -EINVAL;
1264                 goto rollback;
1265         }
1266
1267         /*
1268          * If jumbo frames are enabled, check that the maximum RX packet
1269          * length is supported by the configured device.
1270          */
1271         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1272                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1273                         RTE_ETHDEV_LOG(ERR,
1274                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1275                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1276                                 dev_info.max_rx_pktlen);
1277                         ret = -EINVAL;
1278                         goto rollback;
1279                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1280                         RTE_ETHDEV_LOG(ERR,
1281                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1282                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1283                                 (unsigned int)RTE_ETHER_MIN_LEN);
1284                         ret = -EINVAL;
1285                         goto rollback;
1286                 }
1287         } else {
1288                 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1289                         dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1290                         /* Use default value */
1291                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1292                                                         RTE_ETHER_MAX_LEN;
1293         }
1294
1295         /* Any requested offloading must be within its device capabilities */
1296         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1297              dev_conf->rxmode.offloads) {
1298                 RTE_ETHDEV_LOG(ERR,
1299                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1300                         "capabilities 0x%"PRIx64" in %s()\n",
1301                         port_id, dev_conf->rxmode.offloads,
1302                         dev_info.rx_offload_capa,
1303                         __func__);
1304                 ret = -EINVAL;
1305                 goto rollback;
1306         }
1307         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1308              dev_conf->txmode.offloads) {
1309                 RTE_ETHDEV_LOG(ERR,
1310                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1311                         "capabilities 0x%"PRIx64" in %s()\n",
1312                         port_id, dev_conf->txmode.offloads,
1313                         dev_info.tx_offload_capa,
1314                         __func__);
1315                 ret = -EINVAL;
1316                 goto rollback;
1317         }
1318
1319         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1320                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1321
1322         /* Check that device supports requested rss hash functions. */
1323         if ((dev_info.flow_type_rss_offloads |
1324              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1325             dev_info.flow_type_rss_offloads) {
1326                 RTE_ETHDEV_LOG(ERR,
1327                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1328                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1329                         dev_info.flow_type_rss_offloads);
1330                 ret = -EINVAL;
1331                 goto rollback;
1332         }
1333
1334         /*
1335          * Setup new number of RX/TX queues and reconfigure device.
1336          */
1337         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1338         if (diag != 0) {
1339                 RTE_ETHDEV_LOG(ERR,
1340                         "Port%u rte_eth_dev_rx_queue_config = %d\n",
1341                         port_id, diag);
1342                 ret = diag;
1343                 goto rollback;
1344         }
1345
1346         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1347         if (diag != 0) {
1348                 RTE_ETHDEV_LOG(ERR,
1349                         "Port%u rte_eth_dev_tx_queue_config = %d\n",
1350                         port_id, diag);
1351                 rte_eth_dev_rx_queue_config(dev, 0);
1352                 ret = diag;
1353                 goto rollback;
1354         }
1355
1356         diag = (*dev->dev_ops->dev_configure)(dev);
1357         if (diag != 0) {
1358                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1359                         port_id, diag);
1360                 rte_eth_dev_rx_queue_config(dev, 0);
1361                 rte_eth_dev_tx_queue_config(dev, 0);
1362                 ret = eth_err(port_id, diag);
1363                 goto rollback;
1364         }
1365
1366         /* Initialize Rx profiling if enabled at compilation time. */
1367         diag = __rte_eth_dev_profile_init(port_id, dev);
1368         if (diag != 0) {
1369                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1370                         port_id, diag);
1371                 rte_eth_dev_rx_queue_config(dev, 0);
1372                 rte_eth_dev_tx_queue_config(dev, 0);
1373                 ret = eth_err(port_id, diag);
1374                 goto rollback;
1375         }
1376
1377         return 0;
1378
1379 rollback:
1380         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1381
1382         return ret;
1383 }
1384
1385 void
1386 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1387 {
1388         if (dev->data->dev_started) {
1389                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1390                         dev->data->port_id);
1391                 return;
1392         }
1393
1394         rte_eth_dev_rx_queue_config(dev, 0);
1395         rte_eth_dev_tx_queue_config(dev, 0);
1396
1397         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1398 }
1399
1400 static void
1401 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1402                         struct rte_eth_dev_info *dev_info)
1403 {
1404         struct rte_ether_addr *addr;
1405         uint16_t i;
1406         uint32_t pool = 0;
1407         uint64_t pool_mask;
1408
1409         /* replay MAC address configuration including default MAC */
1410         addr = &dev->data->mac_addrs[0];
1411         if (*dev->dev_ops->mac_addr_set != NULL)
1412                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1413         else if (*dev->dev_ops->mac_addr_add != NULL)
1414                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1415
1416         if (*dev->dev_ops->mac_addr_add != NULL) {
1417                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1418                         addr = &dev->data->mac_addrs[i];
1419
1420                         /* skip zero address */
1421                         if (rte_is_zero_ether_addr(addr))
1422                                 continue;
1423
1424                         pool = 0;
1425                         pool_mask = dev->data->mac_pool_sel[i];
1426
1427                         do {
1428                                 if (pool_mask & 1ULL)
1429                                         (*dev->dev_ops->mac_addr_add)(dev,
1430                                                 addr, i, pool);
1431                                 pool_mask >>= 1;
1432                                 pool++;
1433                         } while (pool_mask);
1434                 }
1435         }
1436 }
1437
1438 static int
1439 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1440                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1441 {
1442         int ret;
1443
1444         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1445                 rte_eth_dev_mac_restore(dev, dev_info);
1446
1447         /* replay promiscuous configuration */
1448         /*
1449          * use callbacks directly since we don't need port_id check and
1450          * would like to bypass the same value set
1451          */
1452         if (rte_eth_promiscuous_get(port_id) == 1 &&
1453             *dev->dev_ops->promiscuous_enable != NULL) {
1454                 ret = eth_err(port_id,
1455                               (*dev->dev_ops->promiscuous_enable)(dev));
1456                 if (ret != 0 && ret != -ENOTSUP) {
1457                         RTE_ETHDEV_LOG(ERR,
1458                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1459                                 port_id, rte_strerror(-ret));
1460                         return ret;
1461                 }
1462         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1463                    *dev->dev_ops->promiscuous_disable != NULL) {
1464                 ret = eth_err(port_id,
1465                               (*dev->dev_ops->promiscuous_disable)(dev));
1466                 if (ret != 0 && ret != -ENOTSUP) {
1467                         RTE_ETHDEV_LOG(ERR,
1468                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1469                                 port_id, rte_strerror(-ret));
1470                         return ret;
1471                 }
1472         }
1473
1474         /* replay all multicast configuration */
1475         /*
1476          * use callbacks directly since we don't need port_id check and
1477          * would like to bypass the same value set
1478          */
1479         if (rte_eth_allmulticast_get(port_id) == 1 &&
1480             *dev->dev_ops->allmulticast_enable != NULL) {
1481                 ret = eth_err(port_id,
1482                               (*dev->dev_ops->allmulticast_enable)(dev));
1483                 if (ret != 0 && ret != -ENOTSUP) {
1484                         RTE_ETHDEV_LOG(ERR,
1485                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1486                                 port_id, rte_strerror(-ret));
1487                         return ret;
1488                 }
1489         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1490                    *dev->dev_ops->allmulticast_disable != NULL) {
1491                 ret = eth_err(port_id,
1492                               (*dev->dev_ops->allmulticast_disable)(dev));
1493                 if (ret != 0 && ret != -ENOTSUP) {
1494                         RTE_ETHDEV_LOG(ERR,
1495                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1496                                 port_id, rte_strerror(-ret));
1497                         return ret;
1498                 }
1499         }
1500
1501         return 0;
1502 }
1503
1504 int
1505 rte_eth_dev_start(uint16_t port_id)
1506 {
1507         struct rte_eth_dev *dev;
1508         struct rte_eth_dev_info dev_info;
1509         int diag;
1510         int ret;
1511
1512         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1513
1514         dev = &rte_eth_devices[port_id];
1515
1516         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1517
1518         if (dev->data->dev_started != 0) {
1519                 RTE_ETHDEV_LOG(INFO,
1520                         "Device with port_id=%"PRIu16" already started\n",
1521                         port_id);
1522                 return 0;
1523         }
1524
1525         ret = rte_eth_dev_info_get(port_id, &dev_info);
1526         if (ret != 0)
1527                 return ret;
1528
1529         /* Lets restore MAC now if device does not support live change */
1530         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1531                 rte_eth_dev_mac_restore(dev, &dev_info);
1532
1533         diag = (*dev->dev_ops->dev_start)(dev);
1534         if (diag == 0)
1535                 dev->data->dev_started = 1;
1536         else
1537                 return eth_err(port_id, diag);
1538
1539         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1540         if (ret != 0) {
1541                 RTE_ETHDEV_LOG(ERR,
1542                         "Error during restoring configuration for device (port %u): %s\n",
1543                         port_id, rte_strerror(-ret));
1544                 rte_eth_dev_stop(port_id);
1545                 return ret;
1546         }
1547
1548         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1549                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1550                 (*dev->dev_ops->link_update)(dev, 0);
1551         }
1552         return 0;
1553 }
1554
1555 void
1556 rte_eth_dev_stop(uint16_t port_id)
1557 {
1558         struct rte_eth_dev *dev;
1559
1560         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1561         dev = &rte_eth_devices[port_id];
1562
1563         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1564
1565         if (dev->data->dev_started == 0) {
1566                 RTE_ETHDEV_LOG(INFO,
1567                         "Device with port_id=%"PRIu16" already stopped\n",
1568                         port_id);
1569                 return;
1570         }
1571
1572         dev->data->dev_started = 0;
1573         (*dev->dev_ops->dev_stop)(dev);
1574 }
1575
1576 int
1577 rte_eth_dev_set_link_up(uint16_t port_id)
1578 {
1579         struct rte_eth_dev *dev;
1580
1581         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1582
1583         dev = &rte_eth_devices[port_id];
1584
1585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1586         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1587 }
1588
1589 int
1590 rte_eth_dev_set_link_down(uint16_t port_id)
1591 {
1592         struct rte_eth_dev *dev;
1593
1594         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1595
1596         dev = &rte_eth_devices[port_id];
1597
1598         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1599         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1600 }
1601
1602 void
1603 rte_eth_dev_close(uint16_t port_id)
1604 {
1605         struct rte_eth_dev *dev;
1606
1607         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1608         dev = &rte_eth_devices[port_id];
1609
1610         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1611         dev->data->dev_started = 0;
1612         (*dev->dev_ops->dev_close)(dev);
1613
1614         /* check behaviour flag - temporary for PMD migration */
1615         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1616                 /* new behaviour: send event + reset state + free all data */
1617                 rte_eth_dev_release_port(dev);
1618                 return;
1619         }
1620         RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
1621                         "The driver %s should migrate to the new behaviour.\n",
1622                         dev->device->driver->name);
1623         /* old behaviour: only free queue arrays */
1624         dev->data->nb_rx_queues = 0;
1625         rte_free(dev->data->rx_queues);
1626         dev->data->rx_queues = NULL;
1627         dev->data->nb_tx_queues = 0;
1628         rte_free(dev->data->tx_queues);
1629         dev->data->tx_queues = NULL;
1630 }
1631
1632 int
1633 rte_eth_dev_reset(uint16_t port_id)
1634 {
1635         struct rte_eth_dev *dev;
1636         int ret;
1637
1638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1639         dev = &rte_eth_devices[port_id];
1640
1641         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1642
1643         rte_eth_dev_stop(port_id);
1644         ret = dev->dev_ops->dev_reset(dev);
1645
1646         return eth_err(port_id, ret);
1647 }
1648
1649 int
1650 rte_eth_dev_is_removed(uint16_t port_id)
1651 {
1652         struct rte_eth_dev *dev;
1653         int ret;
1654
1655         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1656
1657         dev = &rte_eth_devices[port_id];
1658
1659         if (dev->state == RTE_ETH_DEV_REMOVED)
1660                 return 1;
1661
1662         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1663
1664         ret = dev->dev_ops->is_removed(dev);
1665         if (ret != 0)
1666                 /* Device is physically removed. */
1667                 dev->state = RTE_ETH_DEV_REMOVED;
1668
1669         return ret;
1670 }
1671
1672 int
1673 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1674                        uint16_t nb_rx_desc, unsigned int socket_id,
1675                        const struct rte_eth_rxconf *rx_conf,
1676                        struct rte_mempool *mp)
1677 {
1678         int ret;
1679         uint32_t mbp_buf_size;
1680         struct rte_eth_dev *dev;
1681         struct rte_eth_dev_info dev_info;
1682         struct rte_eth_rxconf local_conf;
1683         void **rxq;
1684
1685         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1686
1687         dev = &rte_eth_devices[port_id];
1688         if (rx_queue_id >= dev->data->nb_rx_queues) {
1689                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1690                 return -EINVAL;
1691         }
1692
1693         if (mp == NULL) {
1694                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1695                 return -EINVAL;
1696         }
1697
1698         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1699
1700         /*
1701          * Check the size of the mbuf data buffer.
1702          * This value must be provided in the private data of the memory pool.
1703          * First check that the memory pool has a valid private data.
1704          */
1705         ret = rte_eth_dev_info_get(port_id, &dev_info);
1706         if (ret != 0)
1707                 return ret;
1708
1709         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1710                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1711                         mp->name, (int)mp->private_data_size,
1712                         (int)sizeof(struct rte_pktmbuf_pool_private));
1713                 return -ENOSPC;
1714         }
1715         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1716
1717         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1718                 RTE_ETHDEV_LOG(ERR,
1719                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1720                         mp->name, (int)mbp_buf_size,
1721                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1722                         (int)RTE_PKTMBUF_HEADROOM,
1723                         (int)dev_info.min_rx_bufsize);
1724                 return -EINVAL;
1725         }
1726
1727         /* Use default specified by driver, if nb_rx_desc is zero */
1728         if (nb_rx_desc == 0) {
1729                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1730                 /* If driver default is also zero, fall back on EAL default */
1731                 if (nb_rx_desc == 0)
1732                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1733         }
1734
1735         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1736                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1737                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1738
1739                 RTE_ETHDEV_LOG(ERR,
1740                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1741                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1742                         dev_info.rx_desc_lim.nb_min,
1743                         dev_info.rx_desc_lim.nb_align);
1744                 return -EINVAL;
1745         }
1746
1747         if (dev->data->dev_started &&
1748                 !(dev_info.dev_capa &
1749                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1750                 return -EBUSY;
1751
1752         if (dev->data->dev_started &&
1753                 (dev->data->rx_queue_state[rx_queue_id] !=
1754                         RTE_ETH_QUEUE_STATE_STOPPED))
1755                 return -EBUSY;
1756
1757         rxq = dev->data->rx_queues;
1758         if (rxq[rx_queue_id]) {
1759                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1760                                         -ENOTSUP);
1761                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1762                 rxq[rx_queue_id] = NULL;
1763         }
1764
1765         if (rx_conf == NULL)
1766                 rx_conf = &dev_info.default_rxconf;
1767
1768         local_conf = *rx_conf;
1769
1770         /*
1771          * If an offloading has already been enabled in
1772          * rte_eth_dev_configure(), it has been enabled on all queues,
1773          * so there is no need to enable it in this queue again.
1774          * The local_conf.offloads input to underlying PMD only carries
1775          * those offloadings which are only enabled on this queue and
1776          * not enabled on all queues.
1777          */
1778         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1779
1780         /*
1781          * New added offloadings for this queue are those not enabled in
1782          * rte_eth_dev_configure() and they must be per-queue type.
1783          * A pure per-port offloading can't be enabled on a queue while
1784          * disabled on another queue. A pure per-port offloading can't
1785          * be enabled for any queue as new added one if it hasn't been
1786          * enabled in rte_eth_dev_configure().
1787          */
1788         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1789              local_conf.offloads) {
1790                 RTE_ETHDEV_LOG(ERR,
1791                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1792                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1793                         port_id, rx_queue_id, local_conf.offloads,
1794                         dev_info.rx_queue_offload_capa,
1795                         __func__);
1796                 return -EINVAL;
1797         }
1798
1799         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1800                                               socket_id, &local_conf, mp);
1801         if (!ret) {
1802                 if (!dev->data->min_rx_buf_size ||
1803                     dev->data->min_rx_buf_size > mbp_buf_size)
1804                         dev->data->min_rx_buf_size = mbp_buf_size;
1805         }
1806
1807         return eth_err(port_id, ret);
1808 }
1809
1810 int
1811 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1812                                uint16_t nb_rx_desc,
1813                                const struct rte_eth_hairpin_conf *conf)
1814 {
1815         int ret;
1816         struct rte_eth_dev *dev;
1817         struct rte_eth_hairpin_cap cap;
1818         void **rxq;
1819         int i;
1820         int count;
1821
1822         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1823
1824         dev = &rte_eth_devices[port_id];
1825         if (rx_queue_id >= dev->data->nb_rx_queues) {
1826                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1827                 return -EINVAL;
1828         }
1829         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1830         if (ret != 0)
1831                 return ret;
1832         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1833                                 -ENOTSUP);
1834         /* if nb_rx_desc is zero use max number of desc from the driver. */
1835         if (nb_rx_desc == 0)
1836                 nb_rx_desc = cap.max_nb_desc;
1837         if (nb_rx_desc > cap.max_nb_desc) {
1838                 RTE_ETHDEV_LOG(ERR,
1839                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
1840                         nb_rx_desc, cap.max_nb_desc);
1841                 return -EINVAL;
1842         }
1843         if (conf->peer_count > cap.max_rx_2_tx) {
1844                 RTE_ETHDEV_LOG(ERR,
1845                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu",
1846                         conf->peer_count, cap.max_rx_2_tx);
1847                 return -EINVAL;
1848         }
1849         if (conf->peer_count == 0) {
1850                 RTE_ETHDEV_LOG(ERR,
1851                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0",
1852                         conf->peer_count);
1853                 return -EINVAL;
1854         }
1855         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1856              cap.max_nb_queues != UINT16_MAX; i++) {
1857                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1858                         count++;
1859         }
1860         if (count > cap.max_nb_queues) {
1861                 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
1862                 cap.max_nb_queues);
1863                 return -EINVAL;
1864         }
1865         if (dev->data->dev_started)
1866                 return -EBUSY;
1867         rxq = dev->data->rx_queues;
1868         if (rxq[rx_queue_id] != NULL) {
1869                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1870                                         -ENOTSUP);
1871                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1872                 rxq[rx_queue_id] = NULL;
1873         }
1874         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1875                                                       nb_rx_desc, conf);
1876         if (ret == 0)
1877                 dev->data->rx_queue_state[rx_queue_id] =
1878                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1879         return eth_err(port_id, ret);
1880 }
1881
1882 int
1883 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1884                        uint16_t nb_tx_desc, unsigned int socket_id,
1885                        const struct rte_eth_txconf *tx_conf)
1886 {
1887         struct rte_eth_dev *dev;
1888         struct rte_eth_dev_info dev_info;
1889         struct rte_eth_txconf local_conf;
1890         void **txq;
1891         int ret;
1892
1893         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1894
1895         dev = &rte_eth_devices[port_id];
1896         if (tx_queue_id >= dev->data->nb_tx_queues) {
1897                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1898                 return -EINVAL;
1899         }
1900
1901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1902
1903         ret = rte_eth_dev_info_get(port_id, &dev_info);
1904         if (ret != 0)
1905                 return ret;
1906
1907         /* Use default specified by driver, if nb_tx_desc is zero */
1908         if (nb_tx_desc == 0) {
1909                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1910                 /* If driver default is zero, fall back on EAL default */
1911                 if (nb_tx_desc == 0)
1912                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1913         }
1914         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1915             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1916             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1917                 RTE_ETHDEV_LOG(ERR,
1918                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1919                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1920                         dev_info.tx_desc_lim.nb_min,
1921                         dev_info.tx_desc_lim.nb_align);
1922                 return -EINVAL;
1923         }
1924
1925         if (dev->data->dev_started &&
1926                 !(dev_info.dev_capa &
1927                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1928                 return -EBUSY;
1929
1930         if (dev->data->dev_started &&
1931                 (dev->data->tx_queue_state[tx_queue_id] !=
1932                         RTE_ETH_QUEUE_STATE_STOPPED))
1933                 return -EBUSY;
1934
1935         txq = dev->data->tx_queues;
1936         if (txq[tx_queue_id]) {
1937                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1938                                         -ENOTSUP);
1939                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1940                 txq[tx_queue_id] = NULL;
1941         }
1942
1943         if (tx_conf == NULL)
1944                 tx_conf = &dev_info.default_txconf;
1945
1946         local_conf = *tx_conf;
1947
1948         /*
1949          * If an offloading has already been enabled in
1950          * rte_eth_dev_configure(), it has been enabled on all queues,
1951          * so there is no need to enable it in this queue again.
1952          * The local_conf.offloads input to underlying PMD only carries
1953          * those offloadings which are only enabled on this queue and
1954          * not enabled on all queues.
1955          */
1956         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1957
1958         /*
1959          * New added offloadings for this queue are those not enabled in
1960          * rte_eth_dev_configure() and they must be per-queue type.
1961          * A pure per-port offloading can't be enabled on a queue while
1962          * disabled on another queue. A pure per-port offloading can't
1963          * be enabled for any queue as new added one if it hasn't been
1964          * enabled in rte_eth_dev_configure().
1965          */
1966         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1967              local_conf.offloads) {
1968                 RTE_ETHDEV_LOG(ERR,
1969                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1970                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1971                         port_id, tx_queue_id, local_conf.offloads,
1972                         dev_info.tx_queue_offload_capa,
1973                         __func__);
1974                 return -EINVAL;
1975         }
1976
1977         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1978                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1979 }
1980
1981 int
1982 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1983                                uint16_t nb_tx_desc,
1984                                const struct rte_eth_hairpin_conf *conf)
1985 {
1986         struct rte_eth_dev *dev;
1987         struct rte_eth_hairpin_cap cap;
1988         void **txq;
1989         int i;
1990         int count;
1991         int ret;
1992
1993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1994         dev = &rte_eth_devices[port_id];
1995         if (tx_queue_id >= dev->data->nb_tx_queues) {
1996                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1997                 return -EINVAL;
1998         }
1999         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2000         if (ret != 0)
2001                 return ret;
2002         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2003                                 -ENOTSUP);
2004         /* if nb_rx_desc is zero use max number of desc from the driver. */
2005         if (nb_tx_desc == 0)
2006                 nb_tx_desc = cap.max_nb_desc;
2007         if (nb_tx_desc > cap.max_nb_desc) {
2008                 RTE_ETHDEV_LOG(ERR,
2009                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2010                         nb_tx_desc, cap.max_nb_desc);
2011                 return -EINVAL;
2012         }
2013         if (conf->peer_count > cap.max_tx_2_rx) {
2014                 RTE_ETHDEV_LOG(ERR,
2015                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu",
2016                         conf->peer_count, cap.max_tx_2_rx);
2017                 return -EINVAL;
2018         }
2019         if (conf->peer_count == 0) {
2020                 RTE_ETHDEV_LOG(ERR,
2021                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0",
2022                         conf->peer_count);
2023                 return -EINVAL;
2024         }
2025         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2026              cap.max_nb_queues != UINT16_MAX; i++) {
2027                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2028                         count++;
2029         }
2030         if (count > cap.max_nb_queues) {
2031                 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2032                 cap.max_nb_queues);
2033                 return -EINVAL;
2034         }
2035         if (dev->data->dev_started)
2036                 return -EBUSY;
2037         txq = dev->data->tx_queues;
2038         if (txq[tx_queue_id] != NULL) {
2039                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2040                                         -ENOTSUP);
2041                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2042                 txq[tx_queue_id] = NULL;
2043         }
2044         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2045                 (dev, tx_queue_id, nb_tx_desc, conf);
2046         if (ret == 0)
2047                 dev->data->tx_queue_state[tx_queue_id] =
2048                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2049         return eth_err(port_id, ret);
2050 }
2051
2052 void
2053 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2054                 void *userdata __rte_unused)
2055 {
2056         unsigned i;
2057
2058         for (i = 0; i < unsent; i++)
2059                 rte_pktmbuf_free(pkts[i]);
2060 }
2061
2062 void
2063 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2064                 void *userdata)
2065 {
2066         uint64_t *count = userdata;
2067         unsigned i;
2068
2069         for (i = 0; i < unsent; i++)
2070                 rte_pktmbuf_free(pkts[i]);
2071
2072         *count += unsent;
2073 }
2074
2075 int
2076 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2077                 buffer_tx_error_fn cbfn, void *userdata)
2078 {
2079         buffer->error_callback = cbfn;
2080         buffer->error_userdata = userdata;
2081         return 0;
2082 }
2083
2084 int
2085 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2086 {
2087         int ret = 0;
2088
2089         if (buffer == NULL)
2090                 return -EINVAL;
2091
2092         buffer->size = size;
2093         if (buffer->error_callback == NULL) {
2094                 ret = rte_eth_tx_buffer_set_err_callback(
2095                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2096         }
2097
2098         return ret;
2099 }
2100
2101 int
2102 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2103 {
2104         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2105         int ret;
2106
2107         /* Validate Input Data. Bail if not valid or not supported. */
2108         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2109         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2110
2111         /* Call driver to free pending mbufs. */
2112         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2113                                                free_cnt);
2114         return eth_err(port_id, ret);
2115 }
2116
2117 int
2118 rte_eth_promiscuous_enable(uint16_t port_id)
2119 {
2120         struct rte_eth_dev *dev;
2121         int diag = 0;
2122
2123         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2124         dev = &rte_eth_devices[port_id];
2125
2126         if (dev->data->promiscuous == 1)
2127                 return 0;
2128
2129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2130
2131         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2132         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2133
2134         return eth_err(port_id, diag);
2135 }
2136
2137 int
2138 rte_eth_promiscuous_disable(uint16_t port_id)
2139 {
2140         struct rte_eth_dev *dev;
2141         int diag = 0;
2142
2143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2144         dev = &rte_eth_devices[port_id];
2145
2146         if (dev->data->promiscuous == 0)
2147                 return 0;
2148
2149         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2150
2151         dev->data->promiscuous = 0;
2152         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2153         if (diag != 0)
2154                 dev->data->promiscuous = 1;
2155
2156         return eth_err(port_id, diag);
2157 }
2158
2159 int
2160 rte_eth_promiscuous_get(uint16_t port_id)
2161 {
2162         struct rte_eth_dev *dev;
2163
2164         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2165
2166         dev = &rte_eth_devices[port_id];
2167         return dev->data->promiscuous;
2168 }
2169
2170 int
2171 rte_eth_allmulticast_enable(uint16_t port_id)
2172 {
2173         struct rte_eth_dev *dev;
2174         int diag;
2175
2176         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2177         dev = &rte_eth_devices[port_id];
2178
2179         if (dev->data->all_multicast == 1)
2180                 return 0;
2181
2182         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2183         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2184         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2185
2186         return eth_err(port_id, diag);
2187 }
2188
2189 int
2190 rte_eth_allmulticast_disable(uint16_t port_id)
2191 {
2192         struct rte_eth_dev *dev;
2193         int diag;
2194
2195         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2196         dev = &rte_eth_devices[port_id];
2197
2198         if (dev->data->all_multicast == 0)
2199                 return 0;
2200
2201         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2202         dev->data->all_multicast = 0;
2203         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2204         if (diag != 0)
2205                 dev->data->all_multicast = 1;
2206
2207         return eth_err(port_id, diag);
2208 }
2209
2210 int
2211 rte_eth_allmulticast_get(uint16_t port_id)
2212 {
2213         struct rte_eth_dev *dev;
2214
2215         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2216
2217         dev = &rte_eth_devices[port_id];
2218         return dev->data->all_multicast;
2219 }
2220
2221 int
2222 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2223 {
2224         struct rte_eth_dev *dev;
2225
2226         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2227         dev = &rte_eth_devices[port_id];
2228
2229         if (dev->data->dev_conf.intr_conf.lsc &&
2230             dev->data->dev_started)
2231                 rte_eth_linkstatus_get(dev, eth_link);
2232         else {
2233                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2234                 (*dev->dev_ops->link_update)(dev, 1);
2235                 *eth_link = dev->data->dev_link;
2236         }
2237
2238         return 0;
2239 }
2240
2241 int
2242 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2243 {
2244         struct rte_eth_dev *dev;
2245
2246         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2247         dev = &rte_eth_devices[port_id];
2248
2249         if (dev->data->dev_conf.intr_conf.lsc &&
2250             dev->data->dev_started)
2251                 rte_eth_linkstatus_get(dev, eth_link);
2252         else {
2253                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2254                 (*dev->dev_ops->link_update)(dev, 0);
2255                 *eth_link = dev->data->dev_link;
2256         }
2257
2258         return 0;
2259 }
2260
2261 int
2262 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2263 {
2264         struct rte_eth_dev *dev;
2265
2266         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2267
2268         dev = &rte_eth_devices[port_id];
2269         memset(stats, 0, sizeof(*stats));
2270
2271         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2272         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2273         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2274 }
2275
2276 int
2277 rte_eth_stats_reset(uint16_t port_id)
2278 {
2279         struct rte_eth_dev *dev;
2280         int ret;
2281
2282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2283         dev = &rte_eth_devices[port_id];
2284
2285         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2286         ret = (*dev->dev_ops->stats_reset)(dev);
2287         if (ret != 0)
2288                 return eth_err(port_id, ret);
2289
2290         dev->data->rx_mbuf_alloc_failed = 0;
2291
2292         return 0;
2293 }
2294
2295 static inline int
2296 get_xstats_basic_count(struct rte_eth_dev *dev)
2297 {
2298         uint16_t nb_rxqs, nb_txqs;
2299         int count;
2300
2301         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2302         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2303
2304         count = RTE_NB_STATS;
2305         count += nb_rxqs * RTE_NB_RXQ_STATS;
2306         count += nb_txqs * RTE_NB_TXQ_STATS;
2307
2308         return count;
2309 }
2310
2311 static int
2312 get_xstats_count(uint16_t port_id)
2313 {
2314         struct rte_eth_dev *dev;
2315         int count;
2316
2317         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2318         dev = &rte_eth_devices[port_id];
2319         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2320                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2321                                 NULL, 0);
2322                 if (count < 0)
2323                         return eth_err(port_id, count);
2324         }
2325         if (dev->dev_ops->xstats_get_names != NULL) {
2326                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2327                 if (count < 0)
2328                         return eth_err(port_id, count);
2329         } else
2330                 count = 0;
2331
2332
2333         count += get_xstats_basic_count(dev);
2334
2335         return count;
2336 }
2337
2338 int
2339 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2340                 uint64_t *id)
2341 {
2342         int cnt_xstats, idx_xstat;
2343
2344         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2345
2346         if (!id) {
2347                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2348                 return -ENOMEM;
2349         }
2350
2351         if (!xstat_name) {
2352                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2353                 return -ENOMEM;
2354         }
2355
2356         /* Get count */
2357         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2358         if (cnt_xstats  < 0) {
2359                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2360                 return -ENODEV;
2361         }
2362
2363         /* Get id-name lookup table */
2364         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2365
2366         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2367                         port_id, xstats_names, cnt_xstats, NULL)) {
2368                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2369                 return -1;
2370         }
2371
2372         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2373                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2374                         *id = idx_xstat;
2375                         return 0;
2376                 };
2377         }
2378
2379         return -EINVAL;
2380 }
2381
2382 /* retrieve basic stats names */
2383 static int
2384 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2385         struct rte_eth_xstat_name *xstats_names)
2386 {
2387         int cnt_used_entries = 0;
2388         uint32_t idx, id_queue;
2389         uint16_t num_q;
2390
2391         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2392                 strlcpy(xstats_names[cnt_used_entries].name,
2393                         rte_stats_strings[idx].name,
2394                         sizeof(xstats_names[0].name));
2395                 cnt_used_entries++;
2396         }
2397         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2398         for (id_queue = 0; id_queue < num_q; id_queue++) {
2399                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2400                         snprintf(xstats_names[cnt_used_entries].name,
2401                                 sizeof(xstats_names[0].name),
2402                                 "rx_q%u%s",
2403                                 id_queue, rte_rxq_stats_strings[idx].name);
2404                         cnt_used_entries++;
2405                 }
2406
2407         }
2408         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2409         for (id_queue = 0; id_queue < num_q; id_queue++) {
2410                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2411                         snprintf(xstats_names[cnt_used_entries].name,
2412                                 sizeof(xstats_names[0].name),
2413                                 "tx_q%u%s",
2414                                 id_queue, rte_txq_stats_strings[idx].name);
2415                         cnt_used_entries++;
2416                 }
2417         }
2418         return cnt_used_entries;
2419 }
2420
2421 /* retrieve ethdev extended statistics names */
2422 int
2423 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2424         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2425         uint64_t *ids)
2426 {
2427         struct rte_eth_xstat_name *xstats_names_copy;
2428         unsigned int no_basic_stat_requested = 1;
2429         unsigned int no_ext_stat_requested = 1;
2430         unsigned int expected_entries;
2431         unsigned int basic_count;
2432         struct rte_eth_dev *dev;
2433         unsigned int i;
2434         int ret;
2435
2436         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2437         dev = &rte_eth_devices[port_id];
2438
2439         basic_count = get_xstats_basic_count(dev);
2440         ret = get_xstats_count(port_id);
2441         if (ret < 0)
2442                 return ret;
2443         expected_entries = (unsigned int)ret;
2444
2445         /* Return max number of stats if no ids given */
2446         if (!ids) {
2447                 if (!xstats_names)
2448                         return expected_entries;
2449                 else if (xstats_names && size < expected_entries)
2450                         return expected_entries;
2451         }
2452
2453         if (ids && !xstats_names)
2454                 return -EINVAL;
2455
2456         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2457                 uint64_t ids_copy[size];
2458
2459                 for (i = 0; i < size; i++) {
2460                         if (ids[i] < basic_count) {
2461                                 no_basic_stat_requested = 0;
2462                                 break;
2463                         }
2464
2465                         /*
2466                          * Convert ids to xstats ids that PMD knows.
2467                          * ids known by user are basic + extended stats.
2468                          */
2469                         ids_copy[i] = ids[i] - basic_count;
2470                 }
2471
2472                 if (no_basic_stat_requested)
2473                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2474                                         xstats_names, ids_copy, size);
2475         }
2476
2477         /* Retrieve all stats */
2478         if (!ids) {
2479                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2480                                 expected_entries);
2481                 if (num_stats < 0 || num_stats > (int)expected_entries)
2482                         return num_stats;
2483                 else
2484                         return expected_entries;
2485         }
2486
2487         xstats_names_copy = calloc(expected_entries,
2488                 sizeof(struct rte_eth_xstat_name));
2489
2490         if (!xstats_names_copy) {
2491                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2492                 return -ENOMEM;
2493         }
2494
2495         if (ids) {
2496                 for (i = 0; i < size; i++) {
2497                         if (ids[i] >= basic_count) {
2498                                 no_ext_stat_requested = 0;
2499                                 break;
2500                         }
2501                 }
2502         }
2503
2504         /* Fill xstats_names_copy structure */
2505         if (ids && no_ext_stat_requested) {
2506                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2507         } else {
2508                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2509                         expected_entries);
2510                 if (ret < 0) {
2511                         free(xstats_names_copy);
2512                         return ret;
2513                 }
2514         }
2515
2516         /* Filter stats */
2517         for (i = 0; i < size; i++) {
2518                 if (ids[i] >= expected_entries) {
2519                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2520                         free(xstats_names_copy);
2521                         return -1;
2522                 }
2523                 xstats_names[i] = xstats_names_copy[ids[i]];
2524         }
2525
2526         free(xstats_names_copy);
2527         return size;
2528 }
2529
2530 int
2531 rte_eth_xstats_get_names(uint16_t port_id,
2532         struct rte_eth_xstat_name *xstats_names,
2533         unsigned int size)
2534 {
2535         struct rte_eth_dev *dev;
2536         int cnt_used_entries;
2537         int cnt_expected_entries;
2538         int cnt_driver_entries;
2539
2540         cnt_expected_entries = get_xstats_count(port_id);
2541         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2542                         (int)size < cnt_expected_entries)
2543                 return cnt_expected_entries;
2544
2545         /* port_id checked in get_xstats_count() */
2546         dev = &rte_eth_devices[port_id];
2547
2548         cnt_used_entries = rte_eth_basic_stats_get_names(
2549                 dev, xstats_names);
2550
2551         if (dev->dev_ops->xstats_get_names != NULL) {
2552                 /* If there are any driver-specific xstats, append them
2553                  * to end of list.
2554                  */
2555                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2556                         dev,
2557                         xstats_names + cnt_used_entries,
2558                         size - cnt_used_entries);
2559                 if (cnt_driver_entries < 0)
2560                         return eth_err(port_id, cnt_driver_entries);
2561                 cnt_used_entries += cnt_driver_entries;
2562         }
2563
2564         return cnt_used_entries;
2565 }
2566
2567
2568 static int
2569 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2570 {
2571         struct rte_eth_dev *dev;
2572         struct rte_eth_stats eth_stats;
2573         unsigned int count = 0, i, q;
2574         uint64_t val, *stats_ptr;
2575         uint16_t nb_rxqs, nb_txqs;
2576         int ret;
2577
2578         ret = rte_eth_stats_get(port_id, &eth_stats);
2579         if (ret < 0)
2580                 return ret;
2581
2582         dev = &rte_eth_devices[port_id];
2583
2584         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2585         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2586
2587         /* global stats */
2588         for (i = 0; i < RTE_NB_STATS; i++) {
2589                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2590                                         rte_stats_strings[i].offset);
2591                 val = *stats_ptr;
2592                 xstats[count++].value = val;
2593         }
2594
2595         /* per-rxq stats */
2596         for (q = 0; q < nb_rxqs; q++) {
2597                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2598                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2599                                         rte_rxq_stats_strings[i].offset +
2600                                         q * sizeof(uint64_t));
2601                         val = *stats_ptr;
2602                         xstats[count++].value = val;
2603                 }
2604         }
2605
2606         /* per-txq stats */
2607         for (q = 0; q < nb_txqs; q++) {
2608                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2609                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2610                                         rte_txq_stats_strings[i].offset +
2611                                         q * sizeof(uint64_t));
2612                         val = *stats_ptr;
2613                         xstats[count++].value = val;
2614                 }
2615         }
2616         return count;
2617 }
2618
2619 /* retrieve ethdev extended statistics */
2620 int
2621 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2622                          uint64_t *values, unsigned int size)
2623 {
2624         unsigned int no_basic_stat_requested = 1;
2625         unsigned int no_ext_stat_requested = 1;
2626         unsigned int num_xstats_filled;
2627         unsigned int basic_count;
2628         uint16_t expected_entries;
2629         struct rte_eth_dev *dev;
2630         unsigned int i;
2631         int ret;
2632
2633         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2634         ret = get_xstats_count(port_id);
2635         if (ret < 0)
2636                 return ret;
2637         expected_entries = (uint16_t)ret;
2638         struct rte_eth_xstat xstats[expected_entries];
2639         dev = &rte_eth_devices[port_id];
2640         basic_count = get_xstats_basic_count(dev);
2641
2642         /* Return max number of stats if no ids given */
2643         if (!ids) {
2644                 if (!values)
2645                         return expected_entries;
2646                 else if (values && size < expected_entries)
2647                         return expected_entries;
2648         }
2649
2650         if (ids && !values)
2651                 return -EINVAL;
2652
2653         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2654                 unsigned int basic_count = get_xstats_basic_count(dev);
2655                 uint64_t ids_copy[size];
2656
2657                 for (i = 0; i < size; i++) {
2658                         if (ids[i] < basic_count) {
2659                                 no_basic_stat_requested = 0;
2660                                 break;
2661                         }
2662
2663                         /*
2664                          * Convert ids to xstats ids that PMD knows.
2665                          * ids known by user are basic + extended stats.
2666                          */
2667                         ids_copy[i] = ids[i] - basic_count;
2668                 }
2669
2670                 if (no_basic_stat_requested)
2671                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2672                                         values, size);
2673         }
2674
2675         if (ids) {
2676                 for (i = 0; i < size; i++) {
2677                         if (ids[i] >= basic_count) {
2678                                 no_ext_stat_requested = 0;
2679                                 break;
2680                         }
2681                 }
2682         }
2683
2684         /* Fill the xstats structure */
2685         if (ids && no_ext_stat_requested)
2686                 ret = rte_eth_basic_stats_get(port_id, xstats);
2687         else
2688                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2689
2690         if (ret < 0)
2691                 return ret;
2692         num_xstats_filled = (unsigned int)ret;
2693
2694         /* Return all stats */
2695         if (!ids) {
2696                 for (i = 0; i < num_xstats_filled; i++)
2697                         values[i] = xstats[i].value;
2698                 return expected_entries;
2699         }
2700
2701         /* Filter stats */
2702         for (i = 0; i < size; i++) {
2703                 if (ids[i] >= expected_entries) {
2704                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2705                         return -1;
2706                 }
2707                 values[i] = xstats[ids[i]].value;
2708         }
2709         return size;
2710 }
2711
2712 int
2713 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2714         unsigned int n)
2715 {
2716         struct rte_eth_dev *dev;
2717         unsigned int count = 0, i;
2718         signed int xcount = 0;
2719         uint16_t nb_rxqs, nb_txqs;
2720         int ret;
2721
2722         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2723
2724         dev = &rte_eth_devices[port_id];
2725
2726         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2727         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2728
2729         /* Return generic statistics */
2730         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2731                 (nb_txqs * RTE_NB_TXQ_STATS);
2732
2733         /* implemented by the driver */
2734         if (dev->dev_ops->xstats_get != NULL) {
2735                 /* Retrieve the xstats from the driver at the end of the
2736                  * xstats struct.
2737                  */
2738                 xcount = (*dev->dev_ops->xstats_get)(dev,
2739                                      xstats ? xstats + count : NULL,
2740                                      (n > count) ? n - count : 0);
2741
2742                 if (xcount < 0)
2743                         return eth_err(port_id, xcount);
2744         }
2745
2746         if (n < count + xcount || xstats == NULL)
2747                 return count + xcount;
2748
2749         /* now fill the xstats structure */
2750         ret = rte_eth_basic_stats_get(port_id, xstats);
2751         if (ret < 0)
2752                 return ret;
2753         count = ret;
2754
2755         for (i = 0; i < count; i++)
2756                 xstats[i].id = i;
2757         /* add an offset to driver-specific stats */
2758         for ( ; i < count + xcount; i++)
2759                 xstats[i].id += count;
2760
2761         return count + xcount;
2762 }
2763
2764 /* reset ethdev extended statistics */
2765 int
2766 rte_eth_xstats_reset(uint16_t port_id)
2767 {
2768         struct rte_eth_dev *dev;
2769
2770         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2771         dev = &rte_eth_devices[port_id];
2772
2773         /* implemented by the driver */
2774         if (dev->dev_ops->xstats_reset != NULL)
2775                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2776
2777         /* fallback to default */
2778         return rte_eth_stats_reset(port_id);
2779 }
2780
2781 static int
2782 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2783                 uint8_t is_rx)
2784 {
2785         struct rte_eth_dev *dev;
2786
2787         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2788
2789         dev = &rte_eth_devices[port_id];
2790
2791         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2792
2793         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2794                 return -EINVAL;
2795
2796         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2797                 return -EINVAL;
2798
2799         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2800                 return -EINVAL;
2801
2802         return (*dev->dev_ops->queue_stats_mapping_set)
2803                         (dev, queue_id, stat_idx, is_rx);
2804 }
2805
2806
2807 int
2808 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2809                 uint8_t stat_idx)
2810 {
2811         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2812                                                 stat_idx, STAT_QMAP_TX));
2813 }
2814
2815
2816 int
2817 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2818                 uint8_t stat_idx)
2819 {
2820         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2821                                                 stat_idx, STAT_QMAP_RX));
2822 }
2823
2824 int
2825 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2826 {
2827         struct rte_eth_dev *dev;
2828
2829         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2830         dev = &rte_eth_devices[port_id];
2831
2832         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2833         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2834                                                         fw_version, fw_size));
2835 }
2836
2837 int
2838 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2839 {
2840         struct rte_eth_dev *dev;
2841         const struct rte_eth_desc_lim lim = {
2842                 .nb_max = UINT16_MAX,
2843                 .nb_min = 0,
2844                 .nb_align = 1,
2845                 .nb_seg_max = UINT16_MAX,
2846                 .nb_mtu_seg_max = UINT16_MAX,
2847         };
2848         int diag;
2849
2850         /*
2851          * Init dev_info before port_id check since caller does not have
2852          * return status and does not know if get is successful or not.
2853          */
2854         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2855
2856         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857         dev = &rte_eth_devices[port_id];
2858
2859         dev_info->rx_desc_lim = lim;
2860         dev_info->tx_desc_lim = lim;
2861         dev_info->device = dev->device;
2862         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2863         dev_info->max_mtu = UINT16_MAX;
2864
2865         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2866         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2867         if (diag != 0) {
2868                 /* Cleanup already filled in device information */
2869                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2870                 return eth_err(port_id, diag);
2871         }
2872
2873         dev_info->driver_name = dev->device->driver->name;
2874         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2875         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2876
2877         dev_info->dev_flags = &dev->data->dev_flags;
2878
2879         return 0;
2880 }
2881
2882 int
2883 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2884                                  uint32_t *ptypes, int num)
2885 {
2886         int i, j;
2887         struct rte_eth_dev *dev;
2888         const uint32_t *all_ptypes;
2889
2890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2891         dev = &rte_eth_devices[port_id];
2892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2893         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2894
2895         if (!all_ptypes)
2896                 return 0;
2897
2898         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2899                 if (all_ptypes[i] & ptype_mask) {
2900                         if (j < num)
2901                                 ptypes[j] = all_ptypes[i];
2902                         j++;
2903                 }
2904
2905         return j;
2906 }
2907
2908 int
2909 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
2910 {
2911         struct rte_eth_dev *dev;
2912
2913         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2914         dev = &rte_eth_devices[port_id];
2915         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2916
2917         return 0;
2918 }
2919
2920
2921 int
2922 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2923 {
2924         struct rte_eth_dev *dev;
2925
2926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2927
2928         dev = &rte_eth_devices[port_id];
2929         *mtu = dev->data->mtu;
2930         return 0;
2931 }
2932
2933 int
2934 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2935 {
2936         int ret;
2937         struct rte_eth_dev_info dev_info;
2938         struct rte_eth_dev *dev;
2939
2940         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2941         dev = &rte_eth_devices[port_id];
2942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2943
2944         /*
2945          * Check if the device supports dev_infos_get, if it does not
2946          * skip min_mtu/max_mtu validation here as this requires values
2947          * that are populated within the call to rte_eth_dev_info_get()
2948          * which relies on dev->dev_ops->dev_infos_get.
2949          */
2950         if (*dev->dev_ops->dev_infos_get != NULL) {
2951                 ret = rte_eth_dev_info_get(port_id, &dev_info);
2952                 if (ret != 0)
2953                         return ret;
2954
2955                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
2956                         return -EINVAL;
2957         }
2958
2959         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2960         if (!ret)
2961                 dev->data->mtu = mtu;
2962
2963         return eth_err(port_id, ret);
2964 }
2965
2966 int
2967 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2968 {
2969         struct rte_eth_dev *dev;
2970         int ret;
2971
2972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2973         dev = &rte_eth_devices[port_id];
2974         if (!(dev->data->dev_conf.rxmode.offloads &
2975               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2976                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2977                         port_id);
2978                 return -ENOSYS;
2979         }
2980
2981         if (vlan_id > 4095) {
2982                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2983                         port_id, vlan_id);
2984                 return -EINVAL;
2985         }
2986         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2987
2988         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2989         if (ret == 0) {
2990                 struct rte_vlan_filter_conf *vfc;
2991                 int vidx;
2992                 int vbit;
2993
2994                 vfc = &dev->data->vlan_filter_conf;
2995                 vidx = vlan_id / 64;
2996                 vbit = vlan_id % 64;
2997
2998                 if (on)
2999                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3000                 else
3001                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3002         }
3003
3004         return eth_err(port_id, ret);
3005 }
3006
3007 int
3008 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3009                                     int on)
3010 {
3011         struct rte_eth_dev *dev;
3012
3013         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3014         dev = &rte_eth_devices[port_id];
3015         if (rx_queue_id >= dev->data->nb_rx_queues) {
3016                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3017                 return -EINVAL;
3018         }
3019
3020         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3021         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3022
3023         return 0;
3024 }
3025
3026 int
3027 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3028                                 enum rte_vlan_type vlan_type,
3029                                 uint16_t tpid)
3030 {
3031         struct rte_eth_dev *dev;
3032
3033         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3034         dev = &rte_eth_devices[port_id];
3035         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3036
3037         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3038                                                                tpid));
3039 }
3040
3041 int
3042 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3043 {
3044         struct rte_eth_dev *dev;
3045         int ret = 0;
3046         int mask = 0;
3047         int cur, org = 0;
3048         uint64_t orig_offloads;
3049         uint64_t *dev_offloads;
3050
3051         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3052         dev = &rte_eth_devices[port_id];
3053
3054         /* save original values in case of failure */
3055         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3056         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3057
3058         /*check which option changed by application*/
3059         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3060         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3061         if (cur != org) {
3062                 if (cur)
3063                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3064                 else
3065                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3066                 mask |= ETH_VLAN_STRIP_MASK;
3067         }
3068
3069         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3070         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3071         if (cur != org) {
3072                 if (cur)
3073                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3074                 else
3075                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3076                 mask |= ETH_VLAN_FILTER_MASK;
3077         }
3078
3079         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3080         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3081         if (cur != org) {
3082                 if (cur)
3083                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3084                 else
3085                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3086                 mask |= ETH_VLAN_EXTEND_MASK;
3087         }
3088
3089         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3090         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3091         if (cur != org) {
3092                 if (cur)
3093                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3094                 else
3095                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3096                 mask |= ETH_QINQ_STRIP_MASK;
3097         }
3098
3099         /*no change*/
3100         if (mask == 0)
3101                 return ret;
3102
3103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3104         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3105         if (ret) {
3106                 /* hit an error restore  original values */
3107                 *dev_offloads = orig_offloads;
3108         }
3109
3110         return eth_err(port_id, ret);
3111 }
3112
3113 int
3114 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3115 {
3116         struct rte_eth_dev *dev;
3117         uint64_t *dev_offloads;
3118         int ret = 0;
3119
3120         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3121         dev = &rte_eth_devices[port_id];
3122         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3123
3124         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3125                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3126
3127         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3128                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3129
3130         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3131                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3132
3133         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3134                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3135
3136         return ret;
3137 }
3138
3139 int
3140 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3141 {
3142         struct rte_eth_dev *dev;
3143
3144         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3145         dev = &rte_eth_devices[port_id];
3146         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3147
3148         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3149 }
3150
3151 int
3152 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3153 {
3154         struct rte_eth_dev *dev;
3155
3156         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3157         dev = &rte_eth_devices[port_id];
3158         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3159         memset(fc_conf, 0, sizeof(*fc_conf));
3160         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3161 }
3162
3163 int
3164 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3165 {
3166         struct rte_eth_dev *dev;
3167
3168         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3169         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3170                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3171                 return -EINVAL;
3172         }
3173
3174         dev = &rte_eth_devices[port_id];
3175         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3176         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3177 }
3178
3179 int
3180 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3181                                    struct rte_eth_pfc_conf *pfc_conf)
3182 {
3183         struct rte_eth_dev *dev;
3184
3185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3186         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3187                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3188                 return -EINVAL;
3189         }
3190
3191         dev = &rte_eth_devices[port_id];
3192         /* High water, low water validation are device specific */
3193         if  (*dev->dev_ops->priority_flow_ctrl_set)
3194                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3195                                         (dev, pfc_conf));
3196         return -ENOTSUP;
3197 }
3198
3199 static int
3200 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3201                         uint16_t reta_size)
3202 {
3203         uint16_t i, num;
3204
3205         if (!reta_conf)
3206                 return -EINVAL;
3207
3208         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3209         for (i = 0; i < num; i++) {
3210                 if (reta_conf[i].mask)
3211                         return 0;
3212         }
3213
3214         return -EINVAL;
3215 }
3216
3217 static int
3218 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3219                          uint16_t reta_size,
3220                          uint16_t max_rxq)
3221 {
3222         uint16_t i, idx, shift;
3223
3224         if (!reta_conf)
3225                 return -EINVAL;
3226
3227         if (max_rxq == 0) {
3228                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3229                 return -EINVAL;
3230         }
3231
3232         for (i = 0; i < reta_size; i++) {
3233                 idx = i / RTE_RETA_GROUP_SIZE;
3234                 shift = i % RTE_RETA_GROUP_SIZE;
3235                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3236                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3237                         RTE_ETHDEV_LOG(ERR,
3238                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3239                                 idx, shift,
3240                                 reta_conf[idx].reta[shift], max_rxq);
3241                         return -EINVAL;
3242                 }
3243         }
3244
3245         return 0;
3246 }
3247
3248 int
3249 rte_eth_dev_rss_reta_update(uint16_t port_id,
3250                             struct rte_eth_rss_reta_entry64 *reta_conf,
3251                             uint16_t reta_size)
3252 {
3253         struct rte_eth_dev *dev;
3254         int ret;
3255
3256         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3257         /* Check mask bits */
3258         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3259         if (ret < 0)
3260                 return ret;
3261
3262         dev = &rte_eth_devices[port_id];
3263
3264         /* Check entry value */
3265         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3266                                 dev->data->nb_rx_queues);
3267         if (ret < 0)
3268                 return ret;
3269
3270         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3271         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3272                                                              reta_size));
3273 }
3274
3275 int
3276 rte_eth_dev_rss_reta_query(uint16_t port_id,
3277                            struct rte_eth_rss_reta_entry64 *reta_conf,
3278                            uint16_t reta_size)
3279 {
3280         struct rte_eth_dev *dev;
3281         int ret;
3282
3283         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3284
3285         /* Check mask bits */
3286         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3287         if (ret < 0)
3288                 return ret;
3289
3290         dev = &rte_eth_devices[port_id];
3291         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3292         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3293                                                             reta_size));
3294 }
3295
3296 int
3297 rte_eth_dev_rss_hash_update(uint16_t port_id,
3298                             struct rte_eth_rss_conf *rss_conf)
3299 {
3300         struct rte_eth_dev *dev;
3301         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3302         int ret;
3303
3304         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3305
3306         ret = rte_eth_dev_info_get(port_id, &dev_info);
3307         if (ret != 0)
3308                 return ret;
3309
3310         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3311
3312         dev = &rte_eth_devices[port_id];
3313         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3314             dev_info.flow_type_rss_offloads) {
3315                 RTE_ETHDEV_LOG(ERR,
3316                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3317                         port_id, rss_conf->rss_hf,
3318                         dev_info.flow_type_rss_offloads);
3319                 return -EINVAL;
3320         }
3321         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3322         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3323                                                                  rss_conf));
3324 }
3325
3326 int
3327 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3328                               struct rte_eth_rss_conf *rss_conf)
3329 {
3330         struct rte_eth_dev *dev;
3331
3332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3333         dev = &rte_eth_devices[port_id];
3334         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3335         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3336                                                                    rss_conf));
3337 }
3338
3339 int
3340 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3341                                 struct rte_eth_udp_tunnel *udp_tunnel)
3342 {
3343         struct rte_eth_dev *dev;
3344
3345         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3346         if (udp_tunnel == NULL) {
3347                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3348                 return -EINVAL;
3349         }
3350
3351         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3352                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3353                 return -EINVAL;
3354         }
3355
3356         dev = &rte_eth_devices[port_id];
3357         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3358         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3359                                                                 udp_tunnel));
3360 }
3361
3362 int
3363 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3364                                    struct rte_eth_udp_tunnel *udp_tunnel)
3365 {
3366         struct rte_eth_dev *dev;
3367
3368         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3369         dev = &rte_eth_devices[port_id];
3370
3371         if (udp_tunnel == NULL) {
3372                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3373                 return -EINVAL;
3374         }
3375
3376         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3377                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3378                 return -EINVAL;
3379         }
3380
3381         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3382         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3383                                                                 udp_tunnel));
3384 }
3385
3386 int
3387 rte_eth_led_on(uint16_t port_id)
3388 {
3389         struct rte_eth_dev *dev;
3390
3391         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3392         dev = &rte_eth_devices[port_id];
3393         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3394         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3395 }
3396
3397 int
3398 rte_eth_led_off(uint16_t port_id)
3399 {
3400         struct rte_eth_dev *dev;
3401
3402         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3403         dev = &rte_eth_devices[port_id];
3404         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3405         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3406 }
3407
3408 /*
3409  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3410  * an empty spot.
3411  */
3412 static int
3413 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3414 {
3415         struct rte_eth_dev_info dev_info;
3416         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3417         unsigned i;
3418         int ret;
3419
3420         ret = rte_eth_dev_info_get(port_id, &dev_info);
3421         if (ret != 0)
3422                 return -1;
3423
3424         for (i = 0; i < dev_info.max_mac_addrs; i++)
3425                 if (memcmp(addr, &dev->data->mac_addrs[i],
3426                                 RTE_ETHER_ADDR_LEN) == 0)
3427                         return i;
3428
3429         return -1;
3430 }
3431
3432 static const struct rte_ether_addr null_mac_addr;
3433
3434 int
3435 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3436                         uint32_t pool)
3437 {
3438         struct rte_eth_dev *dev;
3439         int index;
3440         uint64_t pool_mask;
3441         int ret;
3442
3443         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3444         dev = &rte_eth_devices[port_id];
3445         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3446
3447         if (rte_is_zero_ether_addr(addr)) {
3448                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3449                         port_id);
3450                 return -EINVAL;
3451         }
3452         if (pool >= ETH_64_POOLS) {
3453                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3454                 return -EINVAL;
3455         }
3456
3457         index = get_mac_addr_index(port_id, addr);
3458         if (index < 0) {
3459                 index = get_mac_addr_index(port_id, &null_mac_addr);
3460                 if (index < 0) {
3461                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3462                                 port_id);
3463                         return -ENOSPC;
3464                 }
3465         } else {
3466                 pool_mask = dev->data->mac_pool_sel[index];
3467
3468                 /* Check if both MAC address and pool is already there, and do nothing */
3469                 if (pool_mask & (1ULL << pool))
3470                         return 0;
3471         }
3472
3473         /* Update NIC */
3474         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3475
3476         if (ret == 0) {
3477                 /* Update address in NIC data structure */
3478                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3479
3480                 /* Update pool bitmap in NIC data structure */
3481                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3482         }
3483
3484         return eth_err(port_id, ret);
3485 }
3486
3487 int
3488 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3489 {
3490         struct rte_eth_dev *dev;
3491         int index;
3492
3493         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3494         dev = &rte_eth_devices[port_id];
3495         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3496
3497         index = get_mac_addr_index(port_id, addr);
3498         if (index == 0) {
3499                 RTE_ETHDEV_LOG(ERR,
3500                         "Port %u: Cannot remove default MAC address\n",
3501                         port_id);
3502                 return -EADDRINUSE;
3503         } else if (index < 0)
3504                 return 0;  /* Do nothing if address wasn't found */
3505
3506         /* Update NIC */
3507         (*dev->dev_ops->mac_addr_remove)(dev, index);
3508
3509         /* Update address in NIC data structure */
3510         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3511
3512         /* reset pool bitmap */
3513         dev->data->mac_pool_sel[index] = 0;
3514
3515         return 0;
3516 }
3517
3518 int
3519 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3520 {
3521         struct rte_eth_dev *dev;
3522         int ret;
3523
3524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3525
3526         if (!rte_is_valid_assigned_ether_addr(addr))
3527                 return -EINVAL;
3528
3529         dev = &rte_eth_devices[port_id];
3530         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3531
3532         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3533         if (ret < 0)
3534                 return ret;
3535
3536         /* Update default address in NIC data structure */
3537         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3538
3539         return 0;
3540 }
3541
3542
3543 /*
3544  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3545  * an empty spot.
3546  */
3547 static int
3548 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3549 {
3550         struct rte_eth_dev_info dev_info;
3551         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3552         unsigned i;
3553         int ret;
3554
3555         ret = rte_eth_dev_info_get(port_id, &dev_info);
3556         if (ret != 0)
3557                 return -1;
3558
3559         if (!dev->data->hash_mac_addrs)
3560                 return -1;
3561
3562         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3563                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3564                         RTE_ETHER_ADDR_LEN) == 0)
3565                         return i;
3566
3567         return -1;
3568 }
3569
3570 int
3571 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3572                                 uint8_t on)
3573 {
3574         int index;
3575         int ret;
3576         struct rte_eth_dev *dev;
3577
3578         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3579
3580         dev = &rte_eth_devices[port_id];
3581         if (rte_is_zero_ether_addr(addr)) {
3582                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3583                         port_id);
3584                 return -EINVAL;
3585         }
3586
3587         index = get_hash_mac_addr_index(port_id, addr);
3588         /* Check if it's already there, and do nothing */
3589         if ((index >= 0) && on)
3590                 return 0;
3591
3592         if (index < 0) {
3593                 if (!on) {
3594                         RTE_ETHDEV_LOG(ERR,
3595                                 "Port %u: the MAC address was not set in UTA\n",
3596                                 port_id);
3597                         return -EINVAL;
3598                 }
3599
3600                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3601                 if (index < 0) {
3602                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3603                                 port_id);
3604                         return -ENOSPC;
3605                 }
3606         }
3607
3608         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3609         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3610         if (ret == 0) {
3611                 /* Update address in NIC data structure */
3612                 if (on)
3613                         rte_ether_addr_copy(addr,
3614                                         &dev->data->hash_mac_addrs[index]);
3615                 else
3616                         rte_ether_addr_copy(&null_mac_addr,
3617                                         &dev->data->hash_mac_addrs[index]);
3618         }
3619
3620         return eth_err(port_id, ret);
3621 }
3622
3623 int
3624 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3625 {
3626         struct rte_eth_dev *dev;
3627
3628         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3629
3630         dev = &rte_eth_devices[port_id];
3631
3632         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3633         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3634                                                                        on));
3635 }
3636
3637 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3638                                         uint16_t tx_rate)
3639 {
3640         struct rte_eth_dev *dev;
3641         struct rte_eth_dev_info dev_info;
3642         struct rte_eth_link link;
3643         int ret;
3644
3645         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3646
3647         ret = rte_eth_dev_info_get(port_id, &dev_info);
3648         if (ret != 0)
3649                 return ret;
3650
3651         dev = &rte_eth_devices[port_id];
3652         link = dev->data->dev_link;
3653
3654         if (queue_idx > dev_info.max_tx_queues) {
3655                 RTE_ETHDEV_LOG(ERR,
3656                         "Set queue rate limit:port %u: invalid queue id=%u\n",
3657                         port_id, queue_idx);
3658                 return -EINVAL;
3659         }
3660
3661         if (tx_rate > link.link_speed) {
3662                 RTE_ETHDEV_LOG(ERR,
3663                         "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
3664                         tx_rate, link.link_speed);
3665                 return -EINVAL;
3666         }
3667
3668         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3669         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3670                                                         queue_idx, tx_rate));
3671 }
3672
3673 int
3674 rte_eth_mirror_rule_set(uint16_t port_id,
3675                         struct rte_eth_mirror_conf *mirror_conf,
3676                         uint8_t rule_id, uint8_t on)
3677 {
3678         struct rte_eth_dev *dev;
3679
3680         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3681         if (mirror_conf->rule_type == 0) {
3682                 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
3683                 return -EINVAL;
3684         }
3685
3686         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3687                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3688                         ETH_64_POOLS - 1);
3689                 return -EINVAL;
3690         }
3691
3692         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3693              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3694             (mirror_conf->pool_mask == 0)) {
3695                 RTE_ETHDEV_LOG(ERR,
3696                         "Invalid mirror pool, pool mask can not be 0\n");
3697                 return -EINVAL;
3698         }
3699
3700         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3701             mirror_conf->vlan.vlan_mask == 0) {
3702                 RTE_ETHDEV_LOG(ERR,
3703                         "Invalid vlan mask, vlan mask can not be 0\n");
3704                 return -EINVAL;
3705         }
3706
3707         dev = &rte_eth_devices[port_id];
3708         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3709
3710         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3711                                                 mirror_conf, rule_id, on));
3712 }
3713
3714 int
3715 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3716 {
3717         struct rte_eth_dev *dev;
3718
3719         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3720
3721         dev = &rte_eth_devices[port_id];
3722         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3723
3724         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3725                                                                    rule_id));
3726 }
3727
3728 RTE_INIT(eth_dev_init_cb_lists)
3729 {
3730         int i;
3731
3732         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3733                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3734 }
3735
3736 int
3737 rte_eth_dev_callback_register(uint16_t port_id,
3738                         enum rte_eth_event_type event,
3739                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3740 {
3741         struct rte_eth_dev *dev;
3742         struct rte_eth_dev_callback *user_cb;
3743         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3744         uint16_t last_port;
3745
3746         if (!cb_fn)
3747                 return -EINVAL;
3748
3749         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3750                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3751                 return -EINVAL;
3752         }
3753
3754         if (port_id == RTE_ETH_ALL) {
3755                 next_port = 0;
3756                 last_port = RTE_MAX_ETHPORTS - 1;
3757         } else {
3758                 next_port = last_port = port_id;
3759         }
3760
3761         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3762
3763         do {
3764                 dev = &rte_eth_devices[next_port];
3765
3766                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3767                         if (user_cb->cb_fn == cb_fn &&
3768                                 user_cb->cb_arg == cb_arg &&
3769                                 user_cb->event == event) {
3770                                 break;
3771                         }
3772                 }
3773
3774                 /* create a new callback. */
3775                 if (user_cb == NULL) {
3776                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3777                                 sizeof(struct rte_eth_dev_callback), 0);
3778                         if (user_cb != NULL) {
3779                                 user_cb->cb_fn = cb_fn;
3780                                 user_cb->cb_arg = cb_arg;
3781                                 user_cb->event = event;
3782                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3783                                                   user_cb, next);
3784                         } else {
3785                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3786                                 rte_eth_dev_callback_unregister(port_id, event,
3787                                                                 cb_fn, cb_arg);
3788                                 return -ENOMEM;
3789                         }
3790
3791                 }
3792         } while (++next_port <= last_port);
3793
3794         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3795         return 0;
3796 }
3797
3798 int
3799 rte_eth_dev_callback_unregister(uint16_t port_id,
3800                         enum rte_eth_event_type event,
3801                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3802 {
3803         int ret;
3804         struct rte_eth_dev *dev;
3805         struct rte_eth_dev_callback *cb, *next;
3806         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3807         uint16_t last_port;
3808
3809         if (!cb_fn)
3810                 return -EINVAL;
3811
3812         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3813                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3814                 return -EINVAL;
3815         }
3816
3817         if (port_id == RTE_ETH_ALL) {
3818                 next_port = 0;
3819                 last_port = RTE_MAX_ETHPORTS - 1;
3820         } else {
3821                 next_port = last_port = port_id;
3822         }
3823
3824         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3825
3826         do {
3827                 dev = &rte_eth_devices[next_port];
3828                 ret = 0;
3829                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3830                      cb = next) {
3831
3832                         next = TAILQ_NEXT(cb, next);
3833
3834                         if (cb->cb_fn != cb_fn || cb->event != event ||
3835                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3836                                 continue;
3837
3838                         /*
3839                          * if this callback is not executing right now,
3840                          * then remove it.
3841                          */
3842                         if (cb->active == 0) {
3843                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3844                                 rte_free(cb);
3845                         } else {
3846                                 ret = -EAGAIN;
3847                         }
3848                 }
3849         } while (++next_port <= last_port);
3850
3851         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3852         return ret;
3853 }
3854
3855 int
3856 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3857         enum rte_eth_event_type event, void *ret_param)
3858 {
3859         struct rte_eth_dev_callback *cb_lst;
3860         struct rte_eth_dev_callback dev_cb;
3861         int rc = 0;
3862
3863         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3864         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3865                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3866                         continue;
3867                 dev_cb = *cb_lst;
3868                 cb_lst->active = 1;
3869                 if (ret_param != NULL)
3870                         dev_cb.ret_param = ret_param;
3871
3872                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3873                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3874                                 dev_cb.cb_arg, dev_cb.ret_param);
3875                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3876                 cb_lst->active = 0;
3877         }
3878         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3879         return rc;
3880 }
3881
3882 void
3883 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3884 {
3885         if (dev == NULL)
3886                 return;
3887
3888         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3889
3890         dev->state = RTE_ETH_DEV_ATTACHED;
3891 }
3892
3893 int
3894 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3895 {
3896         uint32_t vec;
3897         struct rte_eth_dev *dev;
3898         struct rte_intr_handle *intr_handle;
3899         uint16_t qid;
3900         int rc;
3901
3902         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3903
3904         dev = &rte_eth_devices[port_id];
3905
3906         if (!dev->intr_handle) {
3907                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3908                 return -ENOTSUP;
3909         }
3910
3911         intr_handle = dev->intr_handle;
3912         if (!intr_handle->intr_vec) {
3913                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3914                 return -EPERM;
3915         }
3916
3917         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3918                 vec = intr_handle->intr_vec[qid];
3919                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3920                 if (rc && rc != -EEXIST) {
3921                         RTE_ETHDEV_LOG(ERR,
3922                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3923                                 port_id, qid, op, epfd, vec);
3924                 }
3925         }
3926
3927         return 0;
3928 }
3929
3930 int
3931 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3932 {
3933         struct rte_intr_handle *intr_handle;
3934         struct rte_eth_dev *dev;
3935         unsigned int efd_idx;
3936         uint32_t vec;
3937         int fd;
3938
3939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3940
3941         dev = &rte_eth_devices[port_id];
3942
3943         if (queue_id >= dev->data->nb_rx_queues) {
3944                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3945                 return -1;
3946         }
3947
3948         if (!dev->intr_handle) {
3949                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3950                 return -1;
3951         }
3952
3953         intr_handle = dev->intr_handle;
3954         if (!intr_handle->intr_vec) {
3955                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3956                 return -1;
3957         }
3958
3959         vec = intr_handle->intr_vec[queue_id];
3960         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3961                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3962         fd = intr_handle->efds[efd_idx];
3963
3964         return fd;
3965 }
3966
3967 const struct rte_memzone *
3968 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3969                          uint16_t queue_id, size_t size, unsigned align,
3970                          int socket_id)
3971 {
3972         char z_name[RTE_MEMZONE_NAMESIZE];
3973         const struct rte_memzone *mz;
3974         int rc;
3975
3976         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3977                       dev->data->port_id, queue_id, ring_name);
3978         if (rc >= RTE_MEMZONE_NAMESIZE) {
3979                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
3980                 rte_errno = ENAMETOOLONG;
3981                 return NULL;
3982         }
3983
3984         mz = rte_memzone_lookup(z_name);
3985         if (mz)
3986                 return mz;
3987
3988         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3989                         RTE_MEMZONE_IOVA_CONTIG, align);
3990 }
3991
3992 int
3993 rte_eth_dev_create(struct rte_device *device, const char *name,
3994         size_t priv_data_size,
3995         ethdev_bus_specific_init ethdev_bus_specific_init,
3996         void *bus_init_params,
3997         ethdev_init_t ethdev_init, void *init_params)
3998 {
3999         struct rte_eth_dev *ethdev;
4000         int retval;
4001
4002         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4003
4004         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4005                 ethdev = rte_eth_dev_allocate(name);
4006                 if (!ethdev)
4007                         return -ENODEV;
4008
4009                 if (priv_data_size) {
4010                         ethdev->data->dev_private = rte_zmalloc_socket(
4011                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4012                                 device->numa_node);
4013
4014                         if (!ethdev->data->dev_private) {
4015                                 RTE_LOG(ERR, EAL, "failed to allocate private data");
4016                                 retval = -ENOMEM;
4017                                 goto probe_failed;
4018                         }
4019                 }
4020         } else {
4021                 ethdev = rte_eth_dev_attach_secondary(name);
4022                 if (!ethdev) {
4023                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4024                                 "ethdev doesn't exist");
4025                         return  -ENODEV;
4026                 }
4027         }
4028
4029         ethdev->device = device;
4030
4031         if (ethdev_bus_specific_init) {
4032                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4033                 if (retval) {
4034                         RTE_LOG(ERR, EAL,
4035                                 "ethdev bus specific initialisation failed");
4036                         goto probe_failed;
4037                 }
4038         }
4039
4040         retval = ethdev_init(ethdev, init_params);
4041         if (retval) {
4042                 RTE_LOG(ERR, EAL, "ethdev initialisation failed");
4043                 goto probe_failed;
4044         }
4045
4046         rte_eth_dev_probing_finish(ethdev);
4047
4048         return retval;
4049
4050 probe_failed:
4051         rte_eth_dev_release_port(ethdev);
4052         return retval;
4053 }
4054
4055 int
4056 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4057         ethdev_uninit_t ethdev_uninit)
4058 {
4059         int ret;
4060
4061         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4062         if (!ethdev)
4063                 return -ENODEV;
4064
4065         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4066
4067         ret = ethdev_uninit(ethdev);
4068         if (ret)
4069                 return ret;
4070
4071         return rte_eth_dev_release_port(ethdev);
4072 }
4073
4074 int
4075 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4076                           int epfd, int op, void *data)
4077 {
4078         uint32_t vec;
4079         struct rte_eth_dev *dev;
4080         struct rte_intr_handle *intr_handle;
4081         int rc;
4082
4083         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4084
4085         dev = &rte_eth_devices[port_id];
4086         if (queue_id >= dev->data->nb_rx_queues) {
4087                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4088                 return -EINVAL;
4089         }
4090
4091         if (!dev->intr_handle) {
4092                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4093                 return -ENOTSUP;
4094         }
4095
4096         intr_handle = dev->intr_handle;
4097         if (!intr_handle->intr_vec) {
4098                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4099                 return -EPERM;
4100         }
4101
4102         vec = intr_handle->intr_vec[queue_id];
4103         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4104         if (rc && rc != -EEXIST) {
4105                 RTE_ETHDEV_LOG(ERR,
4106                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4107                         port_id, queue_id, op, epfd, vec);
4108                 return rc;
4109         }
4110
4111         return 0;
4112 }
4113
4114 int
4115 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4116                            uint16_t queue_id)
4117 {
4118         struct rte_eth_dev *dev;
4119
4120         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4121
4122         dev = &rte_eth_devices[port_id];
4123
4124         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4125         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4126                                                                 queue_id));
4127 }
4128
4129 int
4130 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4131                             uint16_t queue_id)
4132 {
4133         struct rte_eth_dev *dev;
4134
4135         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4136
4137         dev = &rte_eth_devices[port_id];
4138
4139         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4140         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4141                                                                 queue_id));
4142 }
4143
4144
4145 int
4146 rte_eth_dev_filter_supported(uint16_t port_id,
4147                              enum rte_filter_type filter_type)
4148 {
4149         struct rte_eth_dev *dev;
4150
4151         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4152
4153         dev = &rte_eth_devices[port_id];
4154         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4155         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4156                                 RTE_ETH_FILTER_NOP, NULL);
4157 }
4158
4159 int
4160 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4161                         enum rte_filter_op filter_op, void *arg)
4162 {
4163         struct rte_eth_dev *dev;
4164
4165         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4166
4167         dev = &rte_eth_devices[port_id];
4168         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4169         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4170                                                              filter_op, arg));
4171 }
4172
4173 const struct rte_eth_rxtx_callback *
4174 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4175                 rte_rx_callback_fn fn, void *user_param)
4176 {
4177 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4178         rte_errno = ENOTSUP;
4179         return NULL;
4180 #endif
4181         struct rte_eth_dev *dev;
4182
4183         /* check input parameters */
4184         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4185                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4186                 rte_errno = EINVAL;
4187                 return NULL;
4188         }
4189         dev = &rte_eth_devices[port_id];
4190         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4191                 rte_errno = EINVAL;
4192                 return NULL;
4193         }
4194         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4195
4196         if (cb == NULL) {
4197                 rte_errno = ENOMEM;
4198                 return NULL;
4199         }
4200
4201         cb->fn.rx = fn;
4202         cb->param = user_param;
4203
4204         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4205         /* Add the callbacks in fifo order. */
4206         struct rte_eth_rxtx_callback *tail =
4207                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4208
4209         if (!tail) {
4210                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4211
4212         } else {
4213                 while (tail->next)
4214                         tail = tail->next;
4215                 tail->next = cb;
4216         }
4217         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4218
4219         return cb;
4220 }
4221
4222 const struct rte_eth_rxtx_callback *
4223 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4224                 rte_rx_callback_fn fn, void *user_param)
4225 {
4226 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4227         rte_errno = ENOTSUP;
4228         return NULL;
4229 #endif
4230         /* check input parameters */
4231         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4232                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4233                 rte_errno = EINVAL;
4234                 return NULL;
4235         }
4236
4237         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4238
4239         if (cb == NULL) {
4240                 rte_errno = ENOMEM;
4241                 return NULL;
4242         }
4243
4244         cb->fn.rx = fn;
4245         cb->param = user_param;
4246
4247         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4248         /* Add the callbacks at fisrt position*/
4249         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4250         rte_smp_wmb();
4251         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4252         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4253
4254         return cb;
4255 }
4256
4257 const struct rte_eth_rxtx_callback *
4258 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4259                 rte_tx_callback_fn fn, void *user_param)
4260 {
4261 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4262         rte_errno = ENOTSUP;
4263         return NULL;
4264 #endif
4265         struct rte_eth_dev *dev;
4266
4267         /* check input parameters */
4268         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4269                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4270                 rte_errno = EINVAL;
4271                 return NULL;
4272         }
4273
4274         dev = &rte_eth_devices[port_id];
4275         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4276                 rte_errno = EINVAL;
4277                 return NULL;
4278         }
4279
4280         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4281
4282         if (cb == NULL) {
4283                 rte_errno = ENOMEM;
4284                 return NULL;
4285         }
4286
4287         cb->fn.tx = fn;
4288         cb->param = user_param;
4289
4290         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4291         /* Add the callbacks in fifo order. */
4292         struct rte_eth_rxtx_callback *tail =
4293                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4294
4295         if (!tail) {
4296                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4297
4298         } else {
4299                 while (tail->next)
4300                         tail = tail->next;
4301                 tail->next = cb;
4302         }
4303         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4304
4305         return cb;
4306 }
4307
4308 int
4309 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4310                 const struct rte_eth_rxtx_callback *user_cb)
4311 {
4312 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4313         return -ENOTSUP;
4314 #endif
4315         /* Check input parameters. */
4316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4317         if (user_cb == NULL ||
4318                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4319                 return -EINVAL;
4320
4321         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4322         struct rte_eth_rxtx_callback *cb;
4323         struct rte_eth_rxtx_callback **prev_cb;
4324         int ret = -EINVAL;
4325
4326         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4327         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4328         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4329                 cb = *prev_cb;
4330                 if (cb == user_cb) {
4331                         /* Remove the user cb from the callback list. */
4332                         *prev_cb = cb->next;
4333                         ret = 0;
4334                         break;
4335                 }
4336         }
4337         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4338
4339         return ret;
4340 }
4341
4342 int
4343 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4344                 const struct rte_eth_rxtx_callback *user_cb)
4345 {
4346 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4347         return -ENOTSUP;
4348 #endif
4349         /* Check input parameters. */
4350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4351         if (user_cb == NULL ||
4352                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4353                 return -EINVAL;
4354
4355         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4356         int ret = -EINVAL;
4357         struct rte_eth_rxtx_callback *cb;
4358         struct rte_eth_rxtx_callback **prev_cb;
4359
4360         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4361         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4362         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4363                 cb = *prev_cb;
4364                 if (cb == user_cb) {
4365                         /* Remove the user cb from the callback list. */
4366                         *prev_cb = cb->next;
4367                         ret = 0;
4368                         break;
4369                 }
4370         }
4371         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4372
4373         return ret;
4374 }
4375
4376 int
4377 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4378         struct rte_eth_rxq_info *qinfo)
4379 {
4380         struct rte_eth_dev *dev;
4381
4382         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4383
4384         if (qinfo == NULL)
4385                 return -EINVAL;
4386
4387         dev = &rte_eth_devices[port_id];
4388         if (queue_id >= dev->data->nb_rx_queues) {
4389                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4390                 return -EINVAL;
4391         }
4392
4393         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4394                 RTE_ETHDEV_LOG(INFO,
4395                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4396                         queue_id, port_id);
4397                 return -EINVAL;
4398         }
4399
4400         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4401
4402         memset(qinfo, 0, sizeof(*qinfo));
4403         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4404         return 0;
4405 }
4406
4407 int
4408 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4409         struct rte_eth_txq_info *qinfo)
4410 {
4411         struct rte_eth_dev *dev;
4412
4413         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4414
4415         if (qinfo == NULL)
4416                 return -EINVAL;
4417
4418         dev = &rte_eth_devices[port_id];
4419         if (queue_id >= dev->data->nb_tx_queues) {
4420                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4421                 return -EINVAL;
4422         }
4423
4424         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4425                 RTE_ETHDEV_LOG(INFO,
4426                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4427                         queue_id, port_id);
4428                 return -EINVAL;
4429         }
4430
4431         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4432
4433         memset(qinfo, 0, sizeof(*qinfo));
4434         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4435
4436         return 0;
4437 }
4438
4439 int
4440 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4441                           struct rte_eth_burst_mode *mode)
4442 {
4443         struct rte_eth_dev *dev;
4444
4445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4446
4447         if (mode == NULL)
4448                 return -EINVAL;
4449
4450         dev = &rte_eth_devices[port_id];
4451
4452         if (queue_id >= dev->data->nb_rx_queues) {
4453                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4454                 return -EINVAL;
4455         }
4456
4457         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4458         memset(mode, 0, sizeof(*mode));
4459         return eth_err(port_id,
4460                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4461 }
4462
4463 int
4464 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4465                           struct rte_eth_burst_mode *mode)
4466 {
4467         struct rte_eth_dev *dev;
4468
4469         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4470
4471         if (mode == NULL)
4472                 return -EINVAL;
4473
4474         dev = &rte_eth_devices[port_id];
4475
4476         if (queue_id >= dev->data->nb_tx_queues) {
4477                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4478                 return -EINVAL;
4479         }
4480
4481         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4482         memset(mode, 0, sizeof(*mode));
4483         return eth_err(port_id,
4484                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4485 }
4486
4487 const char *
4488 rte_eth_burst_mode_option_name(uint64_t option)
4489 {
4490         const char *name = "";
4491         unsigned int i;
4492
4493         for (i = 0; i < RTE_DIM(rte_burst_option_names); ++i) {
4494                 if (option == rte_burst_option_names[i].option) {
4495                         name = rte_burst_option_names[i].name;
4496                         break;
4497                 }
4498         }
4499
4500         return name;
4501 }
4502
4503 int
4504 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4505                              struct rte_ether_addr *mc_addr_set,
4506                              uint32_t nb_mc_addr)
4507 {
4508         struct rte_eth_dev *dev;
4509
4510         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4511
4512         dev = &rte_eth_devices[port_id];
4513         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4514         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4515                                                 mc_addr_set, nb_mc_addr));
4516 }
4517
4518 int
4519 rte_eth_timesync_enable(uint16_t port_id)
4520 {
4521         struct rte_eth_dev *dev;
4522
4523         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4524         dev = &rte_eth_devices[port_id];
4525
4526         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4527         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4528 }
4529
4530 int
4531 rte_eth_timesync_disable(uint16_t port_id)
4532 {
4533         struct rte_eth_dev *dev;
4534
4535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4536         dev = &rte_eth_devices[port_id];
4537
4538         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4539         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4540 }
4541
4542 int
4543 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4544                                    uint32_t flags)
4545 {
4546         struct rte_eth_dev *dev;
4547
4548         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4549         dev = &rte_eth_devices[port_id];
4550
4551         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4552         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4553                                 (dev, timestamp, flags));
4554 }
4555
4556 int
4557 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4558                                    struct timespec *timestamp)
4559 {
4560         struct rte_eth_dev *dev;
4561
4562         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4563         dev = &rte_eth_devices[port_id];
4564
4565         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4566         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4567                                 (dev, timestamp));
4568 }
4569
4570 int
4571 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4572 {
4573         struct rte_eth_dev *dev;
4574
4575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4576         dev = &rte_eth_devices[port_id];
4577
4578         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4579         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4580                                                                       delta));
4581 }
4582
4583 int
4584 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4585 {
4586         struct rte_eth_dev *dev;
4587
4588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4589         dev = &rte_eth_devices[port_id];
4590
4591         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4592         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4593                                                                 timestamp));
4594 }
4595
4596 int
4597 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4598 {
4599         struct rte_eth_dev *dev;
4600
4601         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4602         dev = &rte_eth_devices[port_id];
4603
4604         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4605         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4606                                                                 timestamp));
4607 }
4608
4609 int
4610 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4611 {
4612         struct rte_eth_dev *dev;
4613
4614         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4615         dev = &rte_eth_devices[port_id];
4616
4617         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4618         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4619 }
4620
4621 int
4622 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4623 {
4624         struct rte_eth_dev *dev;
4625
4626         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4627
4628         dev = &rte_eth_devices[port_id];
4629         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4630         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4631 }
4632
4633 int
4634 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4635 {
4636         struct rte_eth_dev *dev;
4637
4638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4639
4640         dev = &rte_eth_devices[port_id];
4641         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4642         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4643 }
4644
4645 int
4646 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4647 {
4648         struct rte_eth_dev *dev;
4649
4650         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4651
4652         dev = &rte_eth_devices[port_id];
4653         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4654         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4655 }
4656
4657 int
4658 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4659 {
4660         struct rte_eth_dev *dev;
4661
4662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4663
4664         dev = &rte_eth_devices[port_id];
4665         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4666         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4667 }
4668
4669 int
4670 rte_eth_dev_get_module_info(uint16_t port_id,
4671                             struct rte_eth_dev_module_info *modinfo)
4672 {
4673         struct rte_eth_dev *dev;
4674
4675         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4676
4677         dev = &rte_eth_devices[port_id];
4678         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4679         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4680 }
4681
4682 int
4683 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4684                               struct rte_dev_eeprom_info *info)
4685 {
4686         struct rte_eth_dev *dev;
4687
4688         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4689
4690         dev = &rte_eth_devices[port_id];
4691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4692         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4693 }
4694
4695 int
4696 rte_eth_dev_get_dcb_info(uint16_t port_id,
4697                              struct rte_eth_dcb_info *dcb_info)
4698 {
4699         struct rte_eth_dev *dev;
4700
4701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4702
4703         dev = &rte_eth_devices[port_id];
4704         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4705
4706         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4707         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4708 }
4709
4710 int
4711 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4712                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4713 {
4714         struct rte_eth_dev *dev;
4715
4716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4717         if (l2_tunnel == NULL) {
4718                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4719                 return -EINVAL;
4720         }
4721
4722         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4723                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4724                 return -EINVAL;
4725         }
4726
4727         dev = &rte_eth_devices[port_id];
4728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4729                                 -ENOTSUP);
4730         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4731                                                                 l2_tunnel));
4732 }
4733
4734 int
4735 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4736                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4737                                   uint32_t mask,
4738                                   uint8_t en)
4739 {
4740         struct rte_eth_dev *dev;
4741
4742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4743
4744         if (l2_tunnel == NULL) {
4745                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4746                 return -EINVAL;
4747         }
4748
4749         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4750                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4751                 return -EINVAL;
4752         }
4753
4754         if (mask == 0) {
4755                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4756                 return -EINVAL;
4757         }
4758
4759         dev = &rte_eth_devices[port_id];
4760         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4761                                 -ENOTSUP);
4762         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4763                                                         l2_tunnel, mask, en));
4764 }
4765
4766 static void
4767 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4768                            const struct rte_eth_desc_lim *desc_lim)
4769 {
4770         if (desc_lim->nb_align != 0)
4771                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4772
4773         if (desc_lim->nb_max != 0)
4774                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4775
4776         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4777 }
4778
4779 int
4780 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4781                                  uint16_t *nb_rx_desc,
4782                                  uint16_t *nb_tx_desc)
4783 {
4784         struct rte_eth_dev_info dev_info;
4785         int ret;
4786
4787         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4788
4789         ret = rte_eth_dev_info_get(port_id, &dev_info);
4790         if (ret != 0)
4791                 return ret;
4792
4793         if (nb_rx_desc != NULL)
4794                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4795
4796         if (nb_tx_desc != NULL)
4797                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4798
4799         return 0;
4800 }
4801
4802 int
4803 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4804                                    struct rte_eth_hairpin_cap *cap)
4805 {
4806         struct rte_eth_dev *dev;
4807
4808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4809
4810         dev = &rte_eth_devices[port_id];
4811         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
4812         memset(cap, 0, sizeof(*cap));
4813         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
4814 }
4815
4816 int
4817 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
4818 {
4819         if (dev->data->rx_queue_state[queue_id] ==
4820             RTE_ETH_QUEUE_STATE_HAIRPIN)
4821                 return 1;
4822         return 0;
4823 }
4824
4825 int
4826 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
4827 {
4828         if (dev->data->tx_queue_state[queue_id] ==
4829             RTE_ETH_QUEUE_STATE_HAIRPIN)
4830                 return 1;
4831         return 0;
4832 }
4833
4834 int
4835 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4836 {
4837         struct rte_eth_dev *dev;
4838
4839         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4840
4841         if (pool == NULL)
4842                 return -EINVAL;
4843
4844         dev = &rte_eth_devices[port_id];
4845
4846         if (*dev->dev_ops->pool_ops_supported == NULL)
4847                 return 1; /* all pools are supported */
4848
4849         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4850 }
4851
4852 /**
4853  * A set of values to describe the possible states of a switch domain.
4854  */
4855 enum rte_eth_switch_domain_state {
4856         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4857         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4858 };
4859
4860 /**
4861  * Array of switch domains available for allocation. Array is sized to
4862  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4863  * ethdev ports in a single process.
4864  */
4865 static struct rte_eth_dev_switch {
4866         enum rte_eth_switch_domain_state state;
4867 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4868
4869 int
4870 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4871 {
4872         unsigned int i;
4873
4874         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4875
4876         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4877                 i < RTE_MAX_ETHPORTS; i++) {
4878                 if (rte_eth_switch_domains[i].state ==
4879                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4880                         rte_eth_switch_domains[i].state =
4881                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4882                         *domain_id = i;
4883                         return 0;
4884                 }
4885         }
4886
4887         return -ENOSPC;
4888 }
4889
4890 int
4891 rte_eth_switch_domain_free(uint16_t domain_id)
4892 {
4893         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4894                 domain_id >= RTE_MAX_ETHPORTS)
4895                 return -EINVAL;
4896
4897         if (rte_eth_switch_domains[domain_id].state !=
4898                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4899                 return -EINVAL;
4900
4901         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4902
4903         return 0;
4904 }
4905
4906 static int
4907 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4908 {
4909         int state;
4910         struct rte_kvargs_pair *pair;
4911         char *letter;
4912
4913         arglist->str = strdup(str_in);
4914         if (arglist->str == NULL)
4915                 return -ENOMEM;
4916
4917         letter = arglist->str;
4918         state = 0;
4919         arglist->count = 0;
4920         pair = &arglist->pairs[0];
4921         while (1) {
4922                 switch (state) {
4923                 case 0: /* Initial */
4924                         if (*letter == '=')
4925                                 return -EINVAL;
4926                         else if (*letter == '\0')
4927                                 return 0;
4928
4929                         state = 1;
4930                         pair->key = letter;
4931                         /* fall-thru */
4932
4933                 case 1: /* Parsing key */
4934                         if (*letter == '=') {
4935                                 *letter = '\0';
4936                                 pair->value = letter + 1;
4937                                 state = 2;
4938                         } else if (*letter == ',' || *letter == '\0')
4939                                 return -EINVAL;
4940                         break;
4941
4942
4943                 case 2: /* Parsing value */
4944                         if (*letter == '[')
4945                                 state = 3;
4946                         else if (*letter == ',') {
4947                                 *letter = '\0';
4948                                 arglist->count++;
4949                                 pair = &arglist->pairs[arglist->count];
4950                                 state = 0;
4951                         } else if (*letter == '\0') {
4952                                 letter--;
4953                                 arglist->count++;
4954                                 pair = &arglist->pairs[arglist->count];
4955                                 state = 0;
4956                         }
4957                         break;
4958
4959                 case 3: /* Parsing list */
4960                         if (*letter == ']')
4961                                 state = 2;
4962                         else if (*letter == '\0')
4963                                 return -EINVAL;
4964                         break;
4965                 }
4966                 letter++;
4967         }
4968 }
4969
4970 int
4971 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4972 {
4973         struct rte_kvargs args;
4974         struct rte_kvargs_pair *pair;
4975         unsigned int i;
4976         int result = 0;
4977
4978         memset(eth_da, 0, sizeof(*eth_da));
4979
4980         result = rte_eth_devargs_tokenise(&args, dargs);
4981         if (result < 0)
4982                 goto parse_cleanup;
4983
4984         for (i = 0; i < args.count; i++) {
4985                 pair = &args.pairs[i];
4986                 if (strcmp("representor", pair->key) == 0) {
4987                         result = rte_eth_devargs_parse_list(pair->value,
4988                                 rte_eth_devargs_parse_representor_ports,
4989                                 eth_da);
4990                         if (result < 0)
4991                                 goto parse_cleanup;
4992                 }
4993         }
4994
4995 parse_cleanup:
4996         if (args.str)
4997                 free(args.str);
4998
4999         return result;
5000 }
5001
5002 RTE_INIT(ethdev_init_log)
5003 {
5004         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
5005         if (rte_eth_dev_logtype >= 0)
5006                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
5007 }