/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>

#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
        uint64_t option;
        const char *name;
} rte_burst_option_names[] = {
        { RTE_ETH_BURST_SCALAR, "Scalar" },
        { RTE_ETH_BURST_VECTOR, "Vector" },

        { RTE_ETH_BURST_ALTIVEC, "AltiVec" },
        { RTE_ETH_BURST_NEON, "Neon" },
        { RTE_ETH_BURST_SSE, "SSE" },
        { RTE_ETH_BURST_AVX2, "AVX2" },
        { RTE_ETH_BURST_AVX512, "AVX512" },

        { RTE_ETH_BURST_SCATTERED, "Scattered" },
        { RTE_ETH_BURST_BULK_ALLOC, "Bulk Alloc" },
        { RTE_ETH_BURST_SIMPLE, "Simple" },
        { RTE_ETH_BURST_PER_QUEUE, "Per Queue" },
};

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameter, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
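
/*
 * A minimal usage sketch (not part of this file's logic): callbacks of
 * this kind are registered through the public API declared in
 * rte_ethdev.h, e.g. to be notified of link state changes. The callback
 * body below (lsc_event_cb) is an illustrative placeholder.
 *
 *   static int
 *   lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                void *cb_arg, void *ret_param)
 *   {
 *           RTE_SET_USED(cb_arg);
 *           RTE_SET_USED(ret_param);
 *           printf("port %u: event %d\n", port_id, (int)event);
 *           return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_cb, NULL);
 */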

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle the pure class filter (i.e. without any bus-level argument)
         * from the future new syntax.
         * rte_devargs_parse() does not support the new syntax yet,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of the old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_devices */
                /* If not a pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in the middle of an rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get the next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidates */
                }
                /* A device matches the bus part; now check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev ports to iterate over. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
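
/*
 * A minimal usage sketch (assumptions: a devargs string in one of the
 * syntaxes listed above; the RTE_ETH_FOREACH_MATCHING_DEV() helper in
 * rte_ethdev.h wraps the same loop):
 *
 *   struct rte_dev_iterator iter;
 *   uint16_t port_id;
 *
 *   if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0)
 *           for (port_id = rte_eth_iterator_next(&iter);
 *                port_id != RTE_MAX_ETHPORTS;
 *                port_id = rte_eth_iterator_next(&iter))
 *                   printf("matched port %u\n", port_id);
 *
 * rte_eth_iterator_cleanup() is called internally once iteration reaches
 * the end; call it explicitly only when breaking out of the loop early.
 */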

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))
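
/*
 * A minimal sketch of the application-facing counterpart: the public
 * RTE_ETH_FOREACH_DEV() macro in rte_ethdev.h additionally skips ports
 * that have an owner, so iterating over usable ports looks like this
 * (illustrative only):
 *
 *   uint16_t port_id;
 *
 *   RTE_ETH_FOREACH_DEV(port_id)
 *           printf("usable port: %u\n", port_id);
 */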

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
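
/*
 * A minimal sketch of how a PMD's probe path typically uses the
 * allocator above (the names "net_my_pmd0" and my_pmd_dev_ops are
 * hypothetical placeholders):
 *
 *   struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_my_pmd0");
 *
 *   if (eth_dev == NULL)
 *           return -ENOMEM;
 *   eth_dev->dev_ops = &my_pmd_dev_ops;
 *   eth_dev->data->mac_addrs = rte_zmalloc("net_my_pmd0",
 *                                          RTE_ETHER_ADDR_LEN, 0);
 *   ...
 *   rte_eth_dev_probing_finish(eth_dev);
 */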

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                _rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}
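
/*
 * A minimal ownership sketch (assumption: "my-app" is an arbitrary owner
 * name chosen by the caller): take an owner id once, then claim a port
 * so that other entities leave it alone.
 *
 *   struct rte_eth_dev_owner owner = { .name = "my-app" };
 *
 *   if (rte_eth_dev_owner_new(&owner.id) != 0 ||
 *       rte_eth_dev_owner_set(port_id, &owner) != 0)
 *           rte_exit(EXIT_FAILURE, "cannot own port %u\n", port_id);
 *
 * The port is then skipped by RTE_ETH_FOREACH_DEV in unrelated code.
 */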

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}
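
/*
 * A minimal lookup sketch (assumption: a device was probed under the
 * PCI name "0000:08:00.0"):
 *
 *   uint16_t port_id;
 *
 *   if (rte_eth_dev_get_port_by_name("0000:08:00.0", &port_id) == 0)
 *           printf("0000:08:00.0 is port %u\n", port_id);
 */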

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
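
/*
 * A minimal deferred-start sketch (assumptions: mb_pool is an existing
 * mbuf pool, dev_info was filled by rte_eth_dev_info_get(), and the PMD
 * supports deferred queue start): a queue marked rx_deferred_start is
 * left stopped by rte_eth_dev_start() and is brought up explicitly with
 * the call above.
 *
 *   struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *   rxconf.rx_deferred_start = 1;
 *   rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                          &rxconf, mb_pool);
 *   rte_eth_dev_start(port_id);
 *   ...
 *   rte_eth_dev_rx_queue_start(port_id, 0);
 */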

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
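
/*
 * A minimal usage sketch: translate a numeric speed into the bitflag
 * expected by rte_eth_conf.link_speeds when fixing the link speed.
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                      rte_eth_speed_bitflag(ETH_SPEED_NUM_25G,
 *                                            ETH_LINK_FULL_DUPLEX);
 */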

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
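
/*
 * A minimal sketch: each lookup above takes a single offload bit, so a
 * combined mask must be decomposed bit by bit, e.g. to log a device's
 * Rx capabilities (dev_info assumed filled by rte_eth_dev_info_get()):
 *
 *   uint64_t capa = dev_info.rx_offload_capa;
 *   unsigned int bit;
 *
 *   for (bit = 0; bit < 64; bit++)
 *           if (capa & (UINT64_C(1) << bit))
 *                   printf("%s\n",
 *                          rte_eth_dev_rx_offload_name(UINT64_C(1) << bit));
 */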

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
         */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* If the number of queues specified by the application for both Rx
         * and Tx is zero, use driver preferred values. This cannot be done
         * individually, as it is valid for either Tx or Rx (but not both)
         * to be zero. If the driver does not provide any preferred values,
         * fall back to EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned int)RTE_ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        RTE_ETHER_MAX_LEN;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
             dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
             dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }

        dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                ret = diag;
                goto rollback;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                ret = diag;
                goto rollback;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        return 0;

rollback:
        memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

        return ret;
}
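
/*
 * A minimal port bring-up sketch (assumption: mb_pool is an existing
 * mbuf pool): configure one Rx and one Tx queue, set the queues up,
 * then start the port.
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0 ||
 *       rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                              NULL, mb_pool) != 0 ||
 *       rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                              NULL) != 0 ||
 *       rte_eth_dev_start(port_id) != 0)
 *           rte_exit(EXIT_FAILURE, "port %u init failed\n", port_id);
 */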

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
{
        struct rte_ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info->max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (rte_is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }
}

static int
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
                           struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
        int ret;

        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                rte_eth_dev_mac_restore(dev, dev_info);

        /* replay promiscuous configuration */
        /*
         * Use the callbacks directly: the port_id check is not needed here,
         * and the "same value already set" short-circuit should be bypassed.
         */
        if (rte_eth_promiscuous_get(port_id) == 1 &&
            *dev->dev_ops->promiscuous_enable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_enable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to enable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        } else if (rte_eth_promiscuous_get(port_id) == 0 &&
                   *dev->dev_ops->promiscuous_disable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_disable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to disable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        }

        /* replay all multicast configuration */
        /*
         * Use the callbacks directly: the port_id check is not needed here,
         * and the "same value already set" short-circuit should be bypassed.
         */
        if (rte_eth_allmulticast_get(port_id) == 1 &&
            *dev->dev_ops->allmulticast_enable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->allmulticast_enable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to enable allmulticast mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        } else if (rte_eth_allmulticast_get(port_id) == 0 &&
                   *dev->dev_ops->allmulticast_disable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->allmulticast_disable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to disable allmulticast mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        }

        return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

1523         /* Let's restore MAC now if device does not support live change */
1524         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1525                 rte_eth_dev_mac_restore(dev, &dev_info);
1526
1527         diag = (*dev->dev_ops->dev_start)(dev);
1528         if (diag == 0)
1529                 dev->data->dev_started = 1;
1530         else
1531                 return eth_err(port_id, diag);
1532
1533         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1534         if (ret != 0) {
1535                 RTE_ETHDEV_LOG(ERR,
1536                         "Error during restoring configuration for device (port %u): %s\n",
1537                         port_id, rte_strerror(-ret));
1538                 rte_eth_dev_stop(port_id);
1539                 return ret;
1540         }
1541
1542         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1543                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1544                 (*dev->dev_ops->link_update)(dev, 0);
1545         }
1546         return 0;
1547 }
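
/*
 * Usage sketch (illustrative, not part of this file): a minimal
 * start/stop sequence as an application would issue it. It assumes
 * the port has already been configured and its queues set up; the
 * port id and error handling are examples only.
 *
 *    uint16_t port_id = 0;
 *    int ret;
 *
 *    ret = rte_eth_dev_start(port_id);
 *    if (ret < 0)
 *        rte_exit(EXIT_FAILURE, "cannot start port %u: %s\n",
 *                 port_id, rte_strerror(-ret));
 *    ... receive and transmit on the running port ...
 *    rte_eth_dev_stop(port_id);
 */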
1548
1549 void
1550 rte_eth_dev_stop(uint16_t port_id)
1551 {
1552         struct rte_eth_dev *dev;
1553
1554         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1555         dev = &rte_eth_devices[port_id];
1556
1557         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1558
1559         if (dev->data->dev_started == 0) {
1560                 RTE_ETHDEV_LOG(INFO,
1561                         "Device with port_id=%"PRIu16" already stopped\n",
1562                         port_id);
1563                 return;
1564         }
1565
1566         dev->data->dev_started = 0;
1567         (*dev->dev_ops->dev_stop)(dev);
1568 }
1569
1570 int
1571 rte_eth_dev_set_link_up(uint16_t port_id)
1572 {
1573         struct rte_eth_dev *dev;
1574
1575         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1576
1577         dev = &rte_eth_devices[port_id];
1578
1579         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1580         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1581 }
1582
1583 int
1584 rte_eth_dev_set_link_down(uint16_t port_id)
1585 {
1586         struct rte_eth_dev *dev;
1587
1588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1589
1590         dev = &rte_eth_devices[port_id];
1591
1592         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1593         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1594 }
1595
1596 void
1597 rte_eth_dev_close(uint16_t port_id)
1598 {
1599         struct rte_eth_dev *dev;
1600
1601         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1602         dev = &rte_eth_devices[port_id];
1603
1604         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1605         dev->data->dev_started = 0;
1606         (*dev->dev_ops->dev_close)(dev);
1607
1608         /* check behaviour flag - temporary for PMD migration */
1609         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1610                 /* new behaviour: send event + reset state + free all data */
1611                 rte_eth_dev_release_port(dev);
1612                 return;
1613         }
1614         RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
1615                         "The driver %s should migrate to the new behaviour.\n",
1616                         dev->device->driver->name);
1617         /* old behaviour: only free queue arrays */
1618         dev->data->nb_rx_queues = 0;
1619         rte_free(dev->data->rx_queues);
1620         dev->data->rx_queues = NULL;
1621         dev->data->nb_tx_queues = 0;
1622         rte_free(dev->data->tx_queues);
1623         dev->data->tx_queues = NULL;
1624 }
1625
1626 int
1627 rte_eth_dev_reset(uint16_t port_id)
1628 {
1629         struct rte_eth_dev *dev;
1630         int ret;
1631
1632         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1633         dev = &rte_eth_devices[port_id];
1634
1635         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1636
1637         rte_eth_dev_stop(port_id);
1638         ret = dev->dev_ops->dev_reset(dev);
1639
1640         return eth_err(port_id, ret);
1641 }
1642
1643 int
1644 rte_eth_dev_is_removed(uint16_t port_id)
1645 {
1646         struct rte_eth_dev *dev;
1647         int ret;
1648
1649         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1650
1651         dev = &rte_eth_devices[port_id];
1652
1653         if (dev->state == RTE_ETH_DEV_REMOVED)
1654                 return 1;
1655
1656         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1657
1658         ret = dev->dev_ops->is_removed(dev);
1659         if (ret != 0)
1660                 /* Device is physically removed. */
1661                 dev->state = RTE_ETH_DEV_REMOVED;
1662
1663         return ret;
1664 }
1665
1666 int
1667 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1668                        uint16_t nb_rx_desc, unsigned int socket_id,
1669                        const struct rte_eth_rxconf *rx_conf,
1670                        struct rte_mempool *mp)
1671 {
1672         int ret;
1673         uint32_t mbp_buf_size;
1674         struct rte_eth_dev *dev;
1675         struct rte_eth_dev_info dev_info;
1676         struct rte_eth_rxconf local_conf;
1677         void **rxq;
1678
1679         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1680
1681         dev = &rte_eth_devices[port_id];
1682         if (rx_queue_id >= dev->data->nb_rx_queues) {
1683                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1684                 return -EINVAL;
1685         }
1686
1687         if (mp == NULL) {
1688                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1689                 return -EINVAL;
1690         }
1691
1692         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1693
1694         /*
1695          * Check the size of the mbuf data buffer.
1696          * This value must be provided in the private data of the memory pool.
1697          * First, check that the memory pool has valid private data.
1698          */
1699         ret = rte_eth_dev_info_get(port_id, &dev_info);
1700         if (ret != 0)
1701                 return ret;
1702
1703         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1704                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1705                         mp->name, (int)mp->private_data_size,
1706                         (int)sizeof(struct rte_pktmbuf_pool_private));
1707                 return -ENOSPC;
1708         }
1709         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1710
1711         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1712                 RTE_ETHDEV_LOG(ERR,
1713                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1714                         mp->name, (int)mbp_buf_size,
1715                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1716                         (int)RTE_PKTMBUF_HEADROOM,
1717                         (int)dev_info.min_rx_bufsize);
1718                 return -EINVAL;
1719         }
1720
1721         /* Use default specified by driver, if nb_rx_desc is zero */
1722         if (nb_rx_desc == 0) {
1723                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1724                 /* If driver default is also zero, fall back on EAL default */
1725                 if (nb_rx_desc == 0)
1726                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1727         }
1728
1729         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1730                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1731                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1732
1733                 RTE_ETHDEV_LOG(ERR,
1734                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1735                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1736                         dev_info.rx_desc_lim.nb_min,
1737                         dev_info.rx_desc_lim.nb_align);
1738                 return -EINVAL;
1739         }
1740
1741         if (dev->data->dev_started &&
1742                 !(dev_info.dev_capa &
1743                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1744                 return -EBUSY;
1745
1746         if (dev->data->dev_started &&
1747                 (dev->data->rx_queue_state[rx_queue_id] !=
1748                         RTE_ETH_QUEUE_STATE_STOPPED))
1749                 return -EBUSY;
1750
1751         rxq = dev->data->rx_queues;
1752         if (rxq[rx_queue_id]) {
1753                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1754                                         -ENOTSUP);
1755                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1756                 rxq[rx_queue_id] = NULL;
1757         }
1758
1759         if (rx_conf == NULL)
1760                 rx_conf = &dev_info.default_rxconf;
1761
1762         local_conf = *rx_conf;
1763
1764         /*
1765          * If an offload has already been enabled in
1766          * rte_eth_dev_configure(), it is enabled on all queues,
1767          * so there is no need to enable it on this queue again.
1768          * The local_conf.offloads input to the underlying PMD
1769          * only carries those offloads that are enabled on this
1770          * queue alone and not on all queues.
1771          */
1772         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1773
1774         /*
1775          * Offloads newly added for this queue are those not enabled in
1776          * rte_eth_dev_configure() and they must be of a per-queue type.
1777          * A pure per-port offload can't be enabled on a queue while
1778          * disabled on another queue, and it can't be newly added on
1779          * any queue if it hasn't been enabled in
1780          * rte_eth_dev_configure().
1781          */
1782         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1783              local_conf.offloads) {
1784                 RTE_ETHDEV_LOG(ERR,
1785                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1786                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1787                         port_id, rx_queue_id, local_conf.offloads,
1788                         dev_info.rx_queue_offload_capa,
1789                         __func__);
1790                 return -EINVAL;
1791         }
1792
1793         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1794                                               socket_id, &local_conf, mp);
1795         if (!ret) {
1796                 if (!dev->data->min_rx_buf_size ||
1797                     dev->data->min_rx_buf_size > mbp_buf_size)
1798                         dev->data->min_rx_buf_size = mbp_buf_size;
1799         }
1800
1801         return eth_err(port_id, ret);
1802 }
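
/*
 * Usage sketch (illustrative, not part of this file): setting up one
 * Rx queue backed by a freshly created mbuf pool. Pool, cache and
 * ring sizes are example values; a NULL rx_conf selects the driver
 * default reported in dev_info.
 *
 *    struct rte_mempool *mp;
 *    int ret;
 *
 *    mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                 RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                 rte_eth_dev_socket_id(port_id));
 *    if (mp == NULL)
 *        rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *    ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *                                 rte_eth_dev_socket_id(port_id),
 *                                 NULL, mp);
 *    if (ret < 0)
 *        rte_exit(EXIT_FAILURE, "rx queue setup failed: %s\n",
 *                 rte_strerror(-ret));
 */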
1803
1804 int
1805 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1806                                uint16_t nb_rx_desc,
1807                                const struct rte_eth_hairpin_conf *conf)
1808 {
1809         int ret;
1810         struct rte_eth_dev *dev;
1811         struct rte_eth_hairpin_cap cap;
1812         void **rxq;
1813         int i;
1814         int count;
1815
1816         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1817
1818         dev = &rte_eth_devices[port_id];
1819         if (rx_queue_id >= dev->data->nb_rx_queues) {
1820                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1821                 return -EINVAL;
1822         }
1823         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1824         if (ret != 0)
1825                 return ret;
1826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1827                                 -ENOTSUP);
1828         /* If nb_rx_desc is zero, use the driver's maximum descriptor count. */
1829         if (nb_rx_desc == 0)
1830                 nb_rx_desc = cap.max_nb_desc;
1831         if (nb_rx_desc > cap.max_nb_desc) {
1832                 RTE_ETHDEV_LOG(ERR,
1833                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
1834                         nb_rx_desc, cap.max_nb_desc);
1835                 return -EINVAL;
1836         }
1837         if (conf->peer_count > cap.max_rx_2_tx) {
1838                 RTE_ETHDEV_LOG(ERR,
1839                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu",
1840                         conf->peer_count, cap.max_rx_2_tx);
1841                 return -EINVAL;
1842         }
1843         if (conf->peer_count == 0) {
1844                 RTE_ETHDEV_LOG(ERR,
1845                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0",
1846                         conf->peer_count);
1847                 return -EINVAL;
1848         }
1849         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1850              cap.max_nb_queues != UINT16_MAX; i++) {
1851                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1852                         count++;
1853         }
1854         if (count > cap.max_nb_queues) {
1855                 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
1856                 cap.max_nb_queues);
1857                 return -EINVAL;
1858         }
1859         if (dev->data->dev_started)
1860                 return -EBUSY;
1861         rxq = dev->data->rx_queues;
1862         if (rxq[rx_queue_id] != NULL) {
1863                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1864                                         -ENOTSUP);
1865                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1866                 rxq[rx_queue_id] = NULL;
1867         }
1868         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1869                                                       nb_rx_desc, conf);
1870         if (ret == 0)
1871                 dev->data->rx_queue_state[rx_queue_id] =
1872                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1873         return eth_err(port_id, ret);
1874 }
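
/*
 * Usage sketch (illustrative, not part of this file): binding an Rx
 * hairpin queue to a Tx queue of the same port. The field names
 * follow struct rte_eth_hairpin_conf as introduced with this API;
 * port_id, rx_queue_id and peer_tx_queue are assumed variables.
 * Passing nb_rx_desc as 0 requests the driver maximum, as the code
 * above shows.
 *
 *    struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
 *
 *    conf.peers[0].port = port_id;
 *    conf.peers[0].queue = peer_tx_queue;
 *    ret = rte_eth_rx_hairpin_queue_setup(port_id, rx_queue_id,
 *                                         0, &conf);
 */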
1875
1876 int
1877 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1878                        uint16_t nb_tx_desc, unsigned int socket_id,
1879                        const struct rte_eth_txconf *tx_conf)
1880 {
1881         struct rte_eth_dev *dev;
1882         struct rte_eth_dev_info dev_info;
1883         struct rte_eth_txconf local_conf;
1884         void **txq;
1885         int ret;
1886
1887         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1888
1889         dev = &rte_eth_devices[port_id];
1890         if (tx_queue_id >= dev->data->nb_tx_queues) {
1891                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1892                 return -EINVAL;
1893         }
1894
1895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1896
1897         ret = rte_eth_dev_info_get(port_id, &dev_info);
1898         if (ret != 0)
1899                 return ret;
1900
1901         /* Use default specified by driver, if nb_tx_desc is zero */
1902         if (nb_tx_desc == 0) {
1903                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1904                 /* If driver default is zero, fall back on EAL default */
1905                 if (nb_tx_desc == 0)
1906                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1907         }
1908         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1909             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1910             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1911                 RTE_ETHDEV_LOG(ERR,
1912                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1913                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1914                         dev_info.tx_desc_lim.nb_min,
1915                         dev_info.tx_desc_lim.nb_align);
1916                 return -EINVAL;
1917         }
1918
1919         if (dev->data->dev_started &&
1920                 !(dev_info.dev_capa &
1921                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1922                 return -EBUSY;
1923
1924         if (dev->data->dev_started &&
1925                 (dev->data->tx_queue_state[tx_queue_id] !=
1926                         RTE_ETH_QUEUE_STATE_STOPPED))
1927                 return -EBUSY;
1928
1929         txq = dev->data->tx_queues;
1930         if (txq[tx_queue_id]) {
1931                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1932                                         -ENOTSUP);
1933                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1934                 txq[tx_queue_id] = NULL;
1935         }
1936
1937         if (tx_conf == NULL)
1938                 tx_conf = &dev_info.default_txconf;
1939
1940         local_conf = *tx_conf;
1941
1942         /*
1943          * If an offload has already been enabled in
1944          * rte_eth_dev_configure(), it is enabled on all queues,
1945          * so there is no need to enable it on this queue again.
1946          * The local_conf.offloads input to the underlying PMD
1947          * only carries those offloads that are enabled on this
1948          * queue alone and not on all queues.
1949          */
1950         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1951
1952         /*
1953          * Offloads newly added for this queue are those not enabled in
1954          * rte_eth_dev_configure() and they must be of a per-queue type.
1955          * A pure per-port offload can't be enabled on a queue while
1956          * disabled on another queue, and it can't be newly added on
1957          * any queue if it hasn't been enabled in
1958          * rte_eth_dev_configure().
1959          */
1960         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1961              local_conf.offloads) {
1962                 RTE_ETHDEV_LOG(ERR,
1963                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1964                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1965                         port_id, tx_queue_id, local_conf.offloads,
1966                         dev_info.tx_queue_offload_capa,
1967                         __func__);
1968                 return -EINVAL;
1969         }
1970
1971         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1972                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1973 }
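
/*
 * Usage sketch (illustrative, not part of this file): a matching Tx
 * queue setup; the ring size is an example and a NULL tx_conf selects
 * the driver default reported in dev_info.
 *
 *    ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *                                 rte_eth_dev_socket_id(port_id), NULL);
 *    if (ret < 0)
 *        rte_exit(EXIT_FAILURE, "tx queue setup failed: %s\n",
 *                 rte_strerror(-ret));
 */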
1974
1975 int
1976 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1977                                uint16_t nb_tx_desc,
1978                                const struct rte_eth_hairpin_conf *conf)
1979 {
1980         struct rte_eth_dev *dev;
1981         struct rte_eth_hairpin_cap cap;
1982         void **txq;
1983         int i;
1984         int count;
1985         int ret;
1986
1987         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1988         dev = &rte_eth_devices[port_id];
1989         if (tx_queue_id >= dev->data->nb_tx_queues) {
1990                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1991                 return -EINVAL;
1992         }
1993         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1994         if (ret != 0)
1995                 return ret;
1996         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
1997                                 -ENOTSUP);
1998         /* If nb_tx_desc is zero, use the driver's maximum descriptor count. */
1999         if (nb_tx_desc == 0)
2000                 nb_tx_desc = cap.max_nb_desc;
2001         if (nb_tx_desc > cap.max_nb_desc) {
2002                 RTE_ETHDEV_LOG(ERR,
2003                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2004                         nb_tx_desc, cap.max_nb_desc);
2005                 return -EINVAL;
2006         }
2007         if (conf->peer_count > cap.max_tx_2_rx) {
2008                 RTE_ETHDEV_LOG(ERR,
2009                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu",
2010                         conf->peer_count, cap.max_tx_2_rx);
2011                 return -EINVAL;
2012         }
2013         if (conf->peer_count == 0) {
2014                 RTE_ETHDEV_LOG(ERR,
2015                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0",
2016                         conf->peer_count);
2017                 return -EINVAL;
2018         }
2019         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2020              cap.max_nb_queues != UINT16_MAX; i++) {
2021                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2022                         count++;
2023         }
2024         if (count > cap.max_nb_queues) {
2025                 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2026                 cap.max_nb_queues);
2027                 return -EINVAL;
2028         }
2029         if (dev->data->dev_started)
2030                 return -EBUSY;
2031         txq = dev->data->tx_queues;
2032         if (txq[tx_queue_id] != NULL) {
2033                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2034                                         -ENOTSUP);
2035                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2036                 txq[tx_queue_id] = NULL;
2037         }
2038         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2039                 (dev, tx_queue_id, nb_tx_desc, conf);
2040         if (ret == 0)
2041                 dev->data->tx_queue_state[tx_queue_id] =
2042                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2043         return eth_err(port_id, ret);
2044 }
2045
2046 void
2047 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2048                 void *userdata __rte_unused)
2049 {
2050         unsigned i;
2051
2052         for (i = 0; i < unsent; i++)
2053                 rte_pktmbuf_free(pkts[i]);
2054 }
2055
2056 void
2057 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2058                 void *userdata)
2059 {
2060         uint64_t *count = userdata;
2061         unsigned i;
2062
2063         for (i = 0; i < unsent; i++)
2064                 rte_pktmbuf_free(pkts[i]);
2065
2066         *count += unsent;
2067 }
2068
2069 int
2070 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2071                 buffer_tx_error_fn cbfn, void *userdata)
2072 {
2073         buffer->error_callback = cbfn;
2074         buffer->error_userdata = userdata;
2075         return 0;
2076 }
2077
2078 int
2079 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2080 {
2081         int ret = 0;
2082
2083         if (buffer == NULL)
2084                 return -EINVAL;
2085
2086         buffer->size = size;
2087         if (buffer->error_callback == NULL) {
2088                 ret = rte_eth_tx_buffer_set_err_callback(
2089                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2090         }
2091
2092         return ret;
2093 }
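
/*
 * Usage sketch (illustrative, not part of this file): buffering
 * packets for batched transmission and counting drops through
 * rte_eth_tx_buffer_count_callback(). The buffer size of 32 packets
 * and queue 0 are example values.
 *
 *    static uint64_t drop_count;
 *    struct rte_eth_dev_tx_buffer *buffer;
 *
 *    buffer = rte_zmalloc_socket("tx_buffer",
 *                                RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *                                rte_eth_dev_socket_id(port_id));
 *    rte_eth_tx_buffer_init(buffer, 32);
 *    rte_eth_tx_buffer_set_err_callback(buffer,
 *            rte_eth_tx_buffer_count_callback, &drop_count);
 *    ... per packet ...
 *    rte_eth_tx_buffer(port_id, 0, buffer, pkt);
 *    ... periodically, or before the lcore idles ...
 *    rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */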
2094
2095 int
2096 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2097 {
2098         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2099         int ret;
2100
2101         /* Validate Input Data. Bail if not valid or not supported. */
2102         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2104
2105         /* Call driver to free pending mbufs. */
2106         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2107                                                free_cnt);
2108         return eth_err(port_id, ret);
2109 }
2110
2111 int
2112 rte_eth_promiscuous_enable(uint16_t port_id)
2113 {
2114         struct rte_eth_dev *dev;
2115         int diag = 0;
2116
2117         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2118         dev = &rte_eth_devices[port_id];
2119
2120         if (dev->data->promiscuous == 1)
2121                 return 0;
2122
2123         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2124
2125         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2126         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2127
2128         return eth_err(port_id, diag);
2129 }
2130
2131 int
2132 rte_eth_promiscuous_disable(uint16_t port_id)
2133 {
2134         struct rte_eth_dev *dev;
2135         int diag = 0;
2136
2137         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2138         dev = &rte_eth_devices[port_id];
2139
2140         if (dev->data->promiscuous == 0)
2141                 return 0;
2142
2143         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2144
2145         dev->data->promiscuous = 0;
2146         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2147         if (diag != 0)
2148                 dev->data->promiscuous = 1;
2149
2150         return eth_err(port_id, diag);
2151 }
2152
2153 int
2154 rte_eth_promiscuous_get(uint16_t port_id)
2155 {
2156         struct rte_eth_dev *dev;
2157
2158         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2159
2160         dev = &rte_eth_devices[port_id];
2161         return dev->data->promiscuous;
2162 }
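
/*
 * Usage sketch (illustrative, not part of this file): enabling
 * promiscuous mode and checking the returned status instead of
 * re-reading the mode afterwards.
 *
 *    ret = rte_eth_promiscuous_enable(port_id);
 *    if (ret != 0 && ret != -ENOTSUP)
 *        printf("cannot enable promiscuous mode: %s\n",
 *               rte_strerror(-ret));
 */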
2163
2164 int
2165 rte_eth_allmulticast_enable(uint16_t port_id)
2166 {
2167         struct rte_eth_dev *dev;
2168         int diag;
2169
2170         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2171         dev = &rte_eth_devices[port_id];
2172
2173         if (dev->data->all_multicast == 1)
2174                 return 0;
2175
2176         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2177         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2178         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2179
2180         return eth_err(port_id, diag);
2181 }
2182
2183 int
2184 rte_eth_allmulticast_disable(uint16_t port_id)
2185 {
2186         struct rte_eth_dev *dev;
2187         int diag;
2188
2189         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2190         dev = &rte_eth_devices[port_id];
2191
2192         if (dev->data->all_multicast == 0)
2193                 return 0;
2194
2195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2196         dev->data->all_multicast = 0;
2197         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2198         if (diag != 0)
2199                 dev->data->all_multicast = 1;
2200
2201         return eth_err(port_id, diag);
2202 }
2203
2204 int
2205 rte_eth_allmulticast_get(uint16_t port_id)
2206 {
2207         struct rte_eth_dev *dev;
2208
2209         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2210
2211         dev = &rte_eth_devices[port_id];
2212         return dev->data->all_multicast;
2213 }
2214
2215 int
2216 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2217 {
2218         struct rte_eth_dev *dev;
2219
2220         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2221         dev = &rte_eth_devices[port_id];
2222
2223         if (dev->data->dev_conf.intr_conf.lsc &&
2224             dev->data->dev_started)
2225                 rte_eth_linkstatus_get(dev, eth_link);
2226         else {
2227                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2228                 (*dev->dev_ops->link_update)(dev, 1);
2229                 *eth_link = dev->data->dev_link;
2230         }
2231
2232         return 0;
2233 }
2234
2235 int
2236 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2237 {
2238         struct rte_eth_dev *dev;
2239
2240         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2241         dev = &rte_eth_devices[port_id];
2242
2243         if (dev->data->dev_conf.intr_conf.lsc &&
2244             dev->data->dev_started)
2245                 rte_eth_linkstatus_get(dev, eth_link);
2246         else {
2247                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2248                 (*dev->dev_ops->link_update)(dev, 0);
2249                 *eth_link = dev->data->dev_link;
2250         }
2251
2252         return 0;
2253 }
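
/*
 * Usage sketch (illustrative, not part of this file): polling the
 * link without blocking; the nowait variant asks the driver not to
 * wait for the link to settle.
 *
 *    struct rte_eth_link link;
 *
 *    ret = rte_eth_link_get_nowait(port_id, &link);
 *    if (ret == 0 && link.link_status == ETH_LINK_UP)
 *        printf("port %u is up at %u Mbps\n",
 *               port_id, link.link_speed);
 */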
2254
2255 int
2256 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2257 {
2258         struct rte_eth_dev *dev;
2259
2260         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2261
2262         dev = &rte_eth_devices[port_id];
2263         memset(stats, 0, sizeof(*stats));
2264
2265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2266         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2267         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2268 }
2269
2270 int
2271 rte_eth_stats_reset(uint16_t port_id)
2272 {
2273         struct rte_eth_dev *dev;
2274         int ret;
2275
2276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2277         dev = &rte_eth_devices[port_id];
2278
2279         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2280         ret = (*dev->dev_ops->stats_reset)(dev);
2281         if (ret != 0)
2282                 return eth_err(port_id, ret);
2283
2284         dev->data->rx_mbuf_alloc_failed = 0;
2285
2286         return 0;
2287 }
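
/*
 * Usage sketch (illustrative, not part of this file): reading the
 * basic statistics; the fields shown mirror entries of the basic
 * xstats name table.
 *
 *    struct rte_eth_stats stats;
 *
 *    if (rte_eth_stats_get(port_id, &stats) == 0)
 *        printf("rx %"PRIu64" tx %"PRIu64" missed %"PRIu64"\n",
 *               stats.ipackets, stats.opackets, stats.imissed);
 */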
2288
2289 static inline int
2290 get_xstats_basic_count(struct rte_eth_dev *dev)
2291 {
2292         uint16_t nb_rxqs, nb_txqs;
2293         int count;
2294
2295         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2296         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2297
2298         count = RTE_NB_STATS;
2299         count += nb_rxqs * RTE_NB_RXQ_STATS;
2300         count += nb_txqs * RTE_NB_TXQ_STATS;
2301
2302         return count;
2303 }
2304
2305 static int
2306 get_xstats_count(uint16_t port_id)
2307 {
2308         struct rte_eth_dev *dev;
2309         int count;
2310
2311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2312         dev = &rte_eth_devices[port_id];
2313         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2314                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2315                                 NULL, 0);
2316                 if (count < 0)
2317                         return eth_err(port_id, count);
2318         }
2319         if (dev->dev_ops->xstats_get_names != NULL) {
2320                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2321                 if (count < 0)
2322                         return eth_err(port_id, count);
2323         } else
2324                 count = 0;
2325
2327         count += get_xstats_basic_count(dev);
2328
2329         return count;
2330 }
2331
2332 int
2333 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2334                 uint64_t *id)
2335 {
2336         int cnt_xstats, idx_xstat;
2337
2338         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2339
2340         if (!id) {
2341                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2342                 return -ENOMEM;
2343         }
2344
2345         if (!xstat_name) {
2346                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2347                 return -ENOMEM;
2348         }
2349
2350         /* Get count */
2351         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2352         if (cnt_xstats < 0) {
2353                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2354                 return -ENODEV;
2355         }
2356
2357         /* Get id-name lookup table */
2358         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2359
2360         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2361                         port_id, xstats_names, cnt_xstats, NULL)) {
2362                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2363                 return -1;
2364         }
2365
2366         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2367                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2368                         *id = idx_xstat;
2369                         return 0;
2370                 }
2371         }
2372
2373         return -EINVAL;
2374 }
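
/*
 * Usage sketch (illustrative, not part of this file): resolving one
 * xstat id by name and then fetching only that counter;
 * "rx_good_packets" is one of the basic statistics names.
 *
 *    uint64_t id, value;
 *
 *    if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *                                      &id) == 0 &&
 *        rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *        printf("rx_good_packets = %"PRIu64"\n", value);
 */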
2375
2376 /* retrieve basic stats names */
2377 static int
2378 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2379         struct rte_eth_xstat_name *xstats_names)
2380 {
2381         int cnt_used_entries = 0;
2382         uint32_t idx, id_queue;
2383         uint16_t num_q;
2384
2385         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2386                 strlcpy(xstats_names[cnt_used_entries].name,
2387                         rte_stats_strings[idx].name,
2388                         sizeof(xstats_names[0].name));
2389                 cnt_used_entries++;
2390         }
2391         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2392         for (id_queue = 0; id_queue < num_q; id_queue++) {
2393                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2394                         snprintf(xstats_names[cnt_used_entries].name,
2395                                 sizeof(xstats_names[0].name),
2396                                 "rx_q%u%s",
2397                                 id_queue, rte_rxq_stats_strings[idx].name);
2398                         cnt_used_entries++;
2399                 }
2400
2401         }
2402         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2403         for (id_queue = 0; id_queue < num_q; id_queue++) {
2404                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2405                         snprintf(xstats_names[cnt_used_entries].name,
2406                                 sizeof(xstats_names[0].name),
2407                                 "tx_q%u%s",
2408                                 id_queue, rte_txq_stats_strings[idx].name);
2409                         cnt_used_entries++;
2410                 }
2411         }
2412         return cnt_used_entries;
2413 }
2414
2415 /* retrieve ethdev extended statistics names */
2416 int
2417 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2418         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2419         uint64_t *ids)
2420 {
2421         struct rte_eth_xstat_name *xstats_names_copy;
2422         unsigned int no_basic_stat_requested = 1;
2423         unsigned int no_ext_stat_requested = 1;
2424         unsigned int expected_entries;
2425         unsigned int basic_count;
2426         struct rte_eth_dev *dev;
2427         unsigned int i;
2428         int ret;
2429
2430         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2431         dev = &rte_eth_devices[port_id];
2432
2433         basic_count = get_xstats_basic_count(dev);
2434         ret = get_xstats_count(port_id);
2435         if (ret < 0)
2436                 return ret;
2437         expected_entries = (unsigned int)ret;
2438
2439         /* Return max number of stats if no ids given */
2440         if (!ids) {
2441                 if (!xstats_names)
2442                         return expected_entries;
2443                 else if (size < expected_entries)
2444                         return expected_entries;
2445         }
2446
2447         if (ids && !xstats_names)
2448                 return -EINVAL;
2449
2450         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2451                 uint64_t ids_copy[size];
2452
2453                 for (i = 0; i < size; i++) {
2454                         if (ids[i] < basic_count) {
2455                                 no_basic_stat_requested = 0;
2456                                 break;
2457                         }
2458
2459                         /*
2460                          * Convert ids to xstats ids that PMD knows.
2461                          * ids known by user are basic + extended stats.
2462                          */
2463                         ids_copy[i] = ids[i] - basic_count;
2464                 }
2465
2466                 if (no_basic_stat_requested)
2467                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2468                                         xstats_names, ids_copy, size);
2469         }
2470
2471         /* Retrieve all stats */
2472         if (!ids) {
2473                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2474                                 expected_entries);
2475                 if (num_stats < 0 || num_stats > (int)expected_entries)
2476                         return num_stats;
2477                 else
2478                         return expected_entries;
2479         }
2480
2481         xstats_names_copy = calloc(expected_entries,
2482                 sizeof(struct rte_eth_xstat_name));
2483
2484         if (!xstats_names_copy) {
2485                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2486                 return -ENOMEM;
2487         }
2488
2489         if (ids) {
2490                 for (i = 0; i < size; i++) {
2491                         if (ids[i] >= basic_count) {
2492                                 no_ext_stat_requested = 0;
2493                                 break;
2494                         }
2495                 }
2496         }
2497
2498         /* Fill xstats_names_copy structure */
2499         if (ids && no_ext_stat_requested) {
2500                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2501         } else {
2502                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2503                         expected_entries);
2504                 if (ret < 0) {
2505                         free(xstats_names_copy);
2506                         return ret;
2507                 }
2508         }
2509
2510         /* Filter stats */
2511         for (i = 0; i < size; i++) {
2512                 if (ids[i] >= expected_entries) {
2513                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2514                         free(xstats_names_copy);
2515                         return -1;
2516                 }
2517                 xstats_names[i] = xstats_names_copy[ids[i]];
2518         }
2519
2520         free(xstats_names_copy);
2521         return size;
2522 }
2523
2524 int
2525 rte_eth_xstats_get_names(uint16_t port_id,
2526         struct rte_eth_xstat_name *xstats_names,
2527         unsigned int size)
2528 {
2529         struct rte_eth_dev *dev;
2530         int cnt_used_entries;
2531         int cnt_expected_entries;
2532         int cnt_driver_entries;
2533
2534         cnt_expected_entries = get_xstats_count(port_id);
2535         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2536                         (int)size < cnt_expected_entries)
2537                 return cnt_expected_entries;
2538
2539         /* port_id checked in get_xstats_count() */
2540         dev = &rte_eth_devices[port_id];
2541
2542         cnt_used_entries = rte_eth_basic_stats_get_names(
2543                 dev, xstats_names);
2544
2545         if (dev->dev_ops->xstats_get_names != NULL) {
2546                 /* If there are any driver-specific xstats, append them
2547                  * to end of list.
2548                  */
2549                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2550                         dev,
2551                         xstats_names + cnt_used_entries,
2552                         size - cnt_used_entries);
2553                 if (cnt_driver_entries < 0)
2554                         return eth_err(port_id, cnt_driver_entries);
2555                 cnt_used_entries += cnt_driver_entries;
2556         }
2557
2558         return cnt_used_entries;
2559 }
2560
2561
2562 static int
2563 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2564 {
2565         struct rte_eth_dev *dev;
2566         struct rte_eth_stats eth_stats;
2567         unsigned int count = 0, i, q;
2568         uint64_t val, *stats_ptr;
2569         uint16_t nb_rxqs, nb_txqs;
2570         int ret;
2571
2572         ret = rte_eth_stats_get(port_id, &eth_stats);
2573         if (ret < 0)
2574                 return ret;
2575
2576         dev = &rte_eth_devices[port_id];
2577
2578         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2579         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2580
2581         /* global stats */
2582         for (i = 0; i < RTE_NB_STATS; i++) {
2583                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2584                                         rte_stats_strings[i].offset);
2585                 val = *stats_ptr;
2586                 xstats[count++].value = val;
2587         }
2588
2589         /* per-rxq stats */
2590         for (q = 0; q < nb_rxqs; q++) {
2591                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2592                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2593                                         rte_rxq_stats_strings[i].offset +
2594                                         q * sizeof(uint64_t));
2595                         val = *stats_ptr;
2596                         xstats[count++].value = val;
2597                 }
2598         }
2599
2600         /* per-txq stats */
2601         for (q = 0; q < nb_txqs; q++) {
2602                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2603                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2604                                         rte_txq_stats_strings[i].offset +
2605                                         q * sizeof(uint64_t));
2606                         val = *stats_ptr;
2607                         xstats[count++].value = val;
2608                 }
2609         }
2610         return count;
2611 }
2612
2613 /* retrieve ethdev extended statistics */
2614 int
2615 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2616                          uint64_t *values, unsigned int size)
2617 {
2618         unsigned int no_basic_stat_requested = 1;
2619         unsigned int no_ext_stat_requested = 1;
2620         unsigned int num_xstats_filled;
2621         unsigned int basic_count;
2622         uint16_t expected_entries;
2623         struct rte_eth_dev *dev;
2624         unsigned int i;
2625         int ret;
2626
2627         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2628         ret = get_xstats_count(port_id);
2629         if (ret < 0)
2630                 return ret;
2631         expected_entries = (uint16_t)ret;
2632         struct rte_eth_xstat xstats[expected_entries];
2633         dev = &rte_eth_devices[port_id];
2634         basic_count = get_xstats_basic_count(dev);
2635
2636         /* Return max number of stats if no ids given */
2637         if (!ids) {
2638                 if (!values)
2639                         return expected_entries;
2640                 else if (size < expected_entries)
2641                         return expected_entries;
2642         }
2643
2644         if (ids && !values)
2645                 return -EINVAL;
2646
2647         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2649                 uint64_t ids_copy[size];
2650
2651                 for (i = 0; i < size; i++) {
2652                         if (ids[i] < basic_count) {
2653                                 no_basic_stat_requested = 0;
2654                                 break;
2655                         }
2656
2657                         /*
2658                          * Convert ids to xstats ids that PMD knows.
2659                          * ids known by user are basic + extended stats.
2660                          */
2661                         ids_copy[i] = ids[i] - basic_count;
2662                 }
2663
2664                 if (no_basic_stat_requested)
2665                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2666                                         values, size);
2667         }
2668
2669         if (ids) {
2670                 for (i = 0; i < size; i++) {
2671                         if (ids[i] >= basic_count) {
2672                                 no_ext_stat_requested = 0;
2673                                 break;
2674                         }
2675                 }
2676         }
2677
2678         /* Fill the xstats structure */
2679         if (ids && no_ext_stat_requested)
2680                 ret = rte_eth_basic_stats_get(port_id, xstats);
2681         else
2682                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2683
2684         if (ret < 0)
2685                 return ret;
2686         num_xstats_filled = (unsigned int)ret;
2687
2688         /* Return all stats */
2689         if (!ids) {
2690                 for (i = 0; i < num_xstats_filled; i++)
2691                         values[i] = xstats[i].value;
2692                 return expected_entries;
2693         }
2694
2695         /* Filter stats */
2696         for (i = 0; i < size; i++) {
2697                 if (ids[i] >= expected_entries) {
2698                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2699                         return -1;
2700                 }
2701                 values[i] = xstats[ids[i]].value;
2702         }
2703         return size;
2704 }
2705
2706 int
2707 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2708         unsigned int n)
2709 {
2710         struct rte_eth_dev *dev;
2711         unsigned int count = 0, i;
2712         signed int xcount = 0;
2713         uint16_t nb_rxqs, nb_txqs;
2714         int ret;
2715
2716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2717
2718         dev = &rte_eth_devices[port_id];
2719
2720         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2721         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2722
2723         /* Return generic statistics */
2724         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2725                 (nb_txqs * RTE_NB_TXQ_STATS);
2726
2727         /* implemented by the driver */
2728         if (dev->dev_ops->xstats_get != NULL) {
2729                 /* Retrieve the xstats from the driver at the end of the
2730                  * xstats array.
2731                  */
2732                 xcount = (*dev->dev_ops->xstats_get)(dev,
2733                                      xstats ? xstats + count : NULL,
2734                                      (n > count) ? n - count : 0);
2735
2736                 if (xcount < 0)
2737                         return eth_err(port_id, xcount);
2738         }
2739
2740         if (n < count + xcount || xstats == NULL)
2741                 return count + xcount;
2742
2743         /* now fill the xstats structure */
2744         ret = rte_eth_basic_stats_get(port_id, xstats);
2745         if (ret < 0)
2746                 return ret;
2747         count = ret;
2748
2749         for (i = 0; i < count; i++)
2750                 xstats[i].id = i;
2751         /* add an offset to driver-specific stats */
2752         for ( ; i < count + xcount; i++)
2753                 xstats[i].id += count;
2754
2755         return count + xcount;
2756 }
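
/*
 * Usage sketch (illustrative, not part of this file): the two-pass
 * pattern for dumping all xstats that this function supports: first
 * query the required count with a NULL array, then fetch names and
 * values with that size.
 *
 *    int i, n;
 *
 *    n = rte_eth_xstats_get(port_id, NULL, 0);
 *    if (n <= 0)
 *        return;
 *    struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *    struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *    if (xs != NULL && names != NULL &&
 *        rte_eth_xstats_get_names(port_id, names, n) == n &&
 *        rte_eth_xstats_get(port_id, xs, n) == n)
 *        for (i = 0; i < n; i++)
 *            printf("%s: %"PRIu64"\n",
 *                   names[xs[i].id].name, xs[i].value);
 *    free(xs);
 *    free(names);
 */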
2757
2758 /* reset ethdev extended statistics */
2759 int
2760 rte_eth_xstats_reset(uint16_t port_id)
2761 {
2762         struct rte_eth_dev *dev;
2763
2764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2765         dev = &rte_eth_devices[port_id];
2766
2767         /* implemented by the driver */
2768         if (dev->dev_ops->xstats_reset != NULL)
2769                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2770
2771         /* fallback to default */
2772         return rte_eth_stats_reset(port_id);
2773 }
2774
2775 static int
2776 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2777                 uint8_t is_rx)
2778 {
2779         struct rte_eth_dev *dev;
2780
2781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2782
2783         dev = &rte_eth_devices[port_id];
2784
2785         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2786
2787         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2788                 return -EINVAL;
2789
2790         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2791                 return -EINVAL;
2792
2793         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2794                 return -EINVAL;
2795
2796         return (*dev->dev_ops->queue_stats_mapping_set)
2797                         (dev, queue_id, stat_idx, is_rx);
2798 }
2799
2800
2801 int
2802 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2803                 uint8_t stat_idx)
2804 {
2805         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2806                                                 stat_idx, STAT_QMAP_TX));
2807 }
2808
2809
2810 int
2811 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2812                 uint8_t stat_idx)
2813 {
2814         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2815                                                 stat_idx, STAT_QMAP_RX));
2816 }
2817
2818 int
2819 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2820 {
2821         struct rte_eth_dev *dev;
2822
2823         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2824         dev = &rte_eth_devices[port_id];
2825
2826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2827         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2828                                                         fw_version, fw_size));
2829 }
2830
2831 int
2832 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2833 {
2834         struct rte_eth_dev *dev;
2835         const struct rte_eth_desc_lim lim = {
2836                 .nb_max = UINT16_MAX,
2837                 .nb_min = 0,
2838                 .nb_align = 1,
2839                 .nb_seg_max = UINT16_MAX,
2840                 .nb_mtu_seg_max = UINT16_MAX,
2841         };
2842         int diag;
2843
2844         /*
2845          * Init dev_info before the port_id check so that callers which
2846          * ignore the return status still see zeroed data.
2847          */
2848         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2849
2850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2851         dev = &rte_eth_devices[port_id];
2852
2853         dev_info->rx_desc_lim = lim;
2854         dev_info->tx_desc_lim = lim;
2855         dev_info->device = dev->device;
2856         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2857         dev_info->max_mtu = UINT16_MAX;
2858
2859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2860         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2861         if (diag != 0) {
2862                 /* Cleanup already filled in device information */
2863                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2864                 return eth_err(port_id, diag);
2865         }
2866
2867         dev_info->driver_name = dev->device->driver->name;
2868         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2869         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2870
2871         dev_info->dev_flags = &dev->data->dev_flags;
2872
2873         return 0;
2874 }
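
/*
 * Usage sketch (illustrative, not part of this file): since this call
 * returns a status, callers should check it before trusting the
 * structure contents.
 *
 *    struct rte_eth_dev_info dev_info;
 *
 *    ret = rte_eth_dev_info_get(port_id, &dev_info);
 *    if (ret != 0)
 *        return ret;
 *    printf("%s: %u rx queues, mtu %u..%u\n", dev_info.driver_name,
 *           dev_info.nb_rx_queues, dev_info.min_mtu, dev_info.max_mtu);
 */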
2875
2876 int
2877 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2878                                  uint32_t *ptypes, int num)
2879 {
2880         int i, j;
2881         struct rte_eth_dev *dev;
2882         const uint32_t *all_ptypes;
2883
2884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2885         dev = &rte_eth_devices[port_id];
2886         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2887         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2888
2889         if (!all_ptypes)
2890                 return 0;
2891
2892         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2893                 if (all_ptypes[i] & ptype_mask) {
2894                         if (j < num)
2895                                 ptypes[j] = all_ptypes[i];
2896                         j++;
2897                 }
2898
2899         return j;
2900 }
2901
2902 int
2903 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
2904 {
2905         struct rte_eth_dev *dev;
2906
2907         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2908         dev = &rte_eth_devices[port_id];
2909         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2910
2911         return 0;
2912 }
2913
2914
2915 int
2916 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2917 {
2918         struct rte_eth_dev *dev;
2919
2920         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2921
2922         dev = &rte_eth_devices[port_id];
2923         *mtu = dev->data->mtu;
2924         return 0;
2925 }
2926
2927 int
2928 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2929 {
2930         int ret;
2931         struct rte_eth_dev_info dev_info;
2932         struct rte_eth_dev *dev;
2933
2934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2935         dev = &rte_eth_devices[port_id];
2936         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2937
2938         /*
2939          * Check if the device supports dev_infos_get, if it does not
2940          * skip min_mtu/max_mtu validation here as this requires values
2941          * that are populated within the call to rte_eth_dev_info_get()
2942          * which relies on dev->dev_ops->dev_infos_get.
2943          */
2944         if (*dev->dev_ops->dev_infos_get != NULL) {
2945                 ret = rte_eth_dev_info_get(port_id, &dev_info);
2946                 if (ret != 0)
2947                         return ret;
2948
2949                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
2950                         return -EINVAL;
2951         }
2952
2953         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2954         if (!ret)
2955                 dev->data->mtu = mtu;
2956
2957         return eth_err(port_id, ret);
2958 }
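
/*
 * Usage sketch (illustrative, not part of this file): 9000 is an
 * example jumbo-frame MTU; the call validates it against the
 * min_mtu/max_mtu range when the driver reports one.
 *
 *    ret = rte_eth_dev_set_mtu(port_id, 9000);
 *    if (ret != 0)
 *        printf("cannot set MTU: %s\n", rte_strerror(-ret));
 */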
2959
2960 int
2961 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2962 {
2963         struct rte_eth_dev *dev;
2964         int ret;
2965
2966         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2967         dev = &rte_eth_devices[port_id];
2968         if (!(dev->data->dev_conf.rxmode.offloads &
2969               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2970                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2971                         port_id);
2972                 return -ENOSYS;
2973         }
2974
2975         if (vlan_id > 4095) {
2976                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2977                         port_id, vlan_id);
2978                 return -EINVAL;
2979         }
2980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2981
2982         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2983         if (ret == 0) {
2984                 struct rte_vlan_filter_conf *vfc;
2985                 int vidx;
2986                 int vbit;
2987
2988                 vfc = &dev->data->vlan_filter_conf;
2989                 vidx = vlan_id / 64;
2990                 vbit = vlan_id % 64;
2991
2992                 if (on)
2993                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2994                 else
2995                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2996         }
2997
2998         return eth_err(port_id, ret);
2999 }
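
/*
 * Usage sketch: rte_eth_dev_vlan_filter() refuses to run unless the port was
 * configured with the VLAN filter offload. A minimal, assumption-laden flow
 * (one rx/tx queue pair; queue setup and device start elided):
 *
 *        struct rte_eth_conf conf;
 *
 *        memset(&conf, 0, sizeof(conf));
 *        conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *        if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *                return -1;
 *        ... rte_eth_rx/tx_queue_setup() and rte_eth_dev_start() ...
 *        return rte_eth_dev_vlan_filter(port_id, 100, 1);
 *
 * VLAN ID 100 is an arbitrary example value; any ID in 0-4095 is accepted.
 */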
3000
3001 int
3002 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3003                                     int on)
3004 {
3005         struct rte_eth_dev *dev;
3006
3007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3008         dev = &rte_eth_devices[port_id];
3009         if (rx_queue_id >= dev->data->nb_rx_queues) {
3010                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3011                 return -EINVAL;
3012         }
3013
3014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3015         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3016
3017         return 0;
3018 }
3019
3020 int
3021 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3022                                 enum rte_vlan_type vlan_type,
3023                                 uint16_t tpid)
3024 {
3025         struct rte_eth_dev *dev;
3026
3027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3028         dev = &rte_eth_devices[port_id];
3029         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3030
3031         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3032                                                                tpid));
3033 }
3034
3035 int
3036 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3037 {
3038         struct rte_eth_dev *dev;
3039         int ret = 0;
3040         int mask = 0;
3041         int cur, org = 0;
3042         uint64_t orig_offloads;
3043         uint64_t *dev_offloads;
3044
3045         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3046         dev = &rte_eth_devices[port_id];
3047
3048         /* save original values in case of failure */
3049         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3050         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3051
3052         /* check which options the application changed */
3053         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3054         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3055         if (cur != org) {
3056                 if (cur)
3057                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3058                 else
3059                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3060                 mask |= ETH_VLAN_STRIP_MASK;
3061         }
3062
3063         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3064         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3065         if (cur != org) {
3066                 if (cur)
3067                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3068                 else
3069                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3070                 mask |= ETH_VLAN_FILTER_MASK;
3071         }
3072
3073         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3074         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3075         if (cur != org) {
3076                 if (cur)
3077                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3078                 else
3079                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3080                 mask |= ETH_VLAN_EXTEND_MASK;
3081         }
3082
3083         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3084         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3085         if (cur != org) {
3086                 if (cur)
3087                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3088                 else
3089                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3090                 mask |= ETH_QINQ_STRIP_MASK;
3091         }
3092
3093         /* no change */
3094         if (mask == 0)
3095                 return ret;
3096
3097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3098         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3099         if (ret) {
3100                 /* hit an error, restore the original values */
3101                 *dev_offloads = orig_offloads;
3102         }
3103
3104         return eth_err(port_id, ret);
3105 }
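
/*
 * Usage sketch: the offload_mask argument describes the desired final state
 * of all the VLAN/QinQ offload flags at once, so toggling a single flag
 * safely means reading the current mask first. Illustrative only:
 *
 *        int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *        if (mask < 0)
 *                return mask;
 *        mask |= ETH_VLAN_STRIP_OFFLOAD;
 *        return rte_eth_dev_set_vlan_offload(port_id, mask);
 *
 * On failure the function above restores the previous rxmode offloads, so a
 * failed toggle leaves the software state unchanged.
 */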
3106
3107 int
3108 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3109 {
3110         struct rte_eth_dev *dev;
3111         uint64_t *dev_offloads;
3112         int ret = 0;
3113
3114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3115         dev = &rte_eth_devices[port_id];
3116         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3117
3118         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3119                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3120
3121         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3122                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3123
3124         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3125                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3126
3127         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3128                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3129
3130         return ret;
3131 }
3132
3133 int
3134 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3135 {
3136         struct rte_eth_dev *dev;
3137
3138         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3139         dev = &rte_eth_devices[port_id];
3140         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3141
3142         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3143 }
3144
3145 int
3146 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3147 {
3148         struct rte_eth_dev *dev;
3149
3150         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3151         dev = &rte_eth_devices[port_id];
3152         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3153         memset(fc_conf, 0, sizeof(*fc_conf));
3154         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3155 }
3156
3157 int
3158 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3159 {
3160         struct rte_eth_dev *dev;
3161
3162         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3163         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3164                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3165                 return -EINVAL;
3166         }
3167
3168         dev = &rte_eth_devices[port_id];
3169         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3170         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3171 }
3172
3173 int
3174 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3175                                    struct rte_eth_pfc_conf *pfc_conf)
3176 {
3177         struct rte_eth_dev *dev;
3178
3179         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3180         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3181                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3182                 return -EINVAL;
3183         }
3184
3185         dev = &rte_eth_devices[port_id];
3186         /* High/low water mark validation is device-specific */
3187         if (*dev->dev_ops->priority_flow_ctrl_set)
3188                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3189                                         (dev, pfc_conf));
3190         return -ENOTSUP;
3191 }
3192
3193 static int
3194 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3195                         uint16_t reta_size)
3196 {
3197         uint16_t i, num;
3198
3199         if (!reta_conf)
3200                 return -EINVAL;
3201
3202         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3203         for (i = 0; i < num; i++) {
3204                 if (reta_conf[i].mask)
3205                         return 0;
3206         }
3207
3208         return -EINVAL;
3209 }
3210
3211 static int
3212 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3213                          uint16_t reta_size,
3214                          uint16_t max_rxq)
3215 {
3216         uint16_t i, idx, shift;
3217
3218         if (!reta_conf)
3219                 return -EINVAL;
3220
3221         if (max_rxq == 0) {
3222                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3223                 return -EINVAL;
3224         }
3225
3226         for (i = 0; i < reta_size; i++) {
3227                 idx = i / RTE_RETA_GROUP_SIZE;
3228                 shift = i % RTE_RETA_GROUP_SIZE;
3229                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3230                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3231                         RTE_ETHDEV_LOG(ERR,
3232                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3233                                 idx, shift,
3234                                 reta_conf[idx].reta[shift], max_rxq);
3235                         return -EINVAL;
3236                 }
3237         }
3238
3239         return 0;
3240 }
3241
3242 int
3243 rte_eth_dev_rss_reta_update(uint16_t port_id,
3244                             struct rte_eth_rss_reta_entry64 *reta_conf,
3245                             uint16_t reta_size)
3246 {
3247         struct rte_eth_dev *dev;
3248         int ret;
3249
3250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3251         /* Check mask bits */
3252         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3253         if (ret < 0)
3254                 return ret;
3255
3256         dev = &rte_eth_devices[port_id];
3257
3258         /* Check entry value */
3259         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3260                                 dev->data->nb_rx_queues);
3261         if (ret < 0)
3262                 return ret;
3263
3264         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3265         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3266                                                              reta_size));
3267 }
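
/*
 * Usage sketch: each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE
 * (64) indirection entries, selected per entry by its mask, matching the
 * idx/shift arithmetic in rte_eth_check_reta_entry() above. A round-robin
 * fill, assuming reta_size was taken from dev_info.reta_size, is a nonzero
 * multiple of RTE_RETA_GROUP_SIZE, and nb_q is the configured Rx queue
 * count (all caller-supplied here):
 *
 *        struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *        uint16_t i;
 *
 *        memset(reta, 0, sizeof(reta));
 *        for (i = 0; i < reta_size; i++) {
 *                reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                                UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *                reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                                i % nb_q;
 *        }
 *        ret = rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */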
3268
3269 int
3270 rte_eth_dev_rss_reta_query(uint16_t port_id,
3271                            struct rte_eth_rss_reta_entry64 *reta_conf,
3272                            uint16_t reta_size)
3273 {
3274         struct rte_eth_dev *dev;
3275         int ret;
3276
3277         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3278
3279         /* Check mask bits */
3280         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3281         if (ret < 0)
3282                 return ret;
3283
3284         dev = &rte_eth_devices[port_id];
3285         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3286         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3287                                                             reta_size));
3288 }
3289
3290 int
3291 rte_eth_dev_rss_hash_update(uint16_t port_id,
3292                             struct rte_eth_rss_conf *rss_conf)
3293 {
3294         struct rte_eth_dev *dev;
3295         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3296         int ret;
3297
3298         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3299
3300         ret = rte_eth_dev_info_get(port_id, &dev_info);
3301         if (ret != 0)
3302                 return ret;
3303
3304         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3305
3306         dev = &rte_eth_devices[port_id];
3307         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3308             dev_info.flow_type_rss_offloads) {
3309                 RTE_ETHDEV_LOG(ERR,
3310                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3311                         port_id, rss_conf->rss_hf,
3312                         dev_info.flow_type_rss_offloads);
3313                 return -EINVAL;
3314         }
3315         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3316         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3317                                                                  rss_conf));
3318 }
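
/*
 * Usage sketch: as the check above shows, the requested rss_hf must be a
 * subset of dev_info.flow_type_rss_offloads, so a portable caller masks its
 * wish list first. A NULL rss_key commonly leaves the current hash key
 * unchanged, though that is driver-dependent. Example only:
 *
 *        struct rte_eth_dev_info info;
 *        struct rte_eth_rss_conf rss = { .rss_key = NULL };
 *        int ret;
 *
 *        ret = rte_eth_dev_info_get(port_id, &info);
 *        if (ret != 0)
 *                return ret;
 *        rss.rss_hf = (ETH_RSS_IP | ETH_RSS_TCP) & info.flow_type_rss_offloads;
 *        return rte_eth_dev_rss_hash_update(port_id, &rss);
 */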
3319
3320 int
3321 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3322                               struct rte_eth_rss_conf *rss_conf)
3323 {
3324         struct rte_eth_dev *dev;
3325
3326         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3327         dev = &rte_eth_devices[port_id];
3328         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3329         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3330                                                                    rss_conf));
3331 }
3332
3333 int
3334 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3335                                 struct rte_eth_udp_tunnel *udp_tunnel)
3336 {
3337         struct rte_eth_dev *dev;
3338
3339         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3340         if (udp_tunnel == NULL) {
3341                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3342                 return -EINVAL;
3343         }
3344
3345         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3346                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3347                 return -EINVAL;
3348         }
3349
3350         dev = &rte_eth_devices[port_id];
3351         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3352         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3353                                                                 udp_tunnel));
3354 }
3355
3356 int
3357 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3358                                    struct rte_eth_udp_tunnel *udp_tunnel)
3359 {
3360         struct rte_eth_dev *dev;
3361
3362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3363         dev = &rte_eth_devices[port_id];
3364
3365         if (udp_tunnel == NULL) {
3366                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3367                 return -EINVAL;
3368         }
3369
3370         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3371                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3372                 return -EINVAL;
3373         }
3374
3375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3376         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3377                                                                 udp_tunnel));
3378 }
3379
3380 int
3381 rte_eth_led_on(uint16_t port_id)
3382 {
3383         struct rte_eth_dev *dev;
3384
3385         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3386         dev = &rte_eth_devices[port_id];
3387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3388         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3389 }
3390
3391 int
3392 rte_eth_led_off(uint16_t port_id)
3393 {
3394         struct rte_eth_dev *dev;
3395
3396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3397         dev = &rte_eth_devices[port_id];
3398         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3399         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3400 }
3401
3402 /*
3403  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3404  * an empty spot.
3405  */
3406 static int
3407 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3408 {
3409         struct rte_eth_dev_info dev_info;
3410         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3411         unsigned i;
3412         int ret;
3413
3414         ret = rte_eth_dev_info_get(port_id, &dev_info);
3415         if (ret != 0)
3416                 return -1;
3417
3418         for (i = 0; i < dev_info.max_mac_addrs; i++)
3419                 if (memcmp(addr, &dev->data->mac_addrs[i],
3420                                 RTE_ETHER_ADDR_LEN) == 0)
3421                         return i;
3422
3423         return -1;
3424 }
3425
3426 static const struct rte_ether_addr null_mac_addr;
3427
3428 int
3429 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3430                         uint32_t pool)
3431 {
3432         struct rte_eth_dev *dev;
3433         int index;
3434         uint64_t pool_mask;
3435         int ret;
3436
3437         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3438         dev = &rte_eth_devices[port_id];
3439         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3440
3441         if (rte_is_zero_ether_addr(addr)) {
3442                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3443                         port_id);
3444                 return -EINVAL;
3445         }
3446         if (pool >= ETH_64_POOLS) {
3447                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3448                 return -EINVAL;
3449         }
3450
3451         index = get_mac_addr_index(port_id, addr);
3452         if (index < 0) {
3453                 index = get_mac_addr_index(port_id, &null_mac_addr);
3454                 if (index < 0) {
3455                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3456                                 port_id);
3457                         return -ENOSPC;
3458                 }
3459         } else {
3460                 pool_mask = dev->data->mac_pool_sel[index];
3461
3462                 /* If both the MAC address and pool are already there, do nothing */
3463                 if (pool_mask & (1ULL << pool))
3464                         return 0;
3465         }
3466
3467         /* Update NIC */
3468         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3469
3470         if (ret == 0) {
3471                 /* Update address in NIC data structure */
3472                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3473
3474                 /* Update pool bitmap in NIC data structure */
3475                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3476         }
3477
3478         return eth_err(port_id, ret);
3479 }
3480
3481 int
3482 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3483 {
3484         struct rte_eth_dev *dev;
3485         int index;
3486
3487         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3488         dev = &rte_eth_devices[port_id];
3489         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3490
3491         index = get_mac_addr_index(port_id, addr);
3492         if (index == 0) {
3493                 RTE_ETHDEV_LOG(ERR,
3494                         "Port %u: Cannot remove default MAC address\n",
3495                         port_id);
3496                 return -EADDRINUSE;
3497         } else if (index < 0)
3498                 return 0;  /* Do nothing if address wasn't found */
3499
3500         /* Update NIC */
3501         (*dev->dev_ops->mac_addr_remove)(dev, index);
3502
3503         /* Update address in NIC data structure */
3504         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3505
3506         /* reset pool bitmap */
3507         dev->data->mac_pool_sel[index] = 0;
3508
3509         return 0;
3510 }
3511
3512 int
3513 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3514 {
3515         struct rte_eth_dev *dev;
3516         int ret;
3517
3518         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3519
3520         if (!rte_is_valid_assigned_ether_addr(addr))
3521                 return -EINVAL;
3522
3523         dev = &rte_eth_devices[port_id];
3524         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3525
3526         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3527         if (ret < 0)
3528                 return ret;
3529
3530         /* Update default address in NIC data structure */
3531         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3532
3533         return 0;
3534 }
3535
3536
3537 /*
3538  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3539  * an empty spot.
3540  */
3541 static int
3542 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3543 {
3544         struct rte_eth_dev_info dev_info;
3545         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3546         unsigned i;
3547         int ret;
3548
3549         ret = rte_eth_dev_info_get(port_id, &dev_info);
3550         if (ret != 0)
3551                 return -1;
3552
3553         if (!dev->data->hash_mac_addrs)
3554                 return -1;
3555
3556         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3557                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3558                         RTE_ETHER_ADDR_LEN) == 0)
3559                         return i;
3560
3561         return -1;
3562 }
3563
3564 int
3565 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3566                                 uint8_t on)
3567 {
3568         int index;
3569         int ret;
3570         struct rte_eth_dev *dev;
3571
3572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3573
3574         dev = &rte_eth_devices[port_id];
3575         if (rte_is_zero_ether_addr(addr)) {
3576                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3577                         port_id);
3578                 return -EINVAL;
3579         }
3580
3581         index = get_hash_mac_addr_index(port_id, addr);
3582         /* Check if it's already there, and do nothing */
3583         if ((index >= 0) && on)
3584                 return 0;
3585
3586         if (index < 0) {
3587                 if (!on) {
3588                         RTE_ETHDEV_LOG(ERR,
3589                                 "Port %u: the MAC address was not set in UTA\n",
3590                                 port_id);
3591                         return -EINVAL;
3592                 }
3593
3594                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3595                 if (index < 0) {
3596                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3597                                 port_id);
3598                         return -ENOSPC;
3599                 }
3600         }
3601
3602         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3603         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3604         if (ret == 0) {
3605                 /* Update address in NIC data structure */
3606                 if (on)
3607                         rte_ether_addr_copy(addr,
3608                                         &dev->data->hash_mac_addrs[index]);
3609                 else
3610                         rte_ether_addr_copy(&null_mac_addr,
3611                                         &dev->data->hash_mac_addrs[index]);
3612         }
3613
3614         return eth_err(port_id, ret);
3615 }
3616
3617 int
3618 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3619 {
3620         struct rte_eth_dev *dev;
3621
3622         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3623
3624         dev = &rte_eth_devices[port_id];
3625
3626         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3627         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3628                                                                        on));
3629 }
3630
3631 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3632                                         uint16_t tx_rate)
3633 {
3634         struct rte_eth_dev *dev;
3635         struct rte_eth_dev_info dev_info;
3636         struct rte_eth_link link;
3637         int ret;
3638
3639         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3640
3641         ret = rte_eth_dev_info_get(port_id, &dev_info);
3642         if (ret != 0)
3643                 return ret;
3644
3645         dev = &rte_eth_devices[port_id];
3646         link = dev->data->dev_link;
3647
3648         if (queue_idx >= dev_info.max_tx_queues) {
3649                 RTE_ETHDEV_LOG(ERR,
3650                         "Set queue rate limit: port %u: invalid queue ID=%u\n",
3651                         port_id, queue_idx);
3652                 return -EINVAL;
3653         }
3654
3655         if (tx_rate > link.link_speed) {
3656                 RTE_ETHDEV_LOG(ERR,
3657                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
3658                         tx_rate, link.link_speed);
3659                 return -EINVAL;
3660         }
3661
3662         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3663         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3664                                                         queue_idx, tx_rate));
3665 }
3666
3667 int
3668 rte_eth_mirror_rule_set(uint16_t port_id,
3669                         struct rte_eth_mirror_conf *mirror_conf,
3670                         uint8_t rule_id, uint8_t on)
3671 {
3672         struct rte_eth_dev *dev;
3673
3674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3675         if (mirror_conf->rule_type == 0) {
3676                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3677                 return -EINVAL;
3678         }
3679
3680         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3681                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3682                         ETH_64_POOLS - 1);
3683                 return -EINVAL;
3684         }
3685
3686         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3687              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3688             (mirror_conf->pool_mask == 0)) {
3689                 RTE_ETHDEV_LOG(ERR,
3690                         "Invalid mirror pool, pool mask cannot be 0\n");
3691                 return -EINVAL;
3692         }
3693
3694         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3695             mirror_conf->vlan.vlan_mask == 0) {
3696                 RTE_ETHDEV_LOG(ERR,
3697                         "Invalid vlan mask, vlan mask cannot be 0\n");
3698                 return -EINVAL;
3699         }
3700
3701         dev = &rte_eth_devices[port_id];
3702         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3703
3704         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3705                                                 mirror_conf, rule_id, on));
3706 }
3707
3708 int
3709 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3710 {
3711         struct rte_eth_dev *dev;
3712
3713         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3714
3715         dev = &rte_eth_devices[port_id];
3716         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3717
3718         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3719                                                                    rule_id));
3720 }
3721
3722 RTE_INIT(eth_dev_init_cb_lists)
3723 {
3724         int i;
3725
3726         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3727                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3728 }
3729
3730 int
3731 rte_eth_dev_callback_register(uint16_t port_id,
3732                         enum rte_eth_event_type event,
3733                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3734 {
3735         struct rte_eth_dev *dev;
3736         struct rte_eth_dev_callback *user_cb;
3737         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3738         uint16_t last_port;
3739
3740         if (!cb_fn)
3741                 return -EINVAL;
3742
3743         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3744                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
3745                 return -EINVAL;
3746         }
3747
3748         if (port_id == RTE_ETH_ALL) {
3749                 next_port = 0;
3750                 last_port = RTE_MAX_ETHPORTS - 1;
3751         } else {
3752                 next_port = last_port = port_id;
3753         }
3754
3755         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3756
3757         do {
3758                 dev = &rte_eth_devices[next_port];
3759
3760                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3761                         if (user_cb->cb_fn == cb_fn &&
3762                                 user_cb->cb_arg == cb_arg &&
3763                                 user_cb->event == event) {
3764                                 break;
3765                         }
3766                 }
3767
3768                 /* create a new callback. */
3769                 if (user_cb == NULL) {
3770                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3771                                 sizeof(struct rte_eth_dev_callback), 0);
3772                         if (user_cb != NULL) {
3773                                 user_cb->cb_fn = cb_fn;
3774                                 user_cb->cb_arg = cb_arg;
3775                                 user_cb->event = event;
3776                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3777                                                   user_cb, next);
3778                         } else {
3779                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3780                                 rte_eth_dev_callback_unregister(port_id, event,
3781                                                                 cb_fn, cb_arg);
3782                                 return -ENOMEM;
3783                         }
3784
3785                 }
3786         } while (++next_port <= last_port);
3787
3788         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3789         return 0;
3790 }
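
/*
 * Usage sketch: a handler matching rte_eth_dev_cb_fn, registered for link
 * status change events on all ports. The handler body is a stand-in for
 * whatever the application actually does; <stdio.h> is assumed included:
 *
 *        static int
 *        on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *                        void *cb_arg, void *ret_param)
 *        {
 *                RTE_SET_USED(event);
 *                RTE_SET_USED(cb_arg);
 *                RTE_SET_USED(ret_param);
 *                printf("port %u: link status changed\n", port_id);
 *                return 0;
 *        }
 *
 *        rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                        on_link_change, NULL);
 *
 * Registering the same (fn, arg, event) tuple twice is a no-op thanks to the
 * duplicate check in the loop above.
 */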
3791
3792 int
3793 rte_eth_dev_callback_unregister(uint16_t port_id,
3794                         enum rte_eth_event_type event,
3795                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3796 {
3797         int ret;
3798         struct rte_eth_dev *dev;
3799         struct rte_eth_dev_callback *cb, *next;
3800         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3801         uint16_t last_port;
3802
3803         if (!cb_fn)
3804                 return -EINVAL;
3805
3806         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3807                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
3808                 return -EINVAL;
3809         }
3810
3811         if (port_id == RTE_ETH_ALL) {
3812                 next_port = 0;
3813                 last_port = RTE_MAX_ETHPORTS - 1;
3814         } else {
3815                 next_port = last_port = port_id;
3816         }
3817
3818         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3819
3820         do {
3821                 dev = &rte_eth_devices[next_port];
3822                 ret = 0;
3823                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3824                      cb = next) {
3825
3826                         next = TAILQ_NEXT(cb, next);
3827
3828                         if (cb->cb_fn != cb_fn || cb->event != event ||
3829                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3830                                 continue;
3831
3832                         /*
3833                          * if this callback is not executing right now,
3834                          * then remove it.
3835                          */
3836                         if (cb->active == 0) {
3837                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3838                                 rte_free(cb);
3839                         } else {
3840                                 ret = -EAGAIN;
3841                         }
3842                 }
3843         } while (++next_port <= last_port);
3844
3845         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3846         return ret;
3847 }
3848
3849 int
3850 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3851         enum rte_eth_event_type event, void *ret_param)
3852 {
3853         struct rte_eth_dev_callback *cb_lst;
3854         struct rte_eth_dev_callback dev_cb;
3855         int rc = 0;
3856
3857         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3858         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3859                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3860                         continue;
3861                 dev_cb = *cb_lst;
3862                 cb_lst->active = 1;
3863                 if (ret_param != NULL)
3864                         dev_cb.ret_param = ret_param;
3865
3866                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3867                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3868                                 dev_cb.cb_arg, dev_cb.ret_param);
3869                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3870                 cb_lst->active = 0;
3871         }
3872         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3873         return rc;
3874 }
3875
3876 void
3877 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3878 {
3879         if (dev == NULL)
3880                 return;
3881
3882         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3883
3884         dev->state = RTE_ETH_DEV_ATTACHED;
3885 }
3886
3887 int
3888 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3889 {
3890         uint32_t vec;
3891         struct rte_eth_dev *dev;
3892         struct rte_intr_handle *intr_handle;
3893         uint16_t qid;
3894         int rc;
3895
3896         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3897
3898         dev = &rte_eth_devices[port_id];
3899
3900         if (!dev->intr_handle) {
3901                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3902                 return -ENOTSUP;
3903         }
3904
3905         intr_handle = dev->intr_handle;
3906         if (!intr_handle->intr_vec) {
3907                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3908                 return -EPERM;
3909         }
3910
3911         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3912                 vec = intr_handle->intr_vec[qid];
3913                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3914                 if (rc && rc != -EEXIST) {
3915                         RTE_ETHDEV_LOG(ERR,
3916                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3917                                 port_id, qid, op, epfd, vec);
3918                 }
3919         }
3920
3921         return 0;
3922 }
3923
3924 int
3925 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3926 {
3927         struct rte_intr_handle *intr_handle;
3928         struct rte_eth_dev *dev;
3929         unsigned int efd_idx;
3930         uint32_t vec;
3931         int fd;
3932
3933         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3934
3935         dev = &rte_eth_devices[port_id];
3936
3937         if (queue_id >= dev->data->nb_rx_queues) {
3938                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3939                 return -1;
3940         }
3941
3942         if (!dev->intr_handle) {
3943                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3944                 return -1;
3945         }
3946
3947         intr_handle = dev->intr_handle;
3948         if (!intr_handle->intr_vec) {
3949                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3950                 return -1;
3951         }
3952
3953         vec = intr_handle->intr_vec[queue_id];
3954         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3955                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3956         fd = intr_handle->efds[efd_idx];
3957
3958         return fd;
3959 }
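
/*
 * Usage sketch: the returned file descriptor can be watched directly with
 * epoll as an alternative to the rte_epoll_* helpers. Error handling is
 * trimmed and the descriptors are assumed valid for brevity; <sys/epoll.h>
 * is assumed included:
 *
 *        int efd = epoll_create1(0);
 *        int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *        struct epoll_event ev = { .events = EPOLLIN };
 *
 *        epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
 *        rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *        epoll_wait(efd, &ev, 1, -1);
 *
 * The interrupt must be re-armed via rte_eth_dev_rx_intr_enable() before
 * each wait; see the enable/disable wrappers further below.
 */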
3960
3961 const struct rte_memzone *
3962 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3963                          uint16_t queue_id, size_t size, unsigned align,
3964                          int socket_id)
3965 {
3966         char z_name[RTE_MEMZONE_NAMESIZE];
3967         const struct rte_memzone *mz;
3968         int rc;
3969
3970         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3971                       dev->data->port_id, queue_id, ring_name);
3972         if (rc >= RTE_MEMZONE_NAMESIZE) {
3973                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
3974                 rte_errno = ENAMETOOLONG;
3975                 return NULL;
3976         }
3977
3978         mz = rte_memzone_lookup(z_name);
3979         if (mz)
3980                 return mz;
3981
3982         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3983                         RTE_MEMZONE_IOVA_CONTIG, align);
3984 }
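
/*
 * Usage note and sketch: this helper targets PMD queue-setup paths that
 * allocate descriptor rings. Note that the name-based lookup above returns
 * any existing memzone with a matching name regardless of its size, so a
 * driver re-reserving after a ring-size change must free the old zone first.
 * A hypothetical driver call, with ring_size and socket_id caller-supplied:
 *
 *        const struct rte_memzone *mz;
 *
 *        mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *                        ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *        if (mz == NULL)
 *                return -rte_errno;
 */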
3985
3986 int
3987 rte_eth_dev_create(struct rte_device *device, const char *name,
3988         size_t priv_data_size,
3989         ethdev_bus_specific_init ethdev_bus_specific_init,
3990         void *bus_init_params,
3991         ethdev_init_t ethdev_init, void *init_params)
3992 {
3993         struct rte_eth_dev *ethdev;
3994         int retval;
3995
3996         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3997
3998         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3999                 ethdev = rte_eth_dev_allocate(name);
4000                 if (!ethdev)
4001                         return -ENODEV;
4002
4003                 if (priv_data_size) {
4004                         ethdev->data->dev_private = rte_zmalloc_socket(
4005                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4006                                 device->numa_node);
4007
4008                         if (!ethdev->data->dev_private) {
4009                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4010                                 retval = -ENOMEM;
4011                                 goto probe_failed;
4012                         }
4013                 }
4014         } else {
4015                 ethdev = rte_eth_dev_attach_secondary(name);
4016                 if (!ethdev) {
4017                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4018                                 "ethdev doesn't exist\n");
4019                         return -ENODEV;
4020                 }
4021         }
4022
4023         ethdev->device = device;
4024
4025         if (ethdev_bus_specific_init) {
4026                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4027                 if (retval) {
4028                         RTE_LOG(ERR, EAL,
4029                                 "ethdev bus-specific initialisation failed\n");
4030                         goto probe_failed;
4031                 }
4032         }
4033
4034         retval = ethdev_init(ethdev, init_params);
4035         if (retval) {
4036                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
4037                 goto probe_failed;
4038         }
4039
4040         rte_eth_dev_probing_finish(ethdev);
4041
4042         return retval;
4043
4044 probe_failed:
4045         rte_eth_dev_release_port(ethdev);
4046         return retval;
4047 }
4048
4049 int
4050 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4051         ethdev_uninit_t ethdev_uninit)
4052 {
4053         int ret;
4054
4055         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4056         if (!ethdev)
4057                 return -ENODEV;
4058
4059         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4060
4061         ret = ethdev_uninit(ethdev);
4062         if (ret)
4063                 return ret;
4064
4065         return rte_eth_dev_release_port(ethdev);
4066 }
4067
4068 int
4069 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4070                           int epfd, int op, void *data)
4071 {
4072         uint32_t vec;
4073         struct rte_eth_dev *dev;
4074         struct rte_intr_handle *intr_handle;
4075         int rc;
4076
4077         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4078
4079         dev = &rte_eth_devices[port_id];
4080         if (queue_id >= dev->data->nb_rx_queues) {
4081                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4082                 return -EINVAL;
4083         }
4084
4085         if (!dev->intr_handle) {
4086                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4087                 return -ENOTSUP;
4088         }
4089
4090         intr_handle = dev->intr_handle;
4091         if (!intr_handle->intr_vec) {
4092                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4093                 return -EPERM;
4094         }
4095
4096         vec = intr_handle->intr_vec[queue_id];
4097         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4098         if (rc && rc != -EEXIST) {
4099                 RTE_ETHDEV_LOG(ERR,
4100                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4101                         port_id, queue_id, op, epfd, vec);
4102                 return rc;
4103         }
4104
4105         return 0;
4106 }
4107
4108 int
4109 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4110                            uint16_t queue_id)
4111 {
4112         struct rte_eth_dev *dev;
4113
4114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4115
4116         dev = &rte_eth_devices[port_id];
4117
4118         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4119         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4120                                                                 queue_id));
4121 }
4122
4123 int
4124 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4125                             uint16_t queue_id)
4126 {
4127         struct rte_eth_dev *dev;
4128
4129         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4130
4131         dev = &rte_eth_devices[port_id];
4132
4133         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4134         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4135                                                                 queue_id));
4136 }
4137
4138
4139 int
4140 rte_eth_dev_filter_supported(uint16_t port_id,
4141                              enum rte_filter_type filter_type)
4142 {
4143         struct rte_eth_dev *dev;
4144
4145         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4146
4147         dev = &rte_eth_devices[port_id];
4148         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4149         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4150                                 RTE_ETH_FILTER_NOP, NULL);
4151 }
4152
4153 int
4154 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4155                         enum rte_filter_op filter_op, void *arg)
4156 {
4157         struct rte_eth_dev *dev;
4158
4159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4160
4161         dev = &rte_eth_devices[port_id];
4162         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4163         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4164                                                              filter_op, arg));
4165 }
4166
4167 const struct rte_eth_rxtx_callback *
4168 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4169                 rte_rx_callback_fn fn, void *user_param)
4170 {
4171 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4172         rte_errno = ENOTSUP;
4173         return NULL;
4174 #endif
4175         struct rte_eth_dev *dev;
4176
4177         /* check input parameters */
4178         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4179                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4180                 rte_errno = EINVAL;
4181                 return NULL;
4182         }
4183         dev = &rte_eth_devices[port_id];
4184         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4185                 rte_errno = EINVAL;
4186                 return NULL;
4187         }
4188         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4189
4190         if (cb == NULL) {
4191                 rte_errno = ENOMEM;
4192                 return NULL;
4193         }
4194
4195         cb->fn.rx = fn;
4196         cb->param = user_param;
4197
4198         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4199         /* Add the callbacks in FIFO order. */
4200         struct rte_eth_rxtx_callback *tail =
4201                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4202
4203         if (!tail) {
4204                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4205
4206         } else {
4207                 while (tail->next)
4208                         tail = tail->next;
4209                 tail->next = cb;
4210         }
4211         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4212
4213         return cb;
4214 }
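
/*
 * Usage sketch: a post-Rx callback matching rte_rx_callback_fn that counts
 * received packets. Removal with rte_eth_remove_rx_callback() (see below)
 * only unlinks the callback; freeing its memory is the caller's job, and
 * only once no datapath thread can still be executing it. The counter is an
 * illustrative global:
 *
 *        static uint64_t rx_count;
 *
 *        static uint16_t
 *        count_cb(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **pkts,
 *                        uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *        {
 *                RTE_SET_USED(port_id);
 *                RTE_SET_USED(queue_id);
 *                RTE_SET_USED(pkts);
 *                RTE_SET_USED(max_pkts);
 *                RTE_SET_USED(user_param);
 *                rx_count += nb_pkts;
 *                return nb_pkts;
 *        }
 *
 *        const struct rte_eth_rxtx_callback *cb =
 *                rte_eth_add_rx_callback(port_id, queue_id, count_cb, NULL);
 */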
4215
4216 const struct rte_eth_rxtx_callback *
4217 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4218                 rte_rx_callback_fn fn, void *user_param)
4219 {
4220 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4221         rte_errno = ENOTSUP;
4222         return NULL;
4223 #endif
4224         /* check input parameters */
4225         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4226                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4227                 rte_errno = EINVAL;
4228                 return NULL;
4229         }
4230
4231         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4232
4233         if (cb == NULL) {
4234                 rte_errno = ENOMEM;
4235                 return NULL;
4236         }
4237
4238         cb->fn.rx = fn;
4239         cb->param = user_param;
4240
4241         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4242         /* Add the callback at the first position */
4243         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4244         rte_smp_wmb();
4245         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4246         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4247
4248         return cb;
4249 }
4250
4251 const struct rte_eth_rxtx_callback *
4252 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4253                 rte_tx_callback_fn fn, void *user_param)
4254 {
4255 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4256         rte_errno = ENOTSUP;
4257         return NULL;
4258 #endif
4259         struct rte_eth_dev *dev;
4260
4261         /* check input parameters */
4262         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4263                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4264                 rte_errno = EINVAL;
4265                 return NULL;
4266         }
4267
4268         dev = &rte_eth_devices[port_id];
4269         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4270                 rte_errno = EINVAL;
4271                 return NULL;
4272         }
4273
4274         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4275
4276         if (cb == NULL) {
4277                 rte_errno = ENOMEM;
4278                 return NULL;
4279         }
4280
4281         cb->fn.tx = fn;
4282         cb->param = user_param;
4283
4284         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4285         /* Add the callbacks in FIFO order. */
4286         struct rte_eth_rxtx_callback *tail =
4287                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4288
4289         if (!tail) {
4290                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4291
4292         } else {
4293                 while (tail->next)
4294                         tail = tail->next;
4295                 tail->next = cb;
4296         }
4297         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4298
4299         return cb;
4300 }
4301
4302 int
4303 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4304                 const struct rte_eth_rxtx_callback *user_cb)
4305 {
4306 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4307         return -ENOTSUP;
4308 #endif
4309         /* Check input parameters. */
4310         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4311         if (user_cb == NULL ||
4312                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4313                 return -EINVAL;
4314
4315         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4316         struct rte_eth_rxtx_callback *cb;
4317         struct rte_eth_rxtx_callback **prev_cb;
4318         int ret = -EINVAL;
4319
4320         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4321         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4322         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4323                 cb = *prev_cb;
4324                 if (cb == user_cb) {
4325                         /* Remove the user cb from the callback list. */
4326                         *prev_cb = cb->next;
4327                         ret = 0;
4328                         break;
4329                 }
4330         }
4331         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4332
4333         return ret;
4334 }
4335
4336 int
4337 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4338                 const struct rte_eth_rxtx_callback *user_cb)
4339 {
4340 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4341         return -ENOTSUP;
4342 #endif
4343         /* Check input parameters. */
4344         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4345         if (user_cb == NULL ||
4346                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4347                 return -EINVAL;
4348
4349         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4350         int ret = -EINVAL;
4351         struct rte_eth_rxtx_callback *cb;
4352         struct rte_eth_rxtx_callback **prev_cb;
4353
4354         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4355         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4356         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4357                 cb = *prev_cb;
4358                 if (cb == user_cb) {
4359                         /* Remove the user cb from the callback list. */
4360                         *prev_cb = cb->next;
4361                         ret = 0;
4362                         break;
4363                 }
4364         }
4365         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4366
4367         return ret;
4368 }
4369
4370 int
4371 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4372         struct rte_eth_rxq_info *qinfo)
4373 {
4374         struct rte_eth_dev *dev;
4375
4376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4377
4378         if (qinfo == NULL)
4379                 return -EINVAL;
4380
4381         dev = &rte_eth_devices[port_id];
4382         if (queue_id >= dev->data->nb_rx_queues) {
4383                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4384                 return -EINVAL;
4385         }
4386
4387         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4388                 RTE_ETHDEV_LOG(INFO,
4389                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4390                         queue_id, port_id);
4391                 return -EINVAL;
4392         }
4393
4394         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4395
4396         memset(qinfo, 0, sizeof(*qinfo));
4397         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4398         return 0;
4399 }
4400
4401 int
4402 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4403         struct rte_eth_txq_info *qinfo)
4404 {
4405         struct rte_eth_dev *dev;
4406
4407         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4408
4409         if (qinfo == NULL)
4410                 return -EINVAL;
4411
4412         dev = &rte_eth_devices[port_id];
4413         if (queue_id >= dev->data->nb_tx_queues) {
4414                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4415                 return -EINVAL;
4416         }
4417
4418         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4419                 RTE_ETHDEV_LOG(INFO,
4420                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4421                         queue_id, port_id);
4422                 return -EINVAL;
4423         }
4424
4425         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4426
4427         memset(qinfo, 0, sizeof(*qinfo));
4428         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4429
4430         return 0;
4431 }
4432
4433 int
4434 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4435                           struct rte_eth_burst_mode *mode)
4436 {
4437         struct rte_eth_dev *dev;
4438
4439         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4440
4441         if (mode == NULL)
4442                 return -EINVAL;
4443
4444         dev = &rte_eth_devices[port_id];
4445
4446         if (queue_id >= dev->data->nb_rx_queues) {
4447                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4448                 return -EINVAL;
4449         }
4450
4451         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4452         memset(mode, 0, sizeof(*mode));
4453         return eth_err(port_id,
4454                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4455 }
4456
4457 int
4458 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4459                           struct rte_eth_burst_mode *mode)
4460 {
4461         struct rte_eth_dev *dev;
4462
4463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4464
4465         if (mode == NULL)
4466                 return -EINVAL;
4467
4468         dev = &rte_eth_devices[port_id];
4469
4470         if (queue_id >= dev->data->nb_tx_queues) {
4471                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4472                 return -EINVAL;
4473         }
4474
4475         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4476         memset(mode, 0, sizeof(*mode));
4477         return eth_err(port_id,
4478                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4479 }
4480
4481 const char *
4482 rte_eth_burst_mode_option_name(uint64_t option)
4483 {
4484         const char *name = "";
4485         unsigned int i;
4486
4487         for (i = 0; i < RTE_DIM(rte_burst_option_names); ++i) {
4488                 if (option == rte_burst_option_names[i].option) {
4489                         name = rte_burst_option_names[i].name;
4490                         break;
4491                 }
4492         }
4493
4494         return name;
4495 }
4496
4497 int
4498 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4499                              struct rte_ether_addr *mc_addr_set,
4500                              uint32_t nb_mc_addr)
4501 {
4502         struct rte_eth_dev *dev;
4503
4504         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4505
4506         dev = &rte_eth_devices[port_id];
4507         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4508         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4509                                                 mc_addr_set, nb_mc_addr));
4510 }

int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
				(dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
				(dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								timestamp));
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								timestamp));
}

int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
}
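
/*
 * Illustrative usage sketch, not part of the library: enable IEEE 1588
 * timestamping, read the current device time, then fetch the hardware
 * timestamp of a received PTP frame. The flags argument of
 * rte_eth_timesync_read_rx_timestamp() is driver-specific (often a
 * timestamp register index); 0 is used here only as a placeholder.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_ptp_rx_timestamp(uint16_t port_id)
{
	struct timespec now, rx_ts;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	ret = rte_eth_timesync_read_time(port_id, &now);
	if (ret != 0)
		return ret;

	/* After a PTP frame has been received, read its Rx timestamp. */
	ret = rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0);
	if (ret != 0)
		return ret;

	printf("device time %ld.%09ld, rx timestamp %ld.%09ld\n",
	       (long)now.tv_sec, (long)now.tv_nsec,
	       (long)rx_ts.tv_sec, (long)rx_ts.tv_nsec);
	return 0;
}
#endif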

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}
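
/*
 * Illustrative usage sketch, not part of the library: the conventional
 * two-call pattern for register dumps. A first call with info.data == NULL
 * lets the driver report the required length and register width; the second
 * call fills the allocated buffer. Treat this as a sketch of the pattern,
 * not a definitive recipe.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_dump_regs(uint16_t port_id)
{
	struct rte_dev_reg_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	ret = rte_eth_dev_get_reg_info(port_id, &info); /* sizes only */
	if (ret != 0)
		return ret;

	info.data = calloc(info.length, info.width);
	if (info.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_reg_info(port_id, &info); /* actual dump */
	free(info.data);
	return ret;
}
#endif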

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
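
/*
 * Illustrative usage sketch, not part of the library: read the whole device
 * EEPROM by first querying its size. The driver fills the magic field on a
 * read, and a matching magic is expected on a later write.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_read_eeprom(uint16_t port_id)
{
	struct rte_dev_eeprom_info info;
	int len, ret;

	len = rte_eth_dev_get_eeprom_length(port_id);
	if (len < 0)
		return len; /* e.g. -ENOTSUP when the op is missing */

	memset(&info, 0, sizeof(info));
	info.data = malloc(len);
	if (info.data == NULL)
		return -ENOMEM;
	info.offset = 0;
	info.length = len;

	ret = rte_eth_dev_get_eeprom(port_id, &info);
	free(info.data);
	return ret;
}
#endif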

int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
	return (*dev->dev_ops->get_module_info)(dev, modinfo);
}

int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_module_eeprom)(dev, info);
}
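
/*
 * Illustrative usage sketch, not part of the library: query the plugged
 * transceiver module first, then read its EEPROM using the length the
 * driver reported in modinfo.eeprom_len.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_read_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&info, 0, sizeof(info));
	info.data = malloc(modinfo.eeprom_len);
	if (info.data == NULL)
		return -ENOMEM;
	info.offset = 0;
	info.length = modinfo.eeprom_len;

	ret = rte_eth_dev_get_module_eeprom(port_id, &info);
	free(info.data);
	return ret;
}
#endif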

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			     struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
								l2_tunnel));
}

int
rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
							l2_tunnel, mask, en));
}
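
/*
 * Illustrative usage sketch, not part of the library: configure the
 * Ethertype used to recognise E-Tag tunnels and enable stripping of that
 * header. The ETH_L2_TUNNEL_*_MASK flags and RTE_TUNNEL_TYPE_E_TAG come
 * from rte_ethdev.h in this API generation; 0x893f is the 802.1BR E-Tag
 * Ethertype. Treat the whole flow as an assumption-laden sketch.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_etag_offload(uint16_t port_id)
{
	struct rte_eth_l2_tunnel_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.l2_tunnel_type = RTE_TUNNEL_TYPE_E_TAG;
	conf.ether_type = 0x893f;

	ret = rte_eth_dev_l2_tunnel_eth_type_conf(port_id, &conf);
	if (ret != 0)
		return ret;

	return rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
			ETH_L2_TUNNEL_STRIPPING_MASK, 1);
}
#endif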

/*
 * Align a descriptor count to the device's required multiple, cap it at
 * the device maximum (when one is advertised) and raise it to the device
 * minimum.
 */
static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}
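
/*
 * Illustrative usage sketch, not part of the library: clamp the
 * application's preferred ring sizes to what the device supports before
 * calling the queue setup functions. 1024 is an arbitrary example value.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_pick_ring_sizes(uint16_t port_id)
{
	uint16_t nb_rxd = 1024;
	uint16_t nb_txd = 1024;
	int ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	/* nb_rxd/nb_txd now satisfy the device min/max/alignment limits. */
	printf("using %u rx and %u tx descriptors\n", nb_rxd, nb_txd);
	return 0;
}
#endif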

int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
	memset(cap, 0, sizeof(*cap));
	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
}
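
/*
 * Illustrative usage sketch, not part of the library: probe hairpin support
 * before attempting hairpin queue setup; -ENOTSUP means the driver
 * implements no hairpin operations at all.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_check_hairpin(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;

	printf("hairpin: up to %u queues, %u descriptors each\n",
	       cap.max_nb_queues, cap.max_nb_desc);
	return 0;
}
#endif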

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] ==
	    RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] ==
	    RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
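
/*
 * Illustrative usage sketch, not part of the library: check whether the
 * port's driver can work with mbufs from a pool built on the default
 * "ring_mp_mc" mempool ops. A return of 1 means supported (or that the
 * driver expresses no preference); 0 means not supported.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static bool
example_pool_ok(uint16_t port_id)
{
	return rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") > 0;
}
#endif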

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} rte_eth_switch_domains[RTE_MAX_ETHPORTS];

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	unsigned int i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
		i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			rte_eth_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (rte_eth_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
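
/*
 * Illustrative usage sketch, not part of the library: a PF driver spawning
 * representors typically allocates one switch domain shared by the PF and
 * its representor ports, and frees it on teardown.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_switch_domain(void)
{
	uint16_t domain_id;
	int ret;

	ret = rte_eth_switch_domain_alloc(&domain_id);
	if (ret != 0)
		return ret; /* -ENOSPC: all RTE_MAX_ETHPORTS domains in use */

	/* ... assign domain_id to the ports sharing one switch ... */

	return rte_eth_switch_domain_free(domain_id);
}
#endif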

/*
 * Split a devargs string of the form "key1=val1,key2=[v,w],..." into
 * rte_kvargs pairs, in place on a copy of the input. A small state machine
 * walks the string: state 0 expects a key, state 1 scans the key up to '=',
 * state 2 scans the value up to ',' and state 3 skips over a bracketed
 * list so commas inside "[...]" do not end the value.
 */
static int
rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fall-thru */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = rte_eth_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			result = rte_eth_devargs_parse_list(pair->value,
				rte_eth_devargs_parse_representor_ports,
				eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	if (args.str)
		free(args.str);

	return result;
}
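
/*
 * Illustrative usage sketch, not part of the library: drivers use this
 * internal helper to decode a "representor" device argument such as
 * "representor=[0-3]". The sketch assumes the parsed VF ids land in
 * eth_da.representor_ports[] / eth_da.nb_representor_ports, per the
 * rte_eth_devargs layout of this API generation.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static void
example_parse_representors(void)
{
	struct rte_eth_devargs eth_da;
	uint16_t i;

	if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) < 0)
		return;

	for (i = 0; i < eth_da.nb_representor_ports; i++)
		printf("representor port %u\n", eth_da.representor_ports[i]);
}
#endif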

RTE_INIT(ethdev_init_log)
{
	rte_eth_dev_logtype = rte_log_register("lib.ethdev");
	if (rte_eth_dev_logtype >= 0)
		rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
}