ethdev: change owner delete function to return int
lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
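
/*
 * A minimal usage sketch of the iterator API above, assuming the
 * RTE_ETH_FOREACH_MATCHING_DEV() helper from rte_ethdev.h, which wraps
 * rte_eth_iterator_init/next/cleanup; the devargs string and handle_port()
 * are example placeholders, not names defined in this file:
 *
 *      struct rte_dev_iterator iterator;
 *      uint16_t port_id;
 *
 *      RTE_ETH_FOREACH_MATCHING_DEV(port_id, "class=eth,mac=00:11:22:33:44:55",
 *                                   &iterator) {
 *              handle_port(port_id); // cleanup happens when iteration ends
 *      }
 */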

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}
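
/*
 * A hedged sketch of how a PMD probe path typically uses the two
 * allocators above: the primary process creates the port, a secondary
 * process attaches to the port of the same name. "net_example" and
 * struct example_private are placeholders, not names defined in this
 * library:
 *
 *      struct rte_eth_dev *eth_dev;
 *
 *      if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 *              eth_dev = rte_eth_dev_allocate("net_example");
 *              if (eth_dev == NULL)
 *                      return -ENOMEM;
 *              eth_dev->data->dev_private = rte_zmalloc_socket("net_example",
 *                              sizeof(struct example_private),
 *                              RTE_CACHE_LINE_SIZE, rte_socket_id());
 *      } else {
 *              eth_dev = rte_eth_dev_attach_secondary("net_example");
 *              if (eth_dev == NULL)
 *                      return -ENODEV;
 *      }
 */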

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                _rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}
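
/*
 * A minimal ownership sketch, assuming port_id refers to a valid port.
 * It walks the owner API above and checks the int return value that
 * rte_eth_dev_owner_delete() now provides; "example_app" is an arbitrary
 * owner name:
 *
 *      struct rte_eth_dev_owner owner = { .name = "example_app" };
 *      int ret;
 *
 *      ret = rte_eth_dev_owner_new(&owner.id);
 *      if (ret == 0)
 *              ret = rte_eth_dev_owner_set(port_id, &owner);
 *      if (ret == 0)
 *              ret = rte_eth_dev_owner_delete(owner.id);
 *      if (ret != 0) // e.g. -EINVAL for an invalid owner identifier
 *              RTE_ETHDEV_LOG(ERR, "ownership example failed: %d\n", ret);
 */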

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
        return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}
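
/*
 * A small sketch of the two lookups above, assuming port 0 exists; both
 * functions return 0 on success and a negative errno value otherwise:
 *
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *      uint16_t port_id;
 *
 *      if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *          rte_eth_dev_get_port_by_name(name, &port_id) == 0)
 *              RTE_ASSERT(port_id == 0);
 */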

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));

}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
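
/*
 * Example use of rte_eth_speed_bitflag(): building a link_speeds mask
 * for struct rte_eth_conf, here fixed at 10G full duplex (illustrative
 * values only):
 *
 *      uint32_t link_speeds = ETH_LINK_SPEED_FIXED |
 *              rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */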

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, so copy it before
         * querying dev_info.
         */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If the driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned int)RTE_ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        RTE_ETHER_MAX_LEN;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
             dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
             dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                ret = diag;
                goto rollback;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                ret = diag;
                goto rollback;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                ret = eth_err(port_id, diag);
                goto rollback;
        }

        return 0;

rollback:
        memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

        return ret;
}
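
/*
 * A hedged configuration sketch: one Rx and one Tx queue, all other
 * rte_eth_conf fields zeroed so the defaults applied above take effect
 * (e.g. max_rx_pkt_len falling back to RTE_ETHER_MAX_LEN); port_id is
 * assumed valid:
 *
 *      struct rte_eth_conf conf;
 *      int ret;
 *
 *      memset(&conf, 0, sizeof(conf));
 *      ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *      if (ret < 0) // -EBUSY, -EINVAL, -ENOMEM, or -EIO if removed
 *              RTE_ETHDEV_LOG(ERR, "configure failed: %d\n", ret);
 */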

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
{
        struct rte_ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info->max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (rte_is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }
}

static int
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
                           struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
        int ret;

        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                rte_eth_dev_mac_restore(dev, dev_info);

        /* replay promiscuous configuration */
        /*
         * use the callbacks directly since we don't need the port_id check
         * and want to avoid re-applying an unchanged value
         */
        if (rte_eth_promiscuous_get(port_id) == 1 &&
            *dev->dev_ops->promiscuous_enable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_enable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to enable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        } else if (rte_eth_promiscuous_get(port_id) == 0 &&
                   *dev->dev_ops->promiscuous_disable != NULL) {
                ret = eth_err(port_id,
                              (*dev->dev_ops->promiscuous_disable)(dev));
                if (ret != 0 && ret != -ENOTSUP) {
                        RTE_ETHDEV_LOG(ERR,
                                "Failed to disable promiscuous mode for device (port %u): %s\n",
                                port_id, rte_strerror(-ret));
                        return ret;
                }
        }

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);

        return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* Restore the MAC address now if the device does not support live change */
        if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
                rte_eth_dev_mac_restore(dev, &dev_info);

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return eth_err(port_id, diag);

        ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
        if (ret != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Error during restoring configuration for device (port %u): %s\n",
                        port_id, rte_strerror(-ret));
                rte_eth_dev_stop(port_id);
                return ret;
        }

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}
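
/*
 * A minimal start sequence, assuming the port is configured and its
 * queues are set up; note that rte_eth_dev_start() stops the port again
 * itself when restoring the saved configuration fails:
 *
 *      int ret = rte_eth_dev_start(port_id);
 *      if (ret < 0)
 *              printf("cannot start port %u: %s\n", port_id,
 *                     rte_strerror(-ret));
 */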
1483
1484 void
1485 rte_eth_dev_stop(uint16_t port_id)
1486 {
1487         struct rte_eth_dev *dev;
1488
1489         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1490         dev = &rte_eth_devices[port_id];
1491
1492         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1493
1494         if (dev->data->dev_started == 0) {
1495                 RTE_ETHDEV_LOG(INFO,
1496                         "Device with port_id=%"PRIu16" already stopped\n",
1497                         port_id);
1498                 return;
1499         }
1500
1501         dev->data->dev_started = 0;
1502         (*dev->dev_ops->dev_stop)(dev);
1503 }
1504
1505 int
1506 rte_eth_dev_set_link_up(uint16_t port_id)
1507 {
1508         struct rte_eth_dev *dev;
1509
1510         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1511
1512         dev = &rte_eth_devices[port_id];
1513
1514         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1515         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1516 }
1517
1518 int
1519 rte_eth_dev_set_link_down(uint16_t port_id)
1520 {
1521         struct rte_eth_dev *dev;
1522
1523         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1524
1525         dev = &rte_eth_devices[port_id];
1526
1527         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1528         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1529 }
1530
1531 void
1532 rte_eth_dev_close(uint16_t port_id)
1533 {
1534         struct rte_eth_dev *dev;
1535
1536         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1537         dev = &rte_eth_devices[port_id];
1538
1539         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1540         dev->data->dev_started = 0;
1541         (*dev->dev_ops->dev_close)(dev);
1542
1543         /* check behaviour flag - temporary for PMD migration */
1544         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1545                 /* new behaviour: send event + reset state + free all data */
1546                 rte_eth_dev_release_port(dev);
1547                 return;
1548         }
1549         RTE_ETHDEV_LOG(DEBUG, "Port closing uses the old behaviour.\n"
1550                         "The driver %s should migrate to the new behaviour.\n",
1551                         dev->device->driver->name);
1552         /* old behaviour: only free queue arrays */
1553         dev->data->nb_rx_queues = 0;
1554         rte_free(dev->data->rx_queues);
1555         dev->data->rx_queues = NULL;
1556         dev->data->nb_tx_queues = 0;
1557         rte_free(dev->data->tx_queues);
1558         dev->data->tx_queues = NULL;
1559 }
1560
1561 int
1562 rte_eth_dev_reset(uint16_t port_id)
1563 {
1564         struct rte_eth_dev *dev;
1565         int ret;
1566
1567         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1568         dev = &rte_eth_devices[port_id];
1569
1570         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1571
1572         rte_eth_dev_stop(port_id);
1573         ret = dev->dev_ops->dev_reset(dev);
1574
1575         return eth_err(port_id, ret);
1576 }
1577
1578 int
1579 rte_eth_dev_is_removed(uint16_t port_id)
1580 {
1581         struct rte_eth_dev *dev;
1582         int ret;
1583
1584         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1585
1586         dev = &rte_eth_devices[port_id];
1587
1588         if (dev->state == RTE_ETH_DEV_REMOVED)
1589                 return 1;
1590
1591         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1592
1593         ret = dev->dev_ops->is_removed(dev);
1594         if (ret != 0)
1595                 /* Device is physically removed. */
1596                 dev->state = RTE_ETH_DEV_REMOVED;
1597
1598         return ret;
1599 }
1600
1601 int
1602 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1603                        uint16_t nb_rx_desc, unsigned int socket_id,
1604                        const struct rte_eth_rxconf *rx_conf,
1605                        struct rte_mempool *mp)
1606 {
1607         int ret;
1608         uint32_t mbp_buf_size;
1609         struct rte_eth_dev *dev;
1610         struct rte_eth_dev_info dev_info;
1611         struct rte_eth_rxconf local_conf;
1612         void **rxq;
1613
1614         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1615
1616         dev = &rte_eth_devices[port_id];
1617         if (rx_queue_id >= dev->data->nb_rx_queues) {
1618                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1619                 return -EINVAL;
1620         }
1621
1622         if (mp == NULL) {
1623                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1624                 return -EINVAL;
1625         }
1626
1627         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1628
1629         /*
1630          * Check the size of the mbuf data buffer.
1631          * This value must be provided in the private data of the memory pool.
1632          * First check that the memory pool has a valid private data.
1633          */
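             /*
              * For reference, a hedged example of a pool that passes this
              * check: the pktmbuf helper fills in the pool private data
              * itself (the name and sizes below are illustrative only):
              *
              *   mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
              *           RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
              */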
1634         ret = rte_eth_dev_info_get(port_id, &dev_info);
1635         if (ret != 0)
1636                 return ret;
1637
1638         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1639                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1640                         mp->name, (int)mp->private_data_size,
1641                         (int)sizeof(struct rte_pktmbuf_pool_private));
1642                 return -ENOSPC;
1643         }
1644         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1645
1646         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1647                 RTE_ETHDEV_LOG(ERR,
1648                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1649                         mp->name, (int)mbp_buf_size,
1650                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1651                         (int)RTE_PKTMBUF_HEADROOM,
1652                         (int)dev_info.min_rx_bufsize);
1653                 return -EINVAL;
1654         }
1655
1656         /* Use default specified by driver, if nb_rx_desc is zero */
1657         if (nb_rx_desc == 0) {
1658                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1659                 /* If driver default is also zero, fall back on EAL default */
1660                 if (nb_rx_desc == 0)
1661                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1662         }
1663
1664         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1665                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1666                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1668                 RTE_ETHDEV_LOG(ERR,
1669                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1670                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1671                         dev_info.rx_desc_lim.nb_min,
1672                         dev_info.rx_desc_lim.nb_align);
1673                 return -EINVAL;
1674         }
1675
1676         if (dev->data->dev_started &&
1677                 !(dev_info.dev_capa &
1678                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1679                 return -EBUSY;
1680
1681         if (dev->data->dev_started &&
1682                 (dev->data->rx_queue_state[rx_queue_id] !=
1683                         RTE_ETH_QUEUE_STATE_STOPPED))
1684                 return -EBUSY;
1685
1686         rxq = dev->data->rx_queues;
1687         if (rxq[rx_queue_id]) {
1688                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1689                                         -ENOTSUP);
1690                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1691                 rxq[rx_queue_id] = NULL;
1692         }
1693
1694         if (rx_conf == NULL)
1695                 rx_conf = &dev_info.default_rxconf;
1696
1697         local_conf = *rx_conf;
1698
1699         /*
1700          * If an offloading has already been enabled in
1701          * rte_eth_dev_configure(), it has been enabled on all queues,
1702          * so there is no need to enable it in this queue again.
1703          * The local_conf.offloads input to underlying PMD only carries
1704          * those offloadings which are only enabled on this queue and
1705          * not enabled on all queues.
1706          */
1707         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1708
1709         /*
1710          * New added offloadings for this queue are those not enabled in
1711          * rte_eth_dev_configure() and they must be per-queue type.
1712          * A pure per-port offloading can't be enabled on a queue while
1713          * disabled on another queue. A pure per-port offloading can't
1714          * be enabled for any queue as new added one if it hasn't been
1715          * enabled in rte_eth_dev_configure().
1716          */
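             /*
              * Illustrative case (offload names only, not a prescription):
              * if DEV_RX_OFFLOAD_CHECKSUM was enabled port-wide in
              * rte_eth_dev_configure(), it was masked out of local_conf
              * above; requesting e.g. DEV_RX_OFFLOAD_SCATTER on this queue
              * alone is accepted only if that bit is present in
              * rx_queue_offload_capa.
              */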
1717         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1718              local_conf.offloads) {
1719                 RTE_ETHDEV_LOG(ERR,
1720                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1721                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1722                         port_id, rx_queue_id, local_conf.offloads,
1723                         dev_info.rx_queue_offload_capa,
1724                         __func__);
1725                 return -EINVAL;
1726         }
1727
1728         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1729                                               socket_id, &local_conf, mp);
1730         if (!ret) {
1731                 if (!dev->data->min_rx_buf_size ||
1732                     dev->data->min_rx_buf_size > mbp_buf_size)
1733                         dev->data->min_rx_buf_size = mbp_buf_size;
1734         }
1735
1736         return eth_err(port_id, ret);
1737 }
1738
1739 int
1740 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1741                        uint16_t nb_tx_desc, unsigned int socket_id,
1742                        const struct rte_eth_txconf *tx_conf)
1743 {
1744         struct rte_eth_dev *dev;
1745         struct rte_eth_dev_info dev_info;
1746         struct rte_eth_txconf local_conf;
1747         void **txq;
1748         int ret;
1749
1750         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1751
1752         dev = &rte_eth_devices[port_id];
1753         if (tx_queue_id >= dev->data->nb_tx_queues) {
1754                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1755                 return -EINVAL;
1756         }
1757
1758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1759
1760         ret = rte_eth_dev_info_get(port_id, &dev_info);
1761         if (ret != 0)
1762                 return ret;
1763
1764         /* Use default specified by driver, if nb_tx_desc is zero */
1765         if (nb_tx_desc == 0) {
1766                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1767                 /* If driver default is zero, fall back on EAL default */
1768                 if (nb_tx_desc == 0)
1769                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1770         }
1771         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1772             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1773             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1774                 RTE_ETHDEV_LOG(ERR,
1775                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1776                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1777                         dev_info.tx_desc_lim.nb_min,
1778                         dev_info.tx_desc_lim.nb_align);
1779                 return -EINVAL;
1780         }
1781
1782         if (dev->data->dev_started &&
1783                 !(dev_info.dev_capa &
1784                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1785                 return -EBUSY;
1786
1787         if (dev->data->dev_started &&
1788                 (dev->data->tx_queue_state[tx_queue_id] !=
1789                         RTE_ETH_QUEUE_STATE_STOPPED))
1790                 return -EBUSY;
1791
1792         txq = dev->data->tx_queues;
1793         if (txq[tx_queue_id]) {
1794                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1795                                         -ENOTSUP);
1796                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1797                 txq[tx_queue_id] = NULL;
1798         }
1799
1800         if (tx_conf == NULL)
1801                 tx_conf = &dev_info.default_txconf;
1802
1803         local_conf = *tx_conf;
1804
1805         /*
1806          * If an offloading has already been enabled in
1807          * rte_eth_dev_configure(), it has been enabled on all queues,
1808          * so there is no need to enable it in this queue again.
1809          * The local_conf.offloads input to underlying PMD only carries
1810          * those offloadings which are only enabled on this queue and
1811          * not enabled on all queues.
1812          */
1813         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1814
1815         /*
1816          * New added offloadings for this queue are those not enabled in
1817          * rte_eth_dev_configure() and they must be per-queue type.
1818          * A pure per-port offloading can't be enabled on a queue while
1819          * disabled on another queue. A pure per-port offloading can't
1820          * be enabled for any queue as new added one if it hasn't been
1821          * enabled in rte_eth_dev_configure().
1822          */
1823         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1824              local_conf.offloads) {
1825                 RTE_ETHDEV_LOG(ERR,
1826                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1827                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1828                         port_id, tx_queue_id, local_conf.offloads,
1829                         dev_info.tx_queue_offload_capa,
1830                         __func__);
1831                 return -EINVAL;
1832         }
1833
1834         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1835                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1836 }
1837
1838 void
1839 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1840                 void *userdata __rte_unused)
1841 {
1842         unsigned i;
1843
1844         for (i = 0; i < unsent; i++)
1845                 rte_pktmbuf_free(pkts[i]);
1846 }
1847
1848 void
1849 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1850                 void *userdata)
1851 {
1852         uint64_t *count = userdata;
1853         unsigned i;
1854
1855         for (i = 0; i < unsent; i++)
1856                 rte_pktmbuf_free(pkts[i]);
1857
1858         *count += unsent;
1859 }
1860
1861 int
1862 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1863                 buffer_tx_error_fn cbfn, void *userdata)
1864 {
1865         buffer->error_callback = cbfn;
1866         buffer->error_userdata = userdata;
1867         return 0;
1868 }
1869
1870 int
1871 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1872 {
1873         int ret = 0;
1874
1875         if (buffer == NULL)
1876                 return -EINVAL;
1877
1878         buffer->size = size;
1879         if (buffer->error_callback == NULL) {
1880                 ret = rte_eth_tx_buffer_set_err_callback(
1881                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1882         }
1883
1884         return ret;
1885 }
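
     /*
      * A hedged usage sketch for the TX buffering API above; the size macro
      * and callbacks come from rte_ethdev.h, silent_drop_count is a
      * caller-defined uint64_t, and error handling is omitted:
      *
      *   struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buf",
      *           RTE_ETH_TX_BUFFER_SIZE(32), 0,
      *           rte_eth_dev_socket_id(port_id));
      *   rte_eth_tx_buffer_init(buf, 32);
      *   rte_eth_tx_buffer_set_err_callback(buf,
      *           rte_eth_tx_buffer_count_callback, &silent_drop_count);
      */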
1886
1887 int
1888 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1889 {
1890         struct rte_eth_dev *dev;
1891         int ret;
1892
1893         /* Validate Input Data. Bail if not valid or not supported. */
1894         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
             dev = &rte_eth_devices[port_id];
1895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
             /* validate queue_id before it is used to index tx_queues below */
             if (queue_id >= dev->data->nb_tx_queues) {
                     RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                     return -EINVAL;
             }
1896
1897         /* Call driver to free pending mbufs. */
1898         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1899                                                free_cnt);
1900         return eth_err(port_id, ret);
1901 }
1902
1903 int
1904 rte_eth_promiscuous_enable(uint16_t port_id)
1905 {
1906         struct rte_eth_dev *dev;
1907         int diag = 0;
1908
1909         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1910         dev = &rte_eth_devices[port_id];
1911
1912         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
1913
1914         if (dev->data->promiscuous == 0) {
1915                 diag = (*dev->dev_ops->promiscuous_enable)(dev);
1916                 dev->data->promiscuous = (diag == 0) ? 1 : 0;
1917         }
1918
1919         return eth_err(port_id, diag);
1920 }
1921
1922 int
1923 rte_eth_promiscuous_disable(uint16_t port_id)
1924 {
1925         struct rte_eth_dev *dev;
1926         int diag = 0;
1927
1928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1929         dev = &rte_eth_devices[port_id];
1930
1931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
1932
1933         if (dev->data->promiscuous == 1) {
1934                 dev->data->promiscuous = 0;
1935                 diag = (*dev->dev_ops->promiscuous_disable)(dev);
1936                 if (diag != 0)
1937                         dev->data->promiscuous = 1;
1938         }
1939
1940         return eth_err(port_id, diag);
1941 }
1942
1943 int
1944 rte_eth_promiscuous_get(uint16_t port_id)
1945 {
1946         struct rte_eth_dev *dev;
1947
1948         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1949
1950         dev = &rte_eth_devices[port_id];
1951         return dev->data->promiscuous;
1952 }
1953
1954 void
1955 rte_eth_allmulticast_enable(uint16_t port_id)
1956 {
1957         struct rte_eth_dev *dev;
1958
1959         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1960         dev = &rte_eth_devices[port_id];
1961
1962         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1963         (*dev->dev_ops->allmulticast_enable)(dev);
1964         dev->data->all_multicast = 1;
1965 }
1966
1967 void
1968 rte_eth_allmulticast_disable(uint16_t port_id)
1969 {
1970         struct rte_eth_dev *dev;
1971
1972         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1973         dev = &rte_eth_devices[port_id];
1974
1975         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1976         dev->data->all_multicast = 0;
1977         (*dev->dev_ops->allmulticast_disable)(dev);
1978 }
1979
1980 int
1981 rte_eth_allmulticast_get(uint16_t port_id)
1982 {
1983         struct rte_eth_dev *dev;
1984
1985         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1986
1987         dev = &rte_eth_devices[port_id];
1988         return dev->data->all_multicast;
1989 }
1990
1991 int
1992 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1993 {
1994         struct rte_eth_dev *dev;
1995
1996         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1997         dev = &rte_eth_devices[port_id];
1998
1999         if (dev->data->dev_conf.intr_conf.lsc &&
2000             dev->data->dev_started)
2001                 rte_eth_linkstatus_get(dev, eth_link);
2002         else {
2003                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2004                 (*dev->dev_ops->link_update)(dev, 1);
2005                 *eth_link = dev->data->dev_link;
2006         }
2007
2008         return 0;
2009 }
2010
2011 int
2012 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2013 {
2014         struct rte_eth_dev *dev;
2015
2016         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2017         dev = &rte_eth_devices[port_id];
2018
2019         if (dev->data->dev_conf.intr_conf.lsc &&
2020             dev->data->dev_started)
2021                 rte_eth_linkstatus_get(dev, eth_link);
2022         else {
2023                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2024                 (*dev->dev_ops->link_update)(dev, 0);
2025                 *eth_link = dev->data->dev_link;
2026         }
2027
2028         return 0;
2029 }
2030
2031 int
2032 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2033 {
2034         struct rte_eth_dev *dev;
2035
2036         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2037
2038         dev = &rte_eth_devices[port_id];
2039         memset(stats, 0, sizeof(*stats));
2040
2041         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2042         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2043         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2044 }
2045
2046 int
2047 rte_eth_stats_reset(uint16_t port_id)
2048 {
2049         struct rte_eth_dev *dev;
2050         int ret;
2051
2052         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2053         dev = &rte_eth_devices[port_id];
2054
2055         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2056         ret = (*dev->dev_ops->stats_reset)(dev);
2057         if (ret != 0)
2058                 return eth_err(port_id, ret);
2059
2060         dev->data->rx_mbuf_alloc_failed = 0;
2061
2062         return 0;
2063 }
2064
2065 static inline int
2066 get_xstats_basic_count(struct rte_eth_dev *dev)
2067 {
2068         uint16_t nb_rxqs, nb_txqs;
2069         int count;
2070
2071         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2072         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2073
2074         count = RTE_NB_STATS;
2075         count += nb_rxqs * RTE_NB_RXQ_STATS;
2076         count += nb_txqs * RTE_NB_TXQ_STATS;
2077
2078         return count;
2079 }
2080
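     /*
      * xstats id layout as implemented below: the generic stats (basic plus
      * per-queue counters) come first, followed by any driver-specific
      * stats, so application-visible ids must be offset by the basic count
      * before being handed to the PMD.
      */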
2081 static int
2082 get_xstats_count(uint16_t port_id)
2083 {
2084         struct rte_eth_dev *dev;
2085         int count;
2086
2087         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2088         dev = &rte_eth_devices[port_id];
2089         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2090                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2091                                 NULL, 0);
2092                 if (count < 0)
2093                         return eth_err(port_id, count);
2094         }
2095         if (dev->dev_ops->xstats_get_names != NULL) {
2096                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2097                 if (count < 0)
2098                         return eth_err(port_id, count);
2099         } else
2100                 count = 0;
2101
2103         count += get_xstats_basic_count(dev);
2104
2105         return count;
2106 }
2107
2108 int
2109 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2110                 uint64_t *id)
2111 {
2112         int cnt_xstats, idx_xstat;
2113
2114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2115
2116         if (!id) {
2117                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2118                 return -EINVAL;
2119         }
2120
2121         if (!xstat_name) {
2122                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2123                 return -EINVAL;
2124         }
2125
2126         /* Get count */
2127         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2128         if (cnt_xstats < 0) {
2129                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2130                 return -ENODEV;
2131         }
2132
2133         /* Get id-name lookup table */
2134         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2135
2136         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2137                         port_id, xstats_names, cnt_xstats, NULL)) {
2138                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2139                 return -1;
2140         }
2141
2142         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2143                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2144                         *id = idx_xstat;
2145                         return 0;
2146                 }
2147         }
2148
2149         return -EINVAL;
2150 }
2151
2152 /* retrieve basic stats names */
2153 static int
2154 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2155         struct rte_eth_xstat_name *xstats_names)
2156 {
2157         int cnt_used_entries = 0;
2158         uint32_t idx, id_queue;
2159         uint16_t num_q;
2160
2161         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2162                 strlcpy(xstats_names[cnt_used_entries].name,
2163                         rte_stats_strings[idx].name,
2164                         sizeof(xstats_names[0].name));
2165                 cnt_used_entries++;
2166         }
2167         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2168         for (id_queue = 0; id_queue < num_q; id_queue++) {
2169                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2170                         snprintf(xstats_names[cnt_used_entries].name,
2171                                 sizeof(xstats_names[0].name),
2172                                 "rx_q%u%s",
2173                                 id_queue, rte_rxq_stats_strings[idx].name);
2174                         cnt_used_entries++;
2175                 }
2176
2177         }
2178         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2179         for (id_queue = 0; id_queue < num_q; id_queue++) {
2180                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2181                         snprintf(xstats_names[cnt_used_entries].name,
2182                                 sizeof(xstats_names[0].name),
2183                                 "tx_q%u%s",
2184                                 id_queue, rte_txq_stats_strings[idx].name);
2185                         cnt_used_entries++;
2186                 }
2187         }
2188         return cnt_used_entries;
2189 }
2190
2191 /* retrieve ethdev extended statistics names */
2192 int
2193 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2194         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2195         uint64_t *ids)
2196 {
2197         struct rte_eth_xstat_name *xstats_names_copy;
2198         unsigned int no_basic_stat_requested = 1;
2199         unsigned int no_ext_stat_requested = 1;
2200         unsigned int expected_entries;
2201         unsigned int basic_count;
2202         struct rte_eth_dev *dev;
2203         unsigned int i;
2204         int ret;
2205
2206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2207         dev = &rte_eth_devices[port_id];
2208
2209         basic_count = get_xstats_basic_count(dev);
2210         ret = get_xstats_count(port_id);
2211         if (ret < 0)
2212                 return ret;
2213         expected_entries = (unsigned int)ret;
2214
2215         /* Return max number of stats if no ids given */
2216         if (!ids) {
2217                 if (!xstats_names || size < expected_entries)
2218                         return expected_entries;
2221         }
2222
2223         if (ids && !xstats_names)
2224                 return -EINVAL;
2225
2226         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2227                 uint64_t ids_copy[size];
2228
2229                 for (i = 0; i < size; i++) {
2230                         if (ids[i] < basic_count) {
2231                                 no_basic_stat_requested = 0;
2232                                 break;
2233                         }
2234
2235                         /*
2236                          * Convert ids to xstats ids that PMD knows.
2237                          * ids known by user are basic + extended stats.
2238                          */
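                             /* e.g. basic_count 8 (hypothetical): user id 10 maps to PMD id 2 */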
2239                         ids_copy[i] = ids[i] - basic_count;
2240                 }
2241
2242                 if (no_basic_stat_requested)
2243                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2244                                         xstats_names, ids_copy, size);
2245         }
2246
2247         /* Retrieve all stats */
2248         if (!ids) {
2249                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2250                                 expected_entries);
2251                 if (num_stats < 0 || num_stats > (int)expected_entries)
2252                         return num_stats;
2253                 else
2254                         return expected_entries;
2255         }
2256
2257         xstats_names_copy = calloc(expected_entries,
2258                 sizeof(struct rte_eth_xstat_name));
2259
2260         if (!xstats_names_copy) {
2261                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2262                 return -ENOMEM;
2263         }
2264
2265         if (ids) {
2266                 for (i = 0; i < size; i++) {
2267                         if (ids[i] >= basic_count) {
2268                                 no_ext_stat_requested = 0;
2269                                 break;
2270                         }
2271                 }
2272         }
2273
2274         /* Fill xstats_names_copy structure */
2275         if (ids && no_ext_stat_requested) {
2276                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2277         } else {
2278                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2279                         expected_entries);
2280                 if (ret < 0) {
2281                         free(xstats_names_copy);
2282                         return ret;
2283                 }
2284         }
2285
2286         /* Filter stats */
2287         for (i = 0; i < size; i++) {
2288                 if (ids[i] >= expected_entries) {
2289                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2290                         free(xstats_names_copy);
2291                         return -1;
2292                 }
2293                 xstats_names[i] = xstats_names_copy[ids[i]];
2294         }
2295
2296         free(xstats_names_copy);
2297         return size;
2298 }
2299
2300 int
2301 rte_eth_xstats_get_names(uint16_t port_id,
2302         struct rte_eth_xstat_name *xstats_names,
2303         unsigned int size)
2304 {
2305         struct rte_eth_dev *dev;
2306         int cnt_used_entries;
2307         int cnt_expected_entries;
2308         int cnt_driver_entries;
2309
2310         cnt_expected_entries = get_xstats_count(port_id);
2311         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2312                         (int)size < cnt_expected_entries)
2313                 return cnt_expected_entries;
2314
2315         /* port_id checked in get_xstats_count() */
2316         dev = &rte_eth_devices[port_id];
2317
2318         cnt_used_entries = rte_eth_basic_stats_get_names(
2319                 dev, xstats_names);
2320
2321         if (dev->dev_ops->xstats_get_names != NULL) {
2322                 /* If there are any driver-specific xstats, append them
2323                  * to end of list.
2324                  */
2325                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2326                         dev,
2327                         xstats_names + cnt_used_entries,
2328                         size - cnt_used_entries);
2329                 if (cnt_driver_entries < 0)
2330                         return eth_err(port_id, cnt_driver_entries);
2331                 cnt_used_entries += cnt_driver_entries;
2332         }
2333
2334         return cnt_used_entries;
2335 }
2336
2337
2338 static int
2339 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2340 {
2341         struct rte_eth_dev *dev;
2342         struct rte_eth_stats eth_stats;
2343         unsigned int count = 0, i, q;
2344         uint64_t val, *stats_ptr;
2345         uint16_t nb_rxqs, nb_txqs;
2346         int ret;
2347
2348         ret = rte_eth_stats_get(port_id, &eth_stats);
2349         if (ret < 0)
2350                 return ret;
2351
2352         dev = &rte_eth_devices[port_id];
2353
2354         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2355         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2356
2357         /* global stats */
2358         for (i = 0; i < RTE_NB_STATS; i++) {
2359                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2360                                         rte_stats_strings[i].offset);
2361                 val = *stats_ptr;
2362                 xstats[count++].value = val;
2363         }
2364
2365         /* per-rxq stats */
2366         for (q = 0; q < nb_rxqs; q++) {
2367                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2368                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2369                                         rte_rxq_stats_strings[i].offset +
2370                                         q * sizeof(uint64_t));
2371                         val = *stats_ptr;
2372                         xstats[count++].value = val;
2373                 }
2374         }
2375
2376         /* per-txq stats */
2377         for (q = 0; q < nb_txqs; q++) {
2378                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2379                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2380                                         rte_txq_stats_strings[i].offset +
2381                                         q * sizeof(uint64_t));
2382                         val = *stats_ptr;
2383                         xstats[count++].value = val;
2384                 }
2385         }
2386         return count;
2387 }
2388
2389 /* retrieve ethdev extended statistics */
2390 int
2391 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2392                          uint64_t *values, unsigned int size)
2393 {
2394         unsigned int no_basic_stat_requested = 1;
2395         unsigned int no_ext_stat_requested = 1;
2396         unsigned int num_xstats_filled;
2397         unsigned int basic_count;
2398         uint16_t expected_entries;
2399         struct rte_eth_dev *dev;
2400         unsigned int i;
2401         int ret;
2402
2403         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404         ret = get_xstats_count(port_id);
2405         if (ret < 0)
2406                 return ret;
2407         expected_entries = (uint16_t)ret;
2408         struct rte_eth_xstat xstats[expected_entries];
2409         dev = &rte_eth_devices[port_id];
2410         basic_count = get_xstats_basic_count(dev);
2411
2412         /* Return max number of stats if no ids given */
2413         if (!ids) {
2414                 if (!values || size < expected_entries)
2415                         return expected_entries;
2418         }
2419
2420         if (ids && !values)
2421                 return -EINVAL;
2422
2423         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2425                 uint64_t ids_copy[size];
2426
2427                 for (i = 0; i < size; i++) {
2428                         if (ids[i] < basic_count) {
2429                                 no_basic_stat_requested = 0;
2430                                 break;
2431                         }
2432
2433                         /*
2434                          * Convert ids to xstats ids that PMD knows.
2435                          * ids known by user are basic + extended stats.
2436                          */
2437                         ids_copy[i] = ids[i] - basic_count;
2438                 }
2439
2440                 if (no_basic_stat_requested)
2441                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2442                                         values, size);
2443         }
2444
2445         if (ids) {
2446                 for (i = 0; i < size; i++) {
2447                         if (ids[i] >= basic_count) {
2448                                 no_ext_stat_requested = 0;
2449                                 break;
2450                         }
2451                 }
2452         }
2453
2454         /* Fill the xstats structure */
2455         if (ids && no_ext_stat_requested)
2456                 ret = rte_eth_basic_stats_get(port_id, xstats);
2457         else
2458                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2459
2460         if (ret < 0)
2461                 return ret;
2462         num_xstats_filled = (unsigned int)ret;
2463
2464         /* Return all stats */
2465         if (!ids) {
2466                 for (i = 0; i < num_xstats_filled; i++)
2467                         values[i] = xstats[i].value;
2468                 return expected_entries;
2469         }
2470
2471         /* Filter stats */
2472         for (i = 0; i < size; i++) {
2473                 if (ids[i] >= expected_entries) {
2474                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2475                         return -1;
2476                 }
2477                 values[i] = xstats[ids[i]].value;
2478         }
2479         return size;
2480 }
2481
2482 int
2483 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2484         unsigned int n)
2485 {
2486         struct rte_eth_dev *dev;
2487         unsigned int count = 0, i;
2488         signed int xcount = 0;
2489         uint16_t nb_rxqs, nb_txqs;
2490         int ret;
2491
2492         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2493
2494         dev = &rte_eth_devices[port_id];
2495
2496         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2497         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2498
2499         /* Return generic statistics */
2500         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2501                 (nb_txqs * RTE_NB_TXQ_STATS);
2502
2503         /* implemented by the driver */
2504         if (dev->dev_ops->xstats_get != NULL) {
2505                 /* Retrieve the xstats from the driver at the end of the
2506                  * xstats struct.
2507                  */
2508                 xcount = (*dev->dev_ops->xstats_get)(dev,
2509                                      xstats ? xstats + count : NULL,
2510                                      (n > count) ? n - count : 0);
2511
2512                 if (xcount < 0)
2513                         return eth_err(port_id, xcount);
2514         }
2515
2516         if (n < count + xcount || xstats == NULL)
2517                 return count + xcount;
2518
2519         /* now fill the xstats structure */
2520         ret = rte_eth_basic_stats_get(port_id, xstats);
2521         if (ret < 0)
2522                 return ret;
2523         count = ret;
2524
2525         for (i = 0; i < count; i++)
2526                 xstats[i].id = i;
2527         /* add an offset to driver-specific stats */
2528         for ( ; i < count + xcount; i++)
2529                 xstats[i].id += count;
2530
2531         return count + xcount;
2532 }
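
     /*
      * A hedged sketch of the size-query pattern this implements (checks on
      * a negative return are omitted for brevity):
      *
      *   int n = rte_eth_xstats_get(port_id, NULL, 0);
      *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
      *   n = rte_eth_xstats_get(port_id, xs, n);
      */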
2533
2534 /* reset ethdev extended statistics */
2535 int
2536 rte_eth_xstats_reset(uint16_t port_id)
2537 {
2538         struct rte_eth_dev *dev;
2539
2540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2541         dev = &rte_eth_devices[port_id];
2542
2543         /* implemented by the driver */
2544         if (dev->dev_ops->xstats_reset != NULL)
2545                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2546
2547         /* fallback to default */
2548         return rte_eth_stats_reset(port_id);
2549 }
2550
2551 static int
2552 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2553                 uint8_t is_rx)
2554 {
2555         struct rte_eth_dev *dev;
2556
2557         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2558
2559         dev = &rte_eth_devices[port_id];
2560
2561         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2562
2563         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2564                 return -EINVAL;
2565
2566         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2567                 return -EINVAL;
2568
2569         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2570                 return -EINVAL;
2571
2572         return (*dev->dev_ops->queue_stats_mapping_set)
2573                         (dev, queue_id, stat_idx, is_rx);
2574 }
2575
2576
2577 int
2578 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2579                 uint8_t stat_idx)
2580 {
2581         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2582                                                 stat_idx, STAT_QMAP_TX));
2583 }
2584
2585
2586 int
2587 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2588                 uint8_t stat_idx)
2589 {
2590         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2591                                                 stat_idx, STAT_QMAP_RX));
2592 }
2593
2594 int
2595 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2596 {
2597         struct rte_eth_dev *dev;
2598
2599         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2600         dev = &rte_eth_devices[port_id];
2601
2602         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2603         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2604                                                         fw_version, fw_size));
2605 }
2606
2607 int
2608 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2609 {
2610         struct rte_eth_dev *dev;
2611         const struct rte_eth_desc_lim lim = {
2612                 .nb_max = UINT16_MAX,
2613                 .nb_min = 0,
2614                 .nb_align = 1,
2615                 .nb_seg_max = UINT16_MAX,
2616                 .nb_mtu_seg_max = UINT16_MAX,
2617         };
2618         int diag;
2619
2620         /*
2621          * Init dev_info before the port_id check so the struct is zeroed
2622          * even on failure, for callers that ignore the return value.
2623          */
2624         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2625
2626         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2627         dev = &rte_eth_devices[port_id];
2628
2629         dev_info->rx_desc_lim = lim;
2630         dev_info->tx_desc_lim = lim;
2631         dev_info->device = dev->device;
2632         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2633         dev_info->max_mtu = UINT16_MAX;
2634
2635         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2636         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2637         if (diag != 0) {
2638                 /* Cleanup already filled in device information */
2639                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2640                 return eth_err(port_id, diag);
2641         }
2642
2643         dev_info->driver_name = dev->device->driver->name;
2644         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2645         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2646
2647         dev_info->dev_flags = &dev->data->dev_flags;
2648
2649         return 0;
2650 }
2651
2652 int
2653 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2654                                  uint32_t *ptypes, int num)
2655 {
2656         int i, j;
2657         struct rte_eth_dev *dev;
2658         const uint32_t *all_ptypes;
2659
2660         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2661         dev = &rte_eth_devices[port_id];
2662         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2663         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2664
2665         if (!all_ptypes)
2666                 return 0;
2667
2668         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2669                 if (all_ptypes[i] & ptype_mask) {
2670                         if (j < num)
2671                                 ptypes[j] = all_ptypes[i];
2672                         j++;
2673                 }
2674
2675         return j;
2676 }
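
     /*
      * The count of matching ptypes is returned even when it exceeds num,
      * which permits a two-call sketch such as this (illustrative only,
      * error handling omitted):
      *
      *   int n = rte_eth_dev_get_supported_ptypes(port_id,
      *           RTE_PTYPE_L3_MASK, NULL, 0);
      *   uint32_t *ptypes = malloc(n * sizeof(*ptypes));
      *   n = rte_eth_dev_get_supported_ptypes(port_id,
      *           RTE_PTYPE_L3_MASK, ptypes, n);
      */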
2677
2678 int
2679 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
2680 {
2681         struct rte_eth_dev *dev;
2682
2683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684         dev = &rte_eth_devices[port_id];
2685         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2686
2687         return 0;
2688 }
2689
2690
2691 int
2692 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2693 {
2694         struct rte_eth_dev *dev;
2695
2696         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2697
2698         dev = &rte_eth_devices[port_id];
2699         *mtu = dev->data->mtu;
2700         return 0;
2701 }
2702
2703 int
2704 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2705 {
2706         int ret;
2707         struct rte_eth_dev_info dev_info;
2708         struct rte_eth_dev *dev;
2709
2710         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2711         dev = &rte_eth_devices[port_id];
2712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2713
2714         /*
2715          * If the device does not support dev_infos_get, skip the
2716          * min_mtu/max_mtu validation here: it needs values that are
2717          * populated by rte_eth_dev_info_get(), which in turn relies on
2718          * dev->dev_ops->dev_infos_get.
2719          */
2720         if (*dev->dev_ops->dev_infos_get != NULL) {
2721                 ret = rte_eth_dev_info_get(port_id, &dev_info);
2722                 if (ret != 0)
2723                         return ret;
2724
2725                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
2726                         return -EINVAL;
2727         }
2728
2729         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2730         if (!ret)
2731                 dev->data->mtu = mtu;
2732
2733         return eth_err(port_id, ret);
2734 }
2735
2736 int
2737 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2738 {
2739         struct rte_eth_dev *dev;
2740         int ret;
2741
2742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2743         dev = &rte_eth_devices[port_id];
2744         if (!(dev->data->dev_conf.rxmode.offloads &
2745               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2746                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2747                         port_id);
2748                 return -ENOSYS;
2749         }
2750
2751         if (vlan_id > 4095) {
2752                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2753                         port_id, vlan_id);
2754                 return -EINVAL;
2755         }
2756         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2757
2758         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2759         if (ret == 0) {
2760                 struct rte_vlan_filter_conf *vfc;
2761                 int vidx;
2762                 int vbit;
2763
2764                 vfc = &dev->data->vlan_filter_conf;
2765                 vidx = vlan_id / 64;
2766                 vbit = vlan_id % 64;
2767
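                     /* e.g. vlan_id 100 maps to bit 36 of ids[1] (100 = 1 * 64 + 36) */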
2768                 if (on)
2769                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2770                 else
2771                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2772         }
2773
2774         return eth_err(port_id, ret);
2775 }
2776
2777 int
2778 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2779                                     int on)
2780 {
2781         struct rte_eth_dev *dev;
2782
2783         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2784         dev = &rte_eth_devices[port_id];
2785         if (rx_queue_id >= dev->data->nb_rx_queues) {
2786                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
2787                 return -EINVAL;
2788         }
2789
2790         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2791         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2792
2793         return 0;
2794 }
2795
2796 int
2797 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2798                                 enum rte_vlan_type vlan_type,
2799                                 uint16_t tpid)
2800 {
2801         struct rte_eth_dev *dev;
2802
2803         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2804         dev = &rte_eth_devices[port_id];
2805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2806
2807         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2808                                                                tpid));
2809 }
2810
2811 int
2812 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2813 {
2814         struct rte_eth_dev *dev;
2815         int ret = 0;
2816         int mask = 0;
2817         int cur, org = 0;
2818         uint64_t orig_offloads;
2819         uint64_t *dev_offloads;
2820
2821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2822         dev = &rte_eth_devices[port_id];
2823
2824         /* save original values in case of failure */
2825         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2826         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
2827
2828         /* check which options were changed by the application */
2829         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2830         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
2831         if (cur != org) {
2832                 if (cur)
2833                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2834                 else
2835                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2836                 mask |= ETH_VLAN_STRIP_MASK;
2837         }
2838
2839         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2840         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
2841         if (cur != org) {
2842                 if (cur)
2843                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2844                 else
2845                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
2846                 mask |= ETH_VLAN_FILTER_MASK;
2847         }
2848
2849         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2850         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
2851         if (cur != org) {
2852                 if (cur)
2853                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2854                 else
2855                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2856                 mask |= ETH_VLAN_EXTEND_MASK;
2857         }
2858
2859         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
2860         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
2861         if (cur != org) {
2862                 if (cur)
2863                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
2864                 else
2865                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
2866                 mask |= ETH_QINQ_STRIP_MASK;
2867         }
2868
2869         /* no change */
2870         if (mask == 0)
2871                 return ret;
2872
2873         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2874         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2875         if (ret) {
2876                 /* hit an error, restore the original values */
2877                 *dev_offloads = orig_offloads;
2878         }
2879
2880         return eth_err(port_id, ret);
2881 }
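
     /*
      * Note that offload_mask describes the desired state of all four VLAN
      * offloads at once: a bit left clear disables that offload if it is
      * currently enabled. A hedged example keeping only stripping and
      * filtering on:
      *
      *   rte_eth_dev_set_vlan_offload(port_id,
      *           ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
      */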
2882
2883 int
2884 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2885 {
2886         struct rte_eth_dev *dev;
2887         uint64_t *dev_offloads;
2888         int ret = 0;
2889
2890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2891         dev = &rte_eth_devices[port_id];
2892         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
2893
2894         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2895                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2896
2897         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2898                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2899
2900         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2901                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2902
2903         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
2904                 ret |= ETH_QINQ_STRIP_OFFLOAD;
2905
2906         return ret;
2907 }
2908
2909 int
2910 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2911 {
2912         struct rte_eth_dev *dev;
2913
2914         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2915         dev = &rte_eth_devices[port_id];
2916         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2917
2918         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2919 }
2920
2921 int
2922 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2923 {
2924         struct rte_eth_dev *dev;
2925
2926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2927         dev = &rte_eth_devices[port_id];
2928         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2929         memset(fc_conf, 0, sizeof(*fc_conf));
2930         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2931 }
2932
2933 int
2934 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2935 {
2936         struct rte_eth_dev *dev;
2937
2938         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2939         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2940                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
2941                 return -EINVAL;
2942         }
2943
2944         dev = &rte_eth_devices[port_id];
2945         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2946         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2947 }
2948
2949 int
2950 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2951                                    struct rte_eth_pfc_conf *pfc_conf)
2952 {
2953         struct rte_eth_dev *dev;
2954
2955         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2956         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2957                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
2958                 return -EINVAL;
2959         }
2960
2961         dev = &rte_eth_devices[port_id];
2962         /* High water and low water validation are device specific */
2963         if (*dev->dev_ops->priority_flow_ctrl_set)
2964                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2965                                         (dev, pfc_conf));
2966         return -ENOTSUP;
2967 }
2968
2969 static int
2970 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2971                         uint16_t reta_size)
2972 {
2973         uint16_t i, num;
2974
2975         if (!reta_conf)
2976                 return -EINVAL;
2977
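             /* round reta_size up to the number of 64-entry mask groups */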
2978         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2979         for (i = 0; i < num; i++) {
2980                 if (reta_conf[i].mask)
2981                         return 0;
2982         }
2983
2984         return -EINVAL;
2985 }
2986
2987 static int
2988 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2989                          uint16_t reta_size,
2990                          uint16_t max_rxq)
2991 {
2992         uint16_t i, idx, shift;
2993
2994         if (!reta_conf)
2995                 return -EINVAL;
2996
2997         if (max_rxq == 0) {
2998                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
2999                 return -EINVAL;
3000         }
3001
3002         for (i = 0; i < reta_size; i++) {
3003                 idx = i / RTE_RETA_GROUP_SIZE;
3004                 shift = i % RTE_RETA_GROUP_SIZE;
3005                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3006                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3007                         RTE_ETHDEV_LOG(ERR,
3008                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3009                                 idx, shift,
3010                                 reta_conf[idx].reta[shift], max_rxq);
3011                         return -EINVAL;
3012                 }
3013         }
3014
3015         return 0;
3016 }
3017
3018 int
3019 rte_eth_dev_rss_reta_update(uint16_t port_id,
3020                             struct rte_eth_rss_reta_entry64 *reta_conf,
3021                             uint16_t reta_size)
3022 {
3023         struct rte_eth_dev *dev;
3024         int ret;
3025
3026         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3027         /* Check mask bits */
3028         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3029         if (ret < 0)
3030                 return ret;
3031
3032         dev = &rte_eth_devices[port_id];
3033
3034         /* Check entry value */
3035         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3036                                 dev->data->nb_rx_queues);
3037         if (ret < 0)
3038                 return ret;
3039
3040         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3041         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3042                                                              reta_size));
3043 }
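
/*
 * Usage sketch (illustrative): populate a redirection table that
 * spreads traffic evenly over "nb_q" Rx queues. "reta_size" is assumed
 * to come from dev_info.reta_size and to be a multiple of
 * RTE_RETA_GROUP_SIZE.
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < reta_size; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i % nb_q;
 *	}
 *	(void)rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */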
3044
3045 int
3046 rte_eth_dev_rss_reta_query(uint16_t port_id,
3047                            struct rte_eth_rss_reta_entry64 *reta_conf,
3048                            uint16_t reta_size)
3049 {
3050         struct rte_eth_dev *dev;
3051         int ret;
3052
3053         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3054
3055         /* Check mask bits */
3056         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3057         if (ret < 0)
3058                 return ret;
3059
3060         dev = &rte_eth_devices[port_id];
3061         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3062         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3063                                                             reta_size));
3064 }
3065
3066 int
3067 rte_eth_dev_rss_hash_update(uint16_t port_id,
3068                             struct rte_eth_rss_conf *rss_conf)
3069 {
3070         struct rte_eth_dev *dev;
3071         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3072         int ret;
3073
3074         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3075
3076         ret = rte_eth_dev_info_get(port_id, &dev_info);
3077         if (ret != 0)
3078                 return ret;
3079
3080         dev = &rte_eth_devices[port_id];
3081         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3082             dev_info.flow_type_rss_offloads) {
3083                 RTE_ETHDEV_LOG(ERR,
3084                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3085                         port_id, rss_conf->rss_hf,
3086                         dev_info.flow_type_rss_offloads);
3087                 return -EINVAL;
3088         }
3089         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3090         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3091                                                                  rss_conf));
3092 }
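
/*
 * Usage sketch (illustrative): hash only on IP and TCP fields while
 * keeping the configured key (rss_key of NULL leaves the key as-is).
 * The requested rss_hf must be a subset of
 * dev_info.flow_type_rss_offloads, as checked above.
 *
 *	struct rte_eth_rss_conf rss = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *	};
 *
 *	(void)rte_eth_dev_rss_hash_update(port_id, &rss);
 */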
3093
3094 int
3095 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3096                               struct rte_eth_rss_conf *rss_conf)
3097 {
3098         struct rte_eth_dev *dev;
3099
3100         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3101         dev = &rte_eth_devices[port_id];
3102         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3103         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3104                                                                    rss_conf));
3105 }
3106
3107 int
3108 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3109                                 struct rte_eth_udp_tunnel *udp_tunnel)
3110 {
3111         struct rte_eth_dev *dev;
3112
3113         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3114         if (udp_tunnel == NULL) {
3115                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3116                 return -EINVAL;
3117         }
3118
3119         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3120                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3121                 return -EINVAL;
3122         }
3123
3124         dev = &rte_eth_devices[port_id];
3125         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3126         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3127                                                                 udp_tunnel));
3128 }
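
/*
 * Usage sketch (illustrative): register the IANA-assigned VXLAN UDP
 * port so the device can recognise the encapsulation.
 *
 *	struct rte_eth_udp_tunnel tun = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	(void)rte_eth_dev_udp_tunnel_port_add(port_id, &tun);
 */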
3129
3130 int
3131 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3132                                    struct rte_eth_udp_tunnel *udp_tunnel)
3133 {
3134         struct rte_eth_dev *dev;
3135
3136         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3137         dev = &rte_eth_devices[port_id];
3138
3139         if (udp_tunnel == NULL) {
3140                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3141                 return -EINVAL;
3142         }
3143
3144         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3145                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3146                 return -EINVAL;
3147         }
3148
3149         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3150         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3151                                                                 udp_tunnel));
3152 }
3153
3154 int
3155 rte_eth_led_on(uint16_t port_id)
3156 {
3157         struct rte_eth_dev *dev;
3158
3159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3160         dev = &rte_eth_devices[port_id];
3161         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3162         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3163 }
3164
3165 int
3166 rte_eth_led_off(uint16_t port_id)
3167 {
3168         struct rte_eth_dev *dev;
3169
3170         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3171         dev = &rte_eth_devices[port_id];
3172         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3173         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3174 }
3175
3176 /*
3177  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3178  * an empty spot.
3179  */
3180 static int
3181 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3182 {
3183         struct rte_eth_dev_info dev_info;
3184         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3185         unsigned i;
3186         int ret;
3187
3188         ret = rte_eth_dev_info_get(port_id, &dev_info);
3189         if (ret != 0)
3190                 return -1;
3191
3192         for (i = 0; i < dev_info.max_mac_addrs; i++)
3193                 if (memcmp(addr, &dev->data->mac_addrs[i],
3194                                 RTE_ETHER_ADDR_LEN) == 0)
3195                         return i;
3196
3197         return -1;
3198 }
3199
3200 static const struct rte_ether_addr null_mac_addr;
3201
3202 int
3203 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3204                         uint32_t pool)
3205 {
3206         struct rte_eth_dev *dev;
3207         int index;
3208         uint64_t pool_mask;
3209         int ret;
3210
3211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3212         dev = &rte_eth_devices[port_id];
3213         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3214
3215         if (rte_is_zero_ether_addr(addr)) {
3216                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3217                         port_id);
3218                 return -EINVAL;
3219         }
3220         if (pool >= ETH_64_POOLS) {
3221                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3222                 return -EINVAL;
3223         }
3224
3225         index = get_mac_addr_index(port_id, addr);
3226         if (index < 0) {
3227                 index = get_mac_addr_index(port_id, &null_mac_addr);
3228                 if (index < 0) {
3229                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3230                                 port_id);
3231                         return -ENOSPC;
3232                 }
3233         } else {
3234                 pool_mask = dev->data->mac_pool_sel[index];
3235
3236                 /* If both the MAC address and pool are already there, do nothing */
3237                 if (pool_mask & (1ULL << pool))
3238                         return 0;
3239         }
3240
3241         /* Update NIC */
3242         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3243
3244         if (ret == 0) {
3245                 /* Update address in NIC data structure */
3246                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3247
3248                 /* Update pool bitmap in NIC data structure */
3249                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3250         }
3251
3252         return eth_err(port_id, ret);
3253 }
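
/*
 * Usage sketch (illustrative): add a locally administered secondary
 * MAC address to pool 0 and remove it again. The address bytes are
 * placeholders.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) == 0)
 *		(void)rte_eth_dev_mac_addr_remove(port_id, &mac);
 */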
3254
3255 int
3256 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3257 {
3258         struct rte_eth_dev *dev;
3259         int index;
3260
3261         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3262         dev = &rte_eth_devices[port_id];
3263         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3264
3265         index = get_mac_addr_index(port_id, addr);
3266         if (index == 0) {
3267                 RTE_ETHDEV_LOG(ERR,
3268                         "Port %u: Cannot remove default MAC address\n",
3269                         port_id);
3270                 return -EADDRINUSE;
3271         } else if (index < 0)
3272                 return 0;  /* Do nothing if address wasn't found */
3273
3274         /* Update NIC */
3275         (*dev->dev_ops->mac_addr_remove)(dev, index);
3276
3277         /* Update address in NIC data structure */
3278         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3279
3280         /* reset pool bitmap */
3281         dev->data->mac_pool_sel[index] = 0;
3282
3283         return 0;
3284 }
3285
3286 int
3287 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3288 {
3289         struct rte_eth_dev *dev;
3290         int ret;
3291
3292         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3293
3294         if (!rte_is_valid_assigned_ether_addr(addr))
3295                 return -EINVAL;
3296
3297         dev = &rte_eth_devices[port_id];
3298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3299
3300         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3301         if (ret < 0)
3302                 return ret;
3303
3304         /* Update default address in NIC data structure */
3305         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3306
3307         return 0;
3308 }
3309
3311 /*
3312  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3313  * an empty spot.
3314  */
3315 static int
3316 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3317 {
3318         struct rte_eth_dev_info dev_info;
3319         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3320         unsigned i;
3321         int ret;
3322
3323         ret = rte_eth_dev_info_get(port_id, &dev_info);
3324         if (ret != 0)
3325                 return -1;
3326
3327         if (!dev->data->hash_mac_addrs)
3328                 return -1;
3329
3330         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3331                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3332                         RTE_ETHER_ADDR_LEN) == 0)
3333                         return i;
3334
3335         return -1;
3336 }
3337
3338 int
3339 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3340                                 uint8_t on)
3341 {
3342         int index;
3343         int ret;
3344         struct rte_eth_dev *dev;
3345
3346         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3347
3348         dev = &rte_eth_devices[port_id];
3349         if (rte_is_zero_ether_addr(addr)) {
3350                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3351                         port_id);
3352                 return -EINVAL;
3353         }
3354
3355         index = get_hash_mac_addr_index(port_id, addr);
3356         /* Check if it's already there, and do nothing */
3357         if ((index >= 0) && on)
3358                 return 0;
3359
3360         if (index < 0) {
3361                 if (!on) {
3362                         RTE_ETHDEV_LOG(ERR,
3363                                 "Port %u: the MAC address was not set in UTA\n",
3364                                 port_id);
3365                         return -EINVAL;
3366                 }
3367
3368                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3369                 if (index < 0) {
3370                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3371                                 port_id);
3372                         return -ENOSPC;
3373                 }
3374         }
3375
3376         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3377         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3378         if (ret == 0) {
3379                 /* Update address in NIC data structure */
3380                 if (on)
3381                         rte_ether_addr_copy(addr,
3382                                         &dev->data->hash_mac_addrs[index]);
3383                 else
3384                         rte_ether_addr_copy(&null_mac_addr,
3385                                         &dev->data->hash_mac_addrs[index]);
3386         }
3387
3388         return eth_err(port_id, ret);
3389 }
3390
3391 int
3392 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3393 {
3394         struct rte_eth_dev *dev;
3395
3396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3397
3398         dev = &rte_eth_devices[port_id];
3399
3400         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3401         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3402                                                                        on));
3403 }
3404
3405 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3406                                         uint16_t tx_rate)
3407 {
3408         struct rte_eth_dev *dev;
3409         struct rte_eth_dev_info dev_info;
3410         struct rte_eth_link link;
3411         int ret;
3412
3413         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3414
3415         ret = rte_eth_dev_info_get(port_id, &dev_info);
3416         if (ret != 0)
3417                 return ret;
3418
3419         dev = &rte_eth_devices[port_id];
3420         link = dev->data->dev_link;
3421
3422         if (queue_idx >= dev_info.max_tx_queues) {
3423                 RTE_ETHDEV_LOG(ERR,
3424                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3425                         port_id, queue_idx);
3426                 return -EINVAL;
3427         }
3428
3429         if (tx_rate > link.link_speed) {
3430                 RTE_ETHDEV_LOG(ERR,
3431                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
3432                         tx_rate, link.link_speed);
3433                 return -EINVAL;
3434         }
3435
3436         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3437         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3438                                                         queue_idx, tx_rate));
3439 }
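
/*
 * Usage sketch (illustrative): cap Tx queue 0 of "port_id" at
 * 1000 Mbps; tx_rate is expressed in Mbps and may not exceed the
 * negotiated link speed.
 *
 *	(void)rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */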
3440
3441 int
3442 rte_eth_mirror_rule_set(uint16_t port_id,
3443                         struct rte_eth_mirror_conf *mirror_conf,
3444                         uint8_t rule_id, uint8_t on)
3445 {
3446         struct rte_eth_dev *dev;
3447
3448         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3449         if (mirror_conf->rule_type == 0) {
3450                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3451                 return -EINVAL;
3452         }
3453
3454         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3455                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3456                         ETH_64_POOLS - 1);
3457                 return -EINVAL;
3458         }
3459
3460         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3461              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3462             (mirror_conf->pool_mask == 0)) {
3463                 RTE_ETHDEV_LOG(ERR,
3464                         "Invalid mirror pool, pool mask cannot be 0\n");
3465                 return -EINVAL;
3466         }
3467
3468         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3469             mirror_conf->vlan.vlan_mask == 0) {
3470                 RTE_ETHDEV_LOG(ERR,
3471                         "Invalid vlan mask, vlan mask cannot be 0\n");
3472                 return -EINVAL;
3473         }
3474
3475         dev = &rte_eth_devices[port_id];
3476         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3477
3478         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3479                                                 mirror_conf, rule_id, on));
3480 }
3481
3482 int
3483 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3484 {
3485         struct rte_eth_dev *dev;
3486
3487         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3488
3489         dev = &rte_eth_devices[port_id];
3490         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3491
3492         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3493                                                                    rule_id));
3494 }
3495
3496 RTE_INIT(eth_dev_init_cb_lists)
3497 {
3498         int i;
3499
3500         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3501                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3502 }
3503
3504 int
3505 rte_eth_dev_callback_register(uint16_t port_id,
3506                         enum rte_eth_event_type event,
3507                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3508 {
3509         struct rte_eth_dev *dev;
3510         struct rte_eth_dev_callback *user_cb;
3511         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3512         uint16_t last_port;
3513
3514         if (!cb_fn)
3515                 return -EINVAL;
3516
3517         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3518                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3519                 return -EINVAL;
3520         }
3521
3522         if (port_id == RTE_ETH_ALL) {
3523                 next_port = 0;
3524                 last_port = RTE_MAX_ETHPORTS - 1;
3525         } else {
3526                 next_port = last_port = port_id;
3527         }
3528
3529         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3530
3531         do {
3532                 dev = &rte_eth_devices[next_port];
3533
3534                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3535                         if (user_cb->cb_fn == cb_fn &&
3536                                 user_cb->cb_arg == cb_arg &&
3537                                 user_cb->event == event) {
3538                                 break;
3539                         }
3540                 }
3541
3542                 /* create a new callback. */
3543                 if (user_cb == NULL) {
3544                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3545                                 sizeof(struct rte_eth_dev_callback), 0);
3546                         if (user_cb != NULL) {
3547                                 user_cb->cb_fn = cb_fn;
3548                                 user_cb->cb_arg = cb_arg;
3549                                 user_cb->event = event;
3550                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3551                                                   user_cb, next);
3552                         } else {
3553                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3554                                 rte_eth_dev_callback_unregister(port_id, event,
3555                                                                 cb_fn, cb_arg);
3556                                 return -ENOMEM;
3557                         }
3558
3559                 }
3560         } while (++next_port <= last_port);
3561
3562         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3563         return 0;
3564 }
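
/*
 * Usage sketch (illustrative): be notified of link state changes on
 * every port. "on_link_change" is a hypothetical application callback
 * matching rte_eth_dev_cb_fn.
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *		       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("link state changed on port %u\n", port_id);
 *		return 0;
 *	}
 *
 *	(void)rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, on_link_change, NULL);
 */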
3565
3566 int
3567 rte_eth_dev_callback_unregister(uint16_t port_id,
3568                         enum rte_eth_event_type event,
3569                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3570 {
3571         int ret;
3572         struct rte_eth_dev *dev;
3573         struct rte_eth_dev_callback *cb, *next;
3574         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3575         uint16_t last_port;
3576
3577         if (!cb_fn)
3578                 return -EINVAL;
3579
3580         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3581                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3582                 return -EINVAL;
3583         }
3584
3585         if (port_id == RTE_ETH_ALL) {
3586                 next_port = 0;
3587                 last_port = RTE_MAX_ETHPORTS - 1;
3588         } else {
3589                 next_port = last_port = port_id;
3590         }
3591
3592         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3593
3594         do {
3595                 dev = &rte_eth_devices[next_port];
3596                 ret = 0;
3597                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3598                      cb = next) {
3599
3600                         next = TAILQ_NEXT(cb, next);
3601
3602                         if (cb->cb_fn != cb_fn || cb->event != event ||
3603                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3604                                 continue;
3605
3606                         /*
3607                          * if this callback is not executing right now,
3608                          * then remove it.
3609                          */
3610                         if (cb->active == 0) {
3611                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3612                                 rte_free(cb);
3613                         } else {
3614                                 ret = -EAGAIN;
3615                         }
3616                 }
3617         } while (++next_port <= last_port);
3618
3619         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3620         return ret;
3621 }
3622
3623 int
3624 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3625         enum rte_eth_event_type event, void *ret_param)
3626 {
3627         struct rte_eth_dev_callback *cb_lst;
3628         struct rte_eth_dev_callback dev_cb;
3629         int rc = 0;
3630
3631         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3632         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3633                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3634                         continue;
3635                 dev_cb = *cb_lst;
3636                 cb_lst->active = 1;
3637                 if (ret_param != NULL)
3638                         dev_cb.ret_param = ret_param;
3639
3640                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3641                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3642                                 dev_cb.cb_arg, dev_cb.ret_param);
3643                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3644                 cb_lst->active = 0;
3645         }
3646         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3647         return rc;
3648 }
3649
3650 void
3651 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3652 {
3653         if (dev == NULL)
3654                 return;
3655
3656         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3657
3658         dev->state = RTE_ETH_DEV_ATTACHED;
3659 }
3660
3661 int
3662 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3663 {
3664         uint32_t vec;
3665         struct rte_eth_dev *dev;
3666         struct rte_intr_handle *intr_handle;
3667         uint16_t qid;
3668         int rc;
3669
3670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3671
3672         dev = &rte_eth_devices[port_id];
3673
3674         if (!dev->intr_handle) {
3675                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3676                 return -ENOTSUP;
3677         }
3678
3679         intr_handle = dev->intr_handle;
3680         if (!intr_handle->intr_vec) {
3681                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3682                 return -EPERM;
3683         }
3684
3685         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3686                 vec = intr_handle->intr_vec[qid];
3687                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3688                 if (rc && rc != -EEXIST) {
3689                         RTE_ETHDEV_LOG(ERR,
3690                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3691                                 port_id, qid, op, epfd, vec);
3692                 }
3693         }
3694
3695         return 0;
3696 }
3697
3698 int
3699 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3700 {
3701         struct rte_intr_handle *intr_handle;
3702         struct rte_eth_dev *dev;
3703         unsigned int efd_idx;
3704         uint32_t vec;
3705         int fd;
3706
3707         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3708
3709         dev = &rte_eth_devices[port_id];
3710
3711         if (queue_id >= dev->data->nb_rx_queues) {
3712                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3713                 return -1;
3714         }
3715
3716         if (!dev->intr_handle) {
3717                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3718                 return -1;
3719         }
3720
3721         intr_handle = dev->intr_handle;
3722         if (!intr_handle->intr_vec) {
3723                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3724                 return -1;
3725         }
3726
3727         vec = intr_handle->intr_vec[queue_id];
3728         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3729                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3730         fd = intr_handle->efds[efd_idx];
3731
3732         return fd;
3733 }
3734
3735 const struct rte_memzone *
3736 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3737                          uint16_t queue_id, size_t size, unsigned align,
3738                          int socket_id)
3739 {
3740         char z_name[RTE_MEMZONE_NAMESIZE];
3741         const struct rte_memzone *mz;
3742         int rc;
3743
3744         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3745                       dev->data->port_id, queue_id, ring_name);
3746         if (rc >= RTE_MEMZONE_NAMESIZE) {
3747                 RTE_ETHDEV_LOG(ERR, "Ring name too long\n");
3748                 rte_errno = ENAMETOOLONG;
3749                 return NULL;
3750         }
3751
3752         mz = rte_memzone_lookup(z_name);
3753         if (mz)
3754                 return mz;
3755
3756         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3757                         RTE_MEMZONE_IOVA_CONTIG, align);
3758 }
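
/*
 * Usage sketch (illustrative): a PMD reserving DMA memory for an Rx
 * descriptor ring. "queue_idx", "ring_size" and "socket_id" are values
 * the driver already holds at queue-setup time.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -rte_errno;
 */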
3759
3760 int
3761 rte_eth_dev_create(struct rte_device *device, const char *name,
3762         size_t priv_data_size,
3763         ethdev_bus_specific_init ethdev_bus_specific_init,
3764         void *bus_init_params,
3765         ethdev_init_t ethdev_init, void *init_params)
3766 {
3767         struct rte_eth_dev *ethdev;
3768         int retval;
3769
3770         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3771
3772         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3773                 ethdev = rte_eth_dev_allocate(name);
3774                 if (!ethdev)
3775                         return -ENODEV;
3776
3777                 if (priv_data_size) {
3778                         ethdev->data->dev_private = rte_zmalloc_socket(
3779                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3780                                 device->numa_node);
3781
3782                         if (!ethdev->data->dev_private) {
3783                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3784                                 retval = -ENOMEM;
3785                                 goto probe_failed;
3786                         }
3787                 }
3788         } else {
3789                 ethdev = rte_eth_dev_attach_secondary(name);
3790                 if (!ethdev) {
3791                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3792                                 "ethdev doesn't exist\n");
3793                         return -ENODEV;
3794                 }
3795         }
3796
3797         ethdev->device = device;
3798
3799         if (ethdev_bus_specific_init) {
3800                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3801                 if (retval) {
3802                         RTE_LOG(ERR, EAL,
3803                                 "ethdev bus specific initialisation failed\n");
3804                         goto probe_failed;
3805                 }
3806         }
3807
3808         retval = ethdev_init(ethdev, init_params);
3809         if (retval) {
3810                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3811                 goto probe_failed;
3812         }
3813
3814         rte_eth_dev_probing_finish(ethdev);
3815
3816         return retval;
3817
3818 probe_failed:
3819         rte_eth_dev_release_port(ethdev);
3820         return retval;
3821 }
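
/*
 * Usage sketch (illustrative): a bus probe hook building an ethdev on
 * top of this helper. "my_pmd_probe", "my_pmd_init" and
 * "struct my_priv" are hypothetical driver symbols; my_pmd_init is
 * assumed to match ethdev_init_t.
 *
 *	static int
 *	my_pmd_probe(struct rte_device *device)
 *	{
 *		return rte_eth_dev_create(device, device->name,
 *				sizeof(struct my_priv),
 *				NULL, NULL, my_pmd_init, NULL);
 *	}
 */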
3822
3823 int
3824 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3825         ethdev_uninit_t ethdev_uninit)
3826 {
3827         int ret;
3828
3829         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3830         if (!ethdev)
3831                 return -ENODEV;
3832
3833         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3834
3835         ret = ethdev_uninit(ethdev);
3836         if (ret)
3837                 return ret;
3838
3839         return rte_eth_dev_release_port(ethdev);
3840 }
3841
3842 int
3843 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3844                           int epfd, int op, void *data)
3845 {
3846         uint32_t vec;
3847         struct rte_eth_dev *dev;
3848         struct rte_intr_handle *intr_handle;
3849         int rc;
3850
3851         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3852
3853         dev = &rte_eth_devices[port_id];
3854         if (queue_id >= dev->data->nb_rx_queues) {
3855                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3856                 return -EINVAL;
3857         }
3858
3859         if (!dev->intr_handle) {
3860                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3861                 return -ENOTSUP;
3862         }
3863
3864         intr_handle = dev->intr_handle;
3865         if (!intr_handle->intr_vec) {
3866                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3867                 return -EPERM;
3868         }
3869
3870         vec = intr_handle->intr_vec[queue_id];
3871         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3872         if (rc && rc != -EEXIST) {
3873                 RTE_ETHDEV_LOG(ERR,
3874                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3875                         port_id, queue_id, op, epfd, vec);
3876                 return rc;
3877         }
3878
3879         return 0;
3880 }
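
/*
 * Usage sketch (illustrative): sleep until Rx queue 0 has traffic
 * instead of busy polling, using the calling thread's private epoll
 * instance.
 *
 *	struct rte_epoll_event ev;
 *
 *	(void)rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	(void)rte_eth_dev_rx_intr_enable(port_id, 0);
 *	(void)rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	(void)rte_eth_dev_rx_intr_disable(port_id, 0);
 *	... resume rte_eth_rx_burst() until the queue is drained ...
 */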
3881
3882 int
3883 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3884                            uint16_t queue_id)
3885 {
3886         struct rte_eth_dev *dev;
3887
3888         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3889
3890         dev = &rte_eth_devices[port_id];
3891
3892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3893         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3894                                                                 queue_id));
3895 }
3896
3897 int
3898 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3899                             uint16_t queue_id)
3900 {
3901         struct rte_eth_dev *dev;
3902
3903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3904
3905         dev = &rte_eth_devices[port_id];
3906
3907         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3908         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3909                                                                 queue_id));
3910 }
3911
3912
3913 int
3914 rte_eth_dev_filter_supported(uint16_t port_id,
3915                              enum rte_filter_type filter_type)
3916 {
3917         struct rte_eth_dev *dev;
3918
3919         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3920
3921         dev = &rte_eth_devices[port_id];
3922         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3923         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3924                                 RTE_ETH_FILTER_NOP, NULL);
3925 }
3926
3927 int
3928 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3929                         enum rte_filter_op filter_op, void *arg)
3930 {
3931         struct rte_eth_dev *dev;
3932
3933         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3934
3935         dev = &rte_eth_devices[port_id];
3936         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3937         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3938                                                              filter_op, arg));
3939 }
3940
3941 const struct rte_eth_rxtx_callback *
3942 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3943                 rte_rx_callback_fn fn, void *user_param)
3944 {
3945 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3946         rte_errno = ENOTSUP;
3947         return NULL;
3948 #endif
3949         /* check input parameters */
3950         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3951                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3952                 rte_errno = EINVAL;
3953                 return NULL;
3954         }
3955         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3956
3957         if (cb == NULL) {
3958                 rte_errno = ENOMEM;
3959                 return NULL;
3960         }
3961
3962         cb->fn.rx = fn;
3963         cb->param = user_param;
3964
3965         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3966         /* Add the callbacks in FIFO order. */
3967         struct rte_eth_rxtx_callback *tail =
3968                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3969
3970         if (!tail) {
3971                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3972
3973         } else {
3974                 while (tail->next)
3975                         tail = tail->next;
3976                 tail->next = cb;
3977         }
3978         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3979
3980         return cb;
3981 }
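
/*
 * Usage sketch (illustrative): count packets seen on Rx queue 0.
 * "count_rx" is a hypothetical callback matching rte_rx_callback_fn
 * and "rx_count" a counter owned by the application.
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_rx(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *count = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_rx, &rx_count);
 */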
3982
3983 const struct rte_eth_rxtx_callback *
3984 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3985                 rte_rx_callback_fn fn, void *user_param)
3986 {
3987 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3988         rte_errno = ENOTSUP;
3989         return NULL;
3990 #endif
3991         /* check input parameters */
3992         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3993                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3994                 rte_errno = EINVAL;
3995                 return NULL;
3996         }
3997
3998         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3999
4000         if (cb == NULL) {
4001                 rte_errno = ENOMEM;
4002                 return NULL;
4003         }
4004
4005         cb->fn.rx = fn;
4006         cb->param = user_param;
4007
4008         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4009         /* Add the callback at the first position */
4010         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4011         rte_smp_wmb();
4012         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4013         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4014
4015         return cb;
4016 }
4017
4018 const struct rte_eth_rxtx_callback *
4019 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4020                 rte_tx_callback_fn fn, void *user_param)
4021 {
4022 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4023         rte_errno = ENOTSUP;
4024         return NULL;
4025 #endif
4026         /* check input parameters */
4027         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4028                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4029                 rte_errno = EINVAL;
4030                 return NULL;
4031         }
4032
4033         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4034
4035         if (cb == NULL) {
4036                 rte_errno = ENOMEM;
4037                 return NULL;
4038         }
4039
4040         cb->fn.tx = fn;
4041         cb->param = user_param;
4042
4043         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4044         /* Add the callbacks in FIFO order. */
4045         struct rte_eth_rxtx_callback *tail =
4046                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4047
4048         if (!tail) {
4049                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4050
4051         } else {
4052                 while (tail->next)
4053                         tail = tail->next;
4054                 tail->next = cb;
4055         }
4056         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4057
4058         return cb;
4059 }
4060
4061 int
4062 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4063                 const struct rte_eth_rxtx_callback *user_cb)
4064 {
4065 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4066         return -ENOTSUP;
4067 #endif
4068         /* Check input parameters. */
4069         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4070         if (user_cb == NULL ||
4071                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4072                 return -EINVAL;
4073
4074         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4075         struct rte_eth_rxtx_callback *cb;
4076         struct rte_eth_rxtx_callback **prev_cb;
4077         int ret = -EINVAL;
4078
4079         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4080         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4081         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4082                 cb = *prev_cb;
4083                 if (cb == user_cb) {
4084                         /* Remove the user cb from the callback list. */
4085                         *prev_cb = cb->next;
4086                         ret = 0;
4087                         break;
4088                 }
4089         }
4090         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4091
4092         return ret;
4093 }
4094
4095 int
4096 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4097                 const struct rte_eth_rxtx_callback *user_cb)
4098 {
4099 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4100         return -ENOTSUP;
4101 #endif
4102         /* Check input parameters. */
4103         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4104         if (user_cb == NULL ||
4105                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4106                 return -EINVAL;
4107
4108         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4109         int ret = -EINVAL;
4110         struct rte_eth_rxtx_callback *cb;
4111         struct rte_eth_rxtx_callback **prev_cb;
4112
4113         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4114         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4115         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4116                 cb = *prev_cb;
4117                 if (cb == user_cb) {
4118                         /* Remove the user cb from the callback list. */
4119                         *prev_cb = cb->next;
4120                         ret = 0;
4121                         break;
4122                 }
4123         }
4124         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4125
4126         return ret;
4127 }
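
/*
 * Usage note (illustrative): removal only unlinks a callback; the
 * memory is deliberately not freed here because a datapath thread may
 * still be executing it. A typical caller waits for quiescence before
 * freeing ("cb" is the handle returned when the callback was added):
 *
 *	(void)rte_eth_remove_rx_callback(port_id, 0, cb);
 *	... wait until no lcore is inside rte_eth_rx_burst() ...
 *	rte_free((void *)(uintptr_t)cb);
 */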
4128
4129 int
4130 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4131         struct rte_eth_rxq_info *qinfo)
4132 {
4133         struct rte_eth_dev *dev;
4134
4135         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4136
4137         if (qinfo == NULL)
4138                 return -EINVAL;
4139
4140         dev = &rte_eth_devices[port_id];
4141         if (queue_id >= dev->data->nb_rx_queues) {
4142                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4143                 return -EINVAL;
4144         }
4145
4146         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4147
4148         memset(qinfo, 0, sizeof(*qinfo));
4149         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4150         return 0;
4151 }
4152
4153 int
4154 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4155         struct rte_eth_txq_info *qinfo)
4156 {
4157         struct rte_eth_dev *dev;
4158
4159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4160
4161         if (qinfo == NULL)
4162                 return -EINVAL;
4163
4164         dev = &rte_eth_devices[port_id];
4165         if (queue_id >= dev->data->nb_tx_queues) {
4166                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4167                 return -EINVAL;
4168         }
4169
4170         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4171
4172         memset(qinfo, 0, sizeof(*qinfo));
4173         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4174
4175         return 0;
4176 }
4177
4178 int
4179 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4180                              struct rte_ether_addr *mc_addr_set,
4181                              uint32_t nb_mc_addr)
4182 {
4183         struct rte_eth_dev *dev;
4184
4185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4186
4187         dev = &rte_eth_devices[port_id];
4188         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4189         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4190                                                 mc_addr_set, nb_mc_addr));
4191 }
4192
4193 int
4194 rte_eth_timesync_enable(uint16_t port_id)
4195 {
4196         struct rte_eth_dev *dev;
4197
4198         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4199         dev = &rte_eth_devices[port_id];
4200
4201         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4202         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4203 }
4204
4205 int
4206 rte_eth_timesync_disable(uint16_t port_id)
4207 {
4208         struct rte_eth_dev *dev;
4209
4210         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4211         dev = &rte_eth_devices[port_id];
4212
4213         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4214         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4215 }
4216
4217 int
4218 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4219                                    uint32_t flags)
4220 {
4221         struct rte_eth_dev *dev;
4222
4223         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4224         dev = &rte_eth_devices[port_id];
4225
4226         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4227         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4228                                 (dev, timestamp, flags));
4229 }
4230
4231 int
4232 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4233                                    struct timespec *timestamp)
4234 {
4235         struct rte_eth_dev *dev;
4236
4237         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4238         dev = &rte_eth_devices[port_id];
4239
4240         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4241         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4242                                 (dev, timestamp));
4243 }
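
/*
 * Usage sketch (illustrative): read the hardware receive timestamp of
 * a PTP packet. A flags value of 0 selects the default timestamp
 * register; devices with several registers interpret the value in a
 * device-specific way.
 *
 *	struct timespec ts;
 *
 *	(void)rte_eth_timesync_enable(port_id);
 *	... receive a packet with PKT_RX_IEEE1588_PTP set in ol_flags ...
 *	(void)rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *	(void)rte_eth_timesync_disable(port_id);
 */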
4244
4245 int
4246 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4247 {
4248         struct rte_eth_dev *dev;
4249
4250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4251         dev = &rte_eth_devices[port_id];
4252
4253         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4254         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4255                                                                       delta));
4256 }
4257
4258 int
4259 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4260 {
4261         struct rte_eth_dev *dev;
4262
4263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4264         dev = &rte_eth_devices[port_id];
4265
4266         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4267         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4268                                                                 timestamp));
4269 }
4270
4271 int
4272 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4273 {
4274         struct rte_eth_dev *dev;
4275
4276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4277         dev = &rte_eth_devices[port_id];
4278
4279         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4280         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4281                                                                 timestamp));
4282 }
4283
4284 int
4285 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4286 {
4287         struct rte_eth_dev *dev;
4288
4289         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4290         dev = &rte_eth_devices[port_id];
4291
4292         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4293         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4294 }
4295
4296 int
4297 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4298 {
4299         struct rte_eth_dev *dev;
4300
4301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4302
4303         dev = &rte_eth_devices[port_id];
4304         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4305         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4306 }
4307
4308 int
4309 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4310 {
4311         struct rte_eth_dev *dev;
4312
4313         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4314
4315         dev = &rte_eth_devices[port_id];
4316         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4317         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4318 }
4319
4320 int
4321 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4322 {
4323         struct rte_eth_dev *dev;
4324
4325         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4326
4327         dev = &rte_eth_devices[port_id];
4328         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4329         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4330 }
4331
4332 int
4333 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4334 {
4335         struct rte_eth_dev *dev;
4336
4337         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4338
4339         dev = &rte_eth_devices[port_id];
4340         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4341         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4342 }
4343
4344 int
4345 rte_eth_dev_get_module_info(uint16_t port_id,
4346                             struct rte_eth_dev_module_info *modinfo)
4347 {
4348         struct rte_eth_dev *dev;
4349
4350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4351
4352         dev = &rte_eth_devices[port_id];
4353         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4354         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4355 }
4356
4357 int
4358 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4359                               struct rte_dev_eeprom_info *info)
4360 {
4361         struct rte_eth_dev *dev;
4362
4363         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4364
4365         dev = &rte_eth_devices[port_id];
4366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4367         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4368 }
4369
4370 int
4371 rte_eth_dev_get_dcb_info(uint16_t port_id,
4372                              struct rte_eth_dcb_info *dcb_info)
4373 {
4374         struct rte_eth_dev *dev;
4375
4376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4377
4378         dev = &rte_eth_devices[port_id];
4379         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4380
4381         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4382         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4383 }
4384
4385 int
4386 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4387                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4388 {
4389         struct rte_eth_dev *dev;
4390
4391         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4392         if (l2_tunnel == NULL) {
4393                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4394                 return -EINVAL;
4395         }
4396
4397         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4398                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4399                 return -EINVAL;
4400         }
4401
4402         dev = &rte_eth_devices[port_id];
4403         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4404                                 -ENOTSUP);
4405         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4406                                                                 l2_tunnel));
4407 }
4408
4409 int
4410 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4411                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4412                                   uint32_t mask,
4413                                   uint8_t en)
4414 {
4415         struct rte_eth_dev *dev;
4416
4417         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4418
4419         if (l2_tunnel == NULL) {
4420                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4421                 return -EINVAL;
4422         }
4423
4424         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4425                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4426                 return -EINVAL;
4427         }
4428
4429         if (mask == 0) {
4430                 RTE_ETHDEV_LOG(ERR, "Mask cannot be 0\n");
4431                 return -EINVAL;
4432         }
4433
4434         dev = &rte_eth_devices[port_id];
4435         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4436                                 -ENOTSUP);
4437         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4438                                                         l2_tunnel, mask, en));
4439 }
4440
4441 static void
4442 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4443                            const struct rte_eth_desc_lim *desc_lim)
4444 {
4445         if (desc_lim->nb_align != 0)
4446                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4447
4448         if (desc_lim->nb_max != 0)
4449                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4450
4451         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4452 }
4453
4454 int
4455 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4456                                  uint16_t *nb_rx_desc,
4457                                  uint16_t *nb_tx_desc)
4458 {
4459         struct rte_eth_dev_info dev_info;
4460         int ret;
4461
4462         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4463
4464         ret = rte_eth_dev_info_get(port_id, &dev_info);
4465         if (ret != 0)
4466                 return ret;
4467
4468         if (nb_rx_desc != NULL)
4469                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4470
4471         if (nb_tx_desc != NULL)
4472                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4473
4474         return 0;
4475 }
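
/*
 * Usage sketch (illustrative): clamp requested ring sizes to the
 * device limits before setting queues up. "mp" is an application
 * mbuf pool and "socket_id" the NUMA node of the port.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	(void)rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	(void)rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
 *			NULL, mp);
 *	(void)rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */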
4476
4477 int
4478 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4479 {
4480         struct rte_eth_dev *dev;
4481
4482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4483
4484         if (pool == NULL)
4485                 return -EINVAL;
4486
4487         dev = &rte_eth_devices[port_id];
4488
4489         if (*dev->dev_ops->pool_ops_supported == NULL)
4490                 return 1; /* all pools are supported */
4491
4492         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4493 }
4494
4495 /**
4496  * A set of values to describe the possible states of a switch domain.
4497  */
4498 enum rte_eth_switch_domain_state {
4499         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4500         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4501 };
4502
4503 /**
4504  * Array of switch domains available for allocation. Array is sized to
4505  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4506  * ethdev ports in a single process.
4507  */
4508 static struct rte_eth_dev_switch {
4509         enum rte_eth_switch_domain_state state;
4510 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4511
4512 int
4513 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4514 {
4515         unsigned int i;
4516
4517         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4518
4519         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4520                 i < RTE_MAX_ETHPORTS; i++) {
4521                 if (rte_eth_switch_domains[i].state ==
4522                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4523                         rte_eth_switch_domains[i].state =
4524                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4525                         *domain_id = i;
4526                         return 0;
4527                 }
4528         }
4529
4530         return -ENOSPC;
4531 }
4532
4533 int
4534 rte_eth_switch_domain_free(uint16_t domain_id)
4535 {
4536         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4537                 domain_id >= RTE_MAX_ETHPORTS)
4538                 return -EINVAL;
4539
4540         if (rte_eth_switch_domains[domain_id].state !=
4541                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4542                 return -EINVAL;
4543
4544         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4545
4546         return 0;
4547 }
4548
4549 static int
4550 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4551 {
4552         int state;
4553         struct rte_kvargs_pair *pair;
4554         char *letter;
4555
4556         arglist->str = strdup(str_in);
4557         if (arglist->str == NULL)
4558                 return -ENOMEM;
4559
4560         letter = arglist->str;
4561         state = 0;
4562         arglist->count = 0;
4563         pair = &arglist->pairs[0];
4564         while (1) {
4565                 switch (state) {
4566                 case 0: /* Initial */
4567                         if (*letter == '=')
4568                                 return -EINVAL;
4569                         else if (*letter == '\0')
4570                                 return 0;
4571
4572                         state = 1;
4573                         pair->key = letter;
4574                         /* fall-thru */
4575
4576                 case 1: /* Parsing key */
4577                         if (*letter == '=') {
4578                                 *letter = '\0';
4579                                 pair->value = letter + 1;
4580                                 state = 2;
4581                         } else if (*letter == ',' || *letter == '\0')
4582                                 return -EINVAL;
4583                         break;
4584
4586                 case 2: /* Parsing value */
4587                         if (*letter == '[')
4588                                 state = 3;
4589                         else if (*letter == ',') {
4590                                 *letter = '\0';
4591                                 arglist->count++;
4592                                 pair = &arglist->pairs[arglist->count];
4593                                 state = 0;
4594                         } else if (*letter == '\0') {
4595                                 letter--;
4596                                 arglist->count++;
4597                                 pair = &arglist->pairs[arglist->count];
4598                                 state = 0;
4599                         }
4600                         break;
4601
4602                 case 3: /* Parsing list */
4603                         if (*letter == ']')
4604                                 state = 2;
4605                         else if (*letter == '\0')
4606                                 return -EINVAL;
4607                         break;
4608                 }
4609                 letter++;
4610         }
4611 }
4612
4613 int
4614 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4615 {
4616         struct rte_kvargs args;
4617         struct rte_kvargs_pair *pair;
4618         unsigned int i;
4619         int result = 0;
4620
4621         memset(eth_da, 0, sizeof(*eth_da));
4622
4623         result = rte_eth_devargs_tokenise(&args, dargs);
4624         if (result < 0)
4625                 goto parse_cleanup;
4626
4627         for (i = 0; i < args.count; i++) {
4628                 pair = &args.pairs[i];
4629                 if (strcmp("representor", pair->key) == 0) {
4630                         result = rte_eth_devargs_parse_list(pair->value,
4631                                 rte_eth_devargs_parse_representor_ports,
4632                                 eth_da);
4633                         if (result < 0)
4634                                 goto parse_cleanup;
4635                 }
4636         }
4637
4638 parse_cleanup:
4639         if (args.str)
4640                 free(args.str);
4641
4642         return result;
4643 }
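
/*
 * Usage sketch (illustrative): parse a representor device argument as
 * it would appear after a whitelisted device on the EAL command line,
 * e.g. "-w 0000:05:00.0,representor=[0-3]".
 *
 *	struct rte_eth_devargs da;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &da) == 0)
 *		... da.nb_representor_ports is now 4 and
 *		    da.representor_ports holds 0..3 ...
 */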
4644
4645 RTE_INIT(ethdev_init_log)
4646 {
4647         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
4648         if (rte_eth_dev_logtype >= 0)
4649                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
4650 }