ethdev: allow iterating with pure class filter
lib/librte_ethdev/rte_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint16_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int __rte_experimental
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle the pure class filter case (i.e. without any bus-level
         * argument), which comes from the future new syntax.
         * rte_devargs_parse() does not support the new syntax yet,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t __rte_experimental
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not a pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in the middle of an rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get the next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidates */
                }
                /* The device matched the bus part; now check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try the next rte_device */

        /* No more ethdev ports to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void __rte_experimental
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}

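/*
 * Usage sketch (example only, compiled out): iterate over all ethdev ports
 * matching a pure class filter. The "class=eth," prefix is the pure class
 * syntax handled in rte_eth_iterator_init(); the MAC value below is a
 * placeholder. rte_eth_iterator_next() returns RTE_MAX_ETHPORTS when
 * exhausted and then cleans up the iterator itself, so an explicit
 * rte_eth_iterator_cleanup() is only needed when breaking out early.
 */
#if 0
static void
example_iterate_by_mac(void)
{
        struct rte_dev_iterator iterator;
        uint16_t port_id;

        if (rte_eth_iterator_init(&iterator,
                        "class=eth,mac=00:11:22:33:44:55") != 0)
                return;
        for (port_id = rte_eth_iterator_next(&iterator);
                        port_id != RTE_MAX_ETHPORTS;
                        port_id = rte_eth_iterator_next(&iterator))
                printf("matched port %u\n", port_id);
}
#endif
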
uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

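/*
 * Usage sketch (example only, compiled out): a manual walk over all
 * attached ports built on rte_eth_find_next(), close to what the
 * RTE_ETH_FOREACH_DEV() macro family provides.
 */
#if 0
static void
example_walk_ports(void)
{
        uint16_t port_id;

        for (port_id = rte_eth_find_next(0);
                        port_id < RTE_MAX_ETHPORTS;
                        port_id = rte_eth_find_next(port_id + 1))
                printf("port %u is attached\n", port_id);
}
#endif
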
static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        eth_dev_last_created_port = port_id;

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
               rte_eth_devices[port_id].data->owner.id != owner_id))
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

int __rte_experimental
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;
        int sret;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                        old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
                        new_owner->name);
        if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
                RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
                        port_id);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int __rte_experimental
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int __rte_experimental
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

void __rte_experimental
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}

int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

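/*
 * Usage sketch (example only, compiled out): the typical ownership flow
 * with the APIs above is to allocate an owner id, claim a port, and
 * release it when done. The owner name is a placeholder.
 */
#if 0
static int
example_own_port(uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .name = "example_app" };
        int ret;

        ret = rte_eth_dev_owner_new(&owner.id);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_owner_set(port_id, &owner);
        if (ret != 0)
                return ret;
        /* ... use the port exclusively ... */
        return rte_eth_dev_owner_unset(port_id, owner.id);
}
#endif
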
int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count(void)
{
        return rte_eth_dev_count_avail();
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t __rte_experimental
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        for (port = 0; port < RTE_MAX_ETHPORTS; port++)
                if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
                        count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* Don't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
                if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
                    !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }
        }

        return -ENODEV;
}

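/*
 * Usage sketch (example only, compiled out): the two lookups above are
 * inverses, converting between a port id and the underlying device name.
 */
#if 0
static void
example_name_lookup(uint16_t port_id)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        uint16_t found_port;

        if (rte_eth_dev_get_name_by_port(port_id, name) == 0 &&
                        rte_eth_dev_get_port_by_name(name, &found_port) == 0)
                RTE_ASSERT(found_port == port_id);
}
#endif
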
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

/* attach the new device, then store the port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
        int current = rte_eth_dev_count_total();
        struct rte_devargs da;
        int ret = -1;

        memset(&da, 0, sizeof(da));

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs */
        if (rte_devargs_parse(&da, devargs))
                goto err;

        ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
        if (ret < 0)
                goto err;

        /* No point looking at the port count if no port exists. */
        if (!rte_eth_dev_count_total()) {
                RTE_ETHDEV_LOG(ERR, "No port found for device (%s)\n", da.name);
                ret = -1;
                goto err;
        }

        /* If nothing happened, there is a bug here, since some driver told us
         * it attached a device, but did not create a port.
         * FIXME: race condition in case of plug-out of another device
         */
        if (current == rte_eth_dev_count_total()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(da.args);
        return ret;
}

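/*
 * Usage sketch (example only, compiled out): hot-plugging a virtual
 * device at runtime. "net_null0" names an instance of DPDK's null vdev
 * driver and is a placeholder; any devargs string accepted by
 * rte_devargs_parse() works the same way.
 */
#if 0
static int
example_hotplug_attach(void)
{
        uint16_t port_id;
        int ret;

        ret = rte_eth_dev_attach("net_null0", &port_id);
        if (ret == 0)
                printf("attached as port %u\n", port_id);
        return ret;
}
#endif
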
/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
        struct rte_device *dev;
        struct rte_bus *bus;
        uint32_t dev_flags;
        int ret = -1;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %"PRIu16" is bonded, cannot detach\n", port_id);
                return -ENOTSUP;
        }

        dev = rte_eth_devices[port_id].device;
        if (dev == NULL)
                return -EINVAL;

        bus = rte_bus_find_by_device(dev);
        if (bus == NULL)
                return -ENOENT;

        ret = rte_eal_hotplug_remove(bus->name, dev->name);
        if (ret < 0)
                return ret;

        rte_eth_dev_release_port(&rte_eth_devices[port_id]);
        return 0;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}

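/*
 * Usage sketch (example only, compiled out): building the link_speeds
 * bitmap of struct rte_eth_conf from a numeric speed, e.g. forcing a
 * fixed 10G full-duplex link.
 */
#if 0
static void
example_fixed_speed(struct rte_eth_conf *conf)
{
        conf->link_speeds = ETH_LINK_SPEED_FIXED |
                rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
}
#endif
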
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char * __rte_experimental
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

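/*
 * Usage sketch (example only, compiled out): decoding an offload bitmap
 * into human-readable names with the helpers above, one bit at a time.
 */
#if 0
static void
example_print_rx_offloads(uint64_t offloads)
{
        uint64_t single;

        while (offloads != 0) {
                single = offloads & ~(offloads - 1); /* isolate lowest set bit */
                printf("%s\n", rte_eth_dev_rx_offload_name(single));
                offloads &= ~single;
        }
}
#endif
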
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf local_conf = *dev_conf;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        /* If the number of queues specified by the application for both Rx
         * and Tx is zero, use the driver's preferred values. This cannot be
         * done individually, as it is valid for either Tx or Rx (but not
         * both) to be zero.
         * If the driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than the max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than the max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /* Check that the device supports the requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                return -EINVAL;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
             local_conf.rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, local_conf.rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                return -EINVAL;
        }
        if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
             local_conf.txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, local_conf.txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                return -EINVAL;
        }

        /* Check that the device supports the requested RSS hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                return -EINVAL;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return eth_err(port_id, diag);
        }

        return 0;
}

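/*
 * Usage sketch (example only, compiled out): the canonical setup sequence
 * built on rte_eth_dev_configure(). Queue counts and descriptor numbers
 * are placeholder values; rte_eth_tx_queue_setup() is declared in
 * rte_ethdev.h alongside the Rx variant defined later in this file.
 */
#if 0
static int
example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf = { 0 };
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 512,
                        rte_eth_dev_socket_id(port_id), NULL, mb_pool);
        if (ret < 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512,
                        rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
                return ret;
        return rte_eth_dev_start(port_id);
}
#endif
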
void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
{
        struct ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info->max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }
}

static void
rte_eth_dev_config_restore(struct rte_eth_dev *dev,
                           struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                rte_eth_dev_mac_restore(dev, dev_info);

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint16_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already started\n",
                        port_id);
                return 0;
        }

        rte_eth_dev_info_get(port_id, &dev_info);

        /* Let's restore the MAC address now if the device does not support
         * changing it live.
         */
        if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
                rte_eth_dev_mac_restore(dev, &dev_info);

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return eth_err(port_id, diag);

        rte_eth_dev_config_restore(dev, &dev_info, port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_ETHDEV_LOG(INFO,
                        "Device with port_id=%"PRIu16" already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

1492 int
1493 rte_eth_dev_set_link_up(uint16_t port_id)
1494 {
1495         struct rte_eth_dev *dev;
1496
1497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1498
1499         dev = &rte_eth_devices[port_id];
1500
1501         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1502         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1503 }
1504
1505 int
1506 rte_eth_dev_set_link_down(uint16_t port_id)
1507 {
1508         struct rte_eth_dev *dev;
1509
1510         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1511
1512         dev = &rte_eth_devices[port_id];
1513
1514         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1515         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1516 }
1517
1518 void
1519 rte_eth_dev_close(uint16_t port_id)
1520 {
1521         struct rte_eth_dev *dev;
1522
1523         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1524         dev = &rte_eth_devices[port_id];
1525
1526         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1527         dev->data->dev_started = 0;
1528         (*dev->dev_ops->dev_close)(dev);
1529
1530         /* check behaviour flag - temporary for PMD migration */
1531         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1532                 /* new behaviour: send event + reset state + free all data */
1533                 rte_eth_dev_release_port(dev);
1534                 return;
1535         }
1536         RTE_ETHDEV_LOG(DEBUG, "Port close is using the old behaviour.\n"
1537                         "The driver %s should migrate to the new behaviour.\n",
1538                         dev->device->driver->name);
1539         /* old behaviour: only free queue arrays */
1540         dev->data->nb_rx_queues = 0;
1541         rte_free(dev->data->rx_queues);
1542         dev->data->rx_queues = NULL;
1543         dev->data->nb_tx_queues = 0;
1544         rte_free(dev->data->tx_queues);
1545         dev->data->tx_queues = NULL;
1546 }
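/*
 * Example (illustrative sketch only): the usual teardown order is to stop
 * the port before closing it. Whether close frees all port data here
 * depends on the RTE_ETH_DEV_CLOSE_REMOVE flag handled above.
 *
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_close(port_id);
 */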
1547
1548 int
1549 rte_eth_dev_reset(uint16_t port_id)
1550 {
1551         struct rte_eth_dev *dev;
1552         int ret;
1553
1554         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1555         dev = &rte_eth_devices[port_id];
1556
1557         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1558
1559         rte_eth_dev_stop(port_id);
1560         ret = dev->dev_ops->dev_reset(dev);
1561
1562         return eth_err(port_id, ret);
1563 }
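/*
 * Example (illustrative sketch; reconfigure_port() is a hypothetical
 * application helper): rte_eth_dev_reset() is typically invoked from the
 * RTE_ETH_EVENT_INTR_RESET event handler, after which the application
 * itself reconfigures and restarts the port.
 *
 *	if (rte_eth_dev_reset(port_id) == 0)
 *		reconfigure_port(port_id);
 */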
1564
1565 int __rte_experimental
1566 rte_eth_dev_is_removed(uint16_t port_id)
1567 {
1568         struct rte_eth_dev *dev;
1569         int ret;
1570
1571         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1572
1573         dev = &rte_eth_devices[port_id];
1574
1575         if (dev->state == RTE_ETH_DEV_REMOVED)
1576                 return 1;
1577
1578         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1579
1580         ret = dev->dev_ops->is_removed(dev);
1581         if (ret != 0)
1582                 /* Device is physically removed. */
1583                 dev->state = RTE_ETH_DEV_REMOVED;
1584
1585         return ret;
1586 }
1587
1588 int
1589 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1590                        uint16_t nb_rx_desc, unsigned int socket_id,
1591                        const struct rte_eth_rxconf *rx_conf,
1592                        struct rte_mempool *mp)
1593 {
1594         int ret;
1595         uint32_t mbp_buf_size;
1596         struct rte_eth_dev *dev;
1597         struct rte_eth_dev_info dev_info;
1598         struct rte_eth_rxconf local_conf;
1599         void **rxq;
1600
1601         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1602
1603         dev = &rte_eth_devices[port_id];
1604         if (rx_queue_id >= dev->data->nb_rx_queues) {
1605                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1606                 return -EINVAL;
1607         }
1608
1609         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1610         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1611
1612         /*
1613          * Check the size of the mbuf data buffer.
1614          * This value must be provided in the private data of the memory pool.
1615          * First check that the memory pool has a valid private data.
1616          */
1617         rte_eth_dev_info_get(port_id, &dev_info);
1618         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1619                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1620                         mp->name, (int)mp->private_data_size,
1621                         (int)sizeof(struct rte_pktmbuf_pool_private));
1622                 return -ENOSPC;
1623         }
1624         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1625
1626         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1627                 RTE_ETHDEV_LOG(ERR,
1628                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1629                         mp->name, (int)mbp_buf_size,
1630                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1631                         (int)RTE_PKTMBUF_HEADROOM,
1632                         (int)dev_info.min_rx_bufsize);
1633                 return -EINVAL;
1634         }
1635
1636         /* Use the default specified by the driver if nb_rx_desc is zero */
1637         if (nb_rx_desc == 0) {
1638                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1639                 /* If driver default is also zero, fall back on EAL default */
1640                 if (nb_rx_desc == 0)
1641                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1642         }
1643
1644         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1645                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1646                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1647
1648                 RTE_ETHDEV_LOG(ERR,
1649                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1650                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1651                         dev_info.rx_desc_lim.nb_min,
1652                         dev_info.rx_desc_lim.nb_align);
1653                 return -EINVAL;
1654         }
1655
1656         if (dev->data->dev_started &&
1657                 !(dev_info.dev_capa &
1658                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1659                 return -EBUSY;
1660
1661         if (dev->data->dev_started &&
1662                 (dev->data->rx_queue_state[rx_queue_id] !=
1663                         RTE_ETH_QUEUE_STATE_STOPPED))
1664                 return -EBUSY;
1665
1666         rxq = dev->data->rx_queues;
1667         if (rxq[rx_queue_id]) {
1668                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1669                                         -ENOTSUP);
1670                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1671                 rxq[rx_queue_id] = NULL;
1672         }
1673
1674         if (rx_conf == NULL)
1675                 rx_conf = &dev_info.default_rxconf;
1676
1677         local_conf = *rx_conf;
1678
1679         /*
1680          * If an offload has already been enabled in
1681          * rte_eth_dev_configure(), it is enabled on all queues,
1682          * so there is no need to enable it on this queue again.
1683          * The local_conf.offloads passed to the underlying PMD only
1684          * carries those offloads enabled on this queue alone and
1685          * not enabled on all queues.
1686          */
1687         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1688
1689         /*
1690          * Newly added offloads for this queue are those not enabled in
1691          * rte_eth_dev_configure() and they must be of the per-queue type.
1692          * A pure per-port offload can't be enabled on one queue while
1693          * disabled on another queue, and it can't be newly enabled on
1694          * a queue if it hasn't already been enabled in
1695          * rte_eth_dev_configure().
1696          */
1697         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1698              local_conf.offloads) {
1699                 RTE_ETHDEV_LOG(ERR,
1700                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1701                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1702                         port_id, rx_queue_id, local_conf.offloads,
1703                         dev_info.rx_queue_offload_capa,
1704                         __func__);
1705                 return -EINVAL;
1706         }
1707
1708         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1709                                               socket_id, &local_conf, mp);
1710         if (!ret) {
1711                 if (!dev->data->min_rx_buf_size ||
1712                     dev->data->min_rx_buf_size > mbp_buf_size)
1713                         dev->data->min_rx_buf_size = mbp_buf_size;
1714         }
1715
1716         return eth_err(port_id, ret);
1717 }
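/*
 * Example (illustrative sketch; pool sizing values are arbitrary): one RX
 * queue backed by a new mbuf pool on the port's NUMA socket. Passing 0
 * descriptors and a NULL rx_conf selects the driver/EAL defaults resolved
 * by the code above.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *		256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *		rte_eth_dev_socket_id(port_id));
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, 0,
 *		rte_eth_dev_socket_id(port_id), NULL, mp);
 */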
1718
1719 int
1720 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1721                        uint16_t nb_tx_desc, unsigned int socket_id,
1722                        const struct rte_eth_txconf *tx_conf)
1723 {
1724         struct rte_eth_dev *dev;
1725         struct rte_eth_dev_info dev_info;
1726         struct rte_eth_txconf local_conf;
1727         void **txq;
1728
1729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1730
1731         dev = &rte_eth_devices[port_id];
1732         if (tx_queue_id >= dev->data->nb_tx_queues) {
1733                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1734                 return -EINVAL;
1735         }
1736
1737         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1738         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1739
1740         rte_eth_dev_info_get(port_id, &dev_info);
1741
1742         /* Use the default specified by the driver if nb_tx_desc is zero */
1743         if (nb_tx_desc == 0) {
1744                 nb_tx_desc = dev_info.default_txportconf.ring_size;
1745                 /* If driver default is also zero, fall back on EAL default */
1746                 if (nb_tx_desc == 0)
1747                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
1748         }
1749         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1750             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1751             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1752                 RTE_ETHDEV_LOG(ERR,
1753                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1754                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
1755                         dev_info.tx_desc_lim.nb_min,
1756                         dev_info.tx_desc_lim.nb_align);
1757                 return -EINVAL;
1758         }
1759
1760         if (dev->data->dev_started &&
1761                 !(dev_info.dev_capa &
1762                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
1763                 return -EBUSY;
1764
1765         if (dev->data->dev_started &&
1766                 (dev->data->tx_queue_state[tx_queue_id] !=
1767                         RTE_ETH_QUEUE_STATE_STOPPED))
1768                 return -EBUSY;
1769
1770         txq = dev->data->tx_queues;
1771         if (txq[tx_queue_id]) {
1772                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1773                                         -ENOTSUP);
1774                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1775                 txq[tx_queue_id] = NULL;
1776         }
1777
1778         if (tx_conf == NULL)
1779                 tx_conf = &dev_info.default_txconf;
1780
1781         local_conf = *tx_conf;
1782
1783         /*
1784          * If an offload has already been enabled in
1785          * rte_eth_dev_configure(), it is enabled on all queues,
1786          * so there is no need to enable it on this queue again.
1787          * The local_conf.offloads passed to the underlying PMD only
1788          * carries those offloads enabled on this queue alone and
1789          * not enabled on all queues.
1790          */
1791         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
1792
1793         /*
1794          * Newly added offloads for this queue are those not enabled in
1795          * rte_eth_dev_configure() and they must be of the per-queue type.
1796          * A pure per-port offload can't be enabled on one queue while
1797          * disabled on another queue, and it can't be newly enabled on
1798          * a queue if it hasn't already been enabled in
1799          * rte_eth_dev_configure().
1800          */
1801         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
1802              local_conf.offloads) {
1803                 RTE_ETHDEV_LOG(ERR,
1804                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1805                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1806                         port_id, tx_queue_id, local_conf.offloads,
1807                         dev_info.tx_queue_offload_capa,
1808                         __func__);
1809                 return -EINVAL;
1810         }
1811
1812         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1813                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1814 }
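/*
 * Example (illustrative sketch): a TX queue mirroring the RX example
 * above; 0 descriptors and a NULL tx_conf pick the defaults resolved by
 * the code above.
 *
 *	int ret = rte_eth_tx_queue_setup(port_id, 0, 0,
 *		rte_eth_dev_socket_id(port_id), NULL);
 */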
1815
1816 void
1817 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1818                 void *userdata __rte_unused)
1819 {
1820         unsigned i;
1821
1822         for (i = 0; i < unsent; i++)
1823                 rte_pktmbuf_free(pkts[i]);
1824 }
1825
1826 void
1827 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1828                 void *userdata)
1829 {
1830         uint64_t *count = userdata;
1831         unsigned i;
1832
1833         for (i = 0; i < unsent; i++)
1834                 rte_pktmbuf_free(pkts[i]);
1835
1836         *count += unsent;
1837 }
1838
1839 int
1840 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1841                 buffer_tx_error_fn cbfn, void *userdata)
1842 {
1843         buffer->error_callback = cbfn;
1844         buffer->error_userdata = userdata;
1845         return 0;
1846 }
1847
1848 int
1849 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1850 {
1851         int ret = 0;
1852
1853         if (buffer == NULL)
1854                 return -EINVAL;
1855
1856         buffer->size = size;
1857         if (buffer->error_callback == NULL) {
1858                 ret = rte_eth_tx_buffer_set_err_callback(
1859                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1860         }
1861
1862         return ret;
1863 }
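/*
 * Example (illustrative sketch): a TX buffer for up to 32 packets that
 * counts drops via the callback defined above. The counter must outlive
 * the buffer because the callback stores a pointer to it.
 *
 *	static uint64_t dropped;
 *	struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buf",
 *		RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *		rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *		rte_eth_tx_buffer_count_callback, &dropped);
 */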
1864
1865 int
1866 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1867 {
1868         struct rte_eth_dev *dev;
1869         int ret;
1870         /* Validate input data; bail if not valid or not supported. */
1871         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1872         dev = &rte_eth_devices[port_id];
1873         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1874
1875         /* Call driver to free pending mbufs. */
1876         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1877                                                free_cnt);
1878         return eth_err(port_id, ret);
1879 }
1880
1881 void
1882 rte_eth_promiscuous_enable(uint16_t port_id)
1883 {
1884         struct rte_eth_dev *dev;
1885
1886         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1887         dev = &rte_eth_devices[port_id];
1888
1889         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1890         (*dev->dev_ops->promiscuous_enable)(dev);
1891         dev->data->promiscuous = 1;
1892 }
1893
1894 void
1895 rte_eth_promiscuous_disable(uint16_t port_id)
1896 {
1897         struct rte_eth_dev *dev;
1898
1899         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1900         dev = &rte_eth_devices[port_id];
1901
1902         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1903         dev->data->promiscuous = 0;
1904         (*dev->dev_ops->promiscuous_disable)(dev);
1905 }
1906
1907 int
1908 rte_eth_promiscuous_get(uint16_t port_id)
1909 {
1910         struct rte_eth_dev *dev;
1911
1912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1913
1914         dev = &rte_eth_devices[port_id];
1915         return dev->data->promiscuous;
1916 }
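/*
 * Example (illustrative sketch): enabling promiscuous mode and verifying
 * it took effect. The enable call returns void in this API version, so
 * the get accessor is the only confirmation available.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	if (rte_eth_promiscuous_get(port_id) != 1)
 *		printf("port %u: promiscuous mode not enabled\n", port_id);
 */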
1917
1918 void
1919 rte_eth_allmulticast_enable(uint16_t port_id)
1920 {
1921         struct rte_eth_dev *dev;
1922
1923         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1924         dev = &rte_eth_devices[port_id];
1925
1926         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1927         (*dev->dev_ops->allmulticast_enable)(dev);
1928         dev->data->all_multicast = 1;
1929 }
1930
1931 void
1932 rte_eth_allmulticast_disable(uint16_t port_id)
1933 {
1934         struct rte_eth_dev *dev;
1935
1936         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1937         dev = &rte_eth_devices[port_id];
1938
1939         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1940         dev->data->all_multicast = 0;
1941         (*dev->dev_ops->allmulticast_disable)(dev);
1942 }
1943
1944 int
1945 rte_eth_allmulticast_get(uint16_t port_id)
1946 {
1947         struct rte_eth_dev *dev;
1948
1949         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1950
1951         dev = &rte_eth_devices[port_id];
1952         return dev->data->all_multicast;
1953 }
1954
1955 void
1956 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1957 {
1958         struct rte_eth_dev *dev;
1959
1960         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1961         dev = &rte_eth_devices[port_id];
1962
1963         if (dev->data->dev_conf.intr_conf.lsc &&
1964             dev->data->dev_started)
1965                 rte_eth_linkstatus_get(dev, eth_link);
1966         else {
1967                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1968                 (*dev->dev_ops->link_update)(dev, 1);
1969                 *eth_link = dev->data->dev_link;
1970         }
1971 }
1972
1973 void
1974 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1975 {
1976         struct rte_eth_dev *dev;
1977
1978         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1979         dev = &rte_eth_devices[port_id];
1980
1981         if (dev->data->dev_conf.intr_conf.lsc &&
1982             dev->data->dev_started)
1983                 rte_eth_linkstatus_get(dev, eth_link);
1984         else {
1985                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1986                 (*dev->dev_ops->link_update)(dev, 0);
1987                 *eth_link = dev->data->dev_link;
1988         }
1989 }
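/*
 * Example (illustrative sketch): a non-blocking link poll. Unlike
 * rte_eth_link_get(), the nowait variant above does not wait for the
 * link to come up.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */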
1990
1991 int
1992 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1993 {
1994         struct rte_eth_dev *dev;
1995
1996         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1997
1998         dev = &rte_eth_devices[port_id];
1999         memset(stats, 0, sizeof(*stats));
2000
2001         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2002         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2003         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2004 }
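/*
 * Example (illustrative sketch): reading the basic counters; rx_nombuf is
 * filled from dev->data by the wrapper above before the PMD callback runs.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64"\n",
 *			st.ipackets, st.opackets, st.imissed);
 */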
2005
2006 int
2007 rte_eth_stats_reset(uint16_t port_id)
2008 {
2009         struct rte_eth_dev *dev;
2010
2011         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2012         dev = &rte_eth_devices[port_id];
2013
2014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2015         (*dev->dev_ops->stats_reset)(dev);
2016         dev->data->rx_mbuf_alloc_failed = 0;
2017
2018         return 0;
2019 }
2020
2021 static inline int
2022 get_xstats_basic_count(struct rte_eth_dev *dev)
2023 {
2024         uint16_t nb_rxqs, nb_txqs;
2025         int count;
2026
2027         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2028         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2029
2030         count = RTE_NB_STATS;
2031         count += nb_rxqs * RTE_NB_RXQ_STATS;
2032         count += nb_txqs * RTE_NB_TXQ_STATS;
2033
2034         return count;
2035 }
2036
2037 static int
2038 get_xstats_count(uint16_t port_id)
2039 {
2040         struct rte_eth_dev *dev;
2041         int count;
2042
2043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2044         dev = &rte_eth_devices[port_id];
2045         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2046                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2047                                 NULL, 0);
2048                 if (count < 0)
2049                         return eth_err(port_id, count);
2050         }
2051         if (dev->dev_ops->xstats_get_names != NULL) {
2052                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2053                 if (count < 0)
2054                         return eth_err(port_id, count);
2055         } else
2056                 count = 0;
2057
2058
2059         count += get_xstats_basic_count(dev);
2060
2061         return count;
2062 }
2063
2064 int
2065 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2066                 uint64_t *id)
2067 {
2068         int cnt_xstats, idx_xstat;
2069
2070         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2071
2072         if (!id) {
2073                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2074                 return -ENOMEM;
2075         }
2076
2077         if (!xstat_name) {
2078                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2079                 return -ENOMEM;
2080         }
2081
2082         /* Get count */
2083         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2084         if (cnt_xstats < 0) {
2085                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2086                 return -ENODEV;
2087         }
2088
2089         /* Get id-name lookup table */
2090         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2091
2092         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2093                         port_id, xstats_names, cnt_xstats, NULL)) {
2094                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2095                 return -1;
2096         }
2097
2098         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2099                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2100                         *id = idx_xstat;
2101                         return 0;
2102                 }
2103         }
2104
2105         return -EINVAL;
2106 }
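/*
 * Example (illustrative sketch): resolving a single xstat by name, then
 * fetching only that counter through the by-id path below.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */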
2107
2108 /* retrieve basic stats names */
2109 static int
2110 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2111         struct rte_eth_xstat_name *xstats_names)
2112 {
2113         int cnt_used_entries = 0;
2114         uint32_t idx, id_queue;
2115         uint16_t num_q;
2116
2117         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2118                 snprintf(xstats_names[cnt_used_entries].name,
2119                         sizeof(xstats_names[0].name),
2120                         "%s", rte_stats_strings[idx].name);
2121                 cnt_used_entries++;
2122         }
2123         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2124         for (id_queue = 0; id_queue < num_q; id_queue++) {
2125                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2126                         snprintf(xstats_names[cnt_used_entries].name,
2127                                 sizeof(xstats_names[0].name),
2128                                 "rx_q%u%s",
2129                                 id_queue, rte_rxq_stats_strings[idx].name);
2130                         cnt_used_entries++;
2131                 }
2132
2133         }
2134         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2135         for (id_queue = 0; id_queue < num_q; id_queue++) {
2136                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2137                         snprintf(xstats_names[cnt_used_entries].name,
2138                                 sizeof(xstats_names[0].name),
2139                                 "tx_q%u%s",
2140                                 id_queue, rte_txq_stats_strings[idx].name);
2141                         cnt_used_entries++;
2142                 }
2143         }
2144         return cnt_used_entries;
2145 }
2146
2147 /* retrieve ethdev extended statistics names */
2148 int
2149 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2150         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2151         uint64_t *ids)
2152 {
2153         struct rte_eth_xstat_name *xstats_names_copy;
2154         unsigned int no_basic_stat_requested = 1;
2155         unsigned int no_ext_stat_requested = 1;
2156         unsigned int expected_entries;
2157         unsigned int basic_count;
2158         struct rte_eth_dev *dev;
2159         unsigned int i;
2160         int ret;
2161
2162         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2163         dev = &rte_eth_devices[port_id];
2164
2165         basic_count = get_xstats_basic_count(dev);
2166         ret = get_xstats_count(port_id);
2167         if (ret < 0)
2168                 return ret;
2169         expected_entries = (unsigned int)ret;
2170
2171         /* Return max number of stats if no ids given */
2172         if (!ids) {
2173                 if (!xstats_names)
2174                         return expected_entries;
2175                 else if (xstats_names && size < expected_entries)
2176                         return expected_entries;
2177         }
2178
2179         if (ids && !xstats_names)
2180                 return -EINVAL;
2181
2182         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2183                 uint64_t ids_copy[size];
2184
2185                 for (i = 0; i < size; i++) {
2186                         if (ids[i] < basic_count) {
2187                                 no_basic_stat_requested = 0;
2188                                 break;
2189                         }
2190
2191                         /*
2192                          * Convert ids to the xstats ids that the PMD knows;
2193                          * the ids seen by the user cover basic + extended stats.
2194                          */
2195                         ids_copy[i] = ids[i] - basic_count;
2196                 }
2197
2198                 if (no_basic_stat_requested)
2199                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2200                                         xstats_names, ids_copy, size);
2201         }
2202
2203         /* Retrieve all stats */
2204         if (!ids) {
2205                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2206                                 expected_entries);
2207                 if (num_stats < 0 || num_stats > (int)expected_entries)
2208                         return num_stats;
2209                 else
2210                         return expected_entries;
2211         }
2212
2213         xstats_names_copy = calloc(expected_entries,
2214                 sizeof(struct rte_eth_xstat_name));
2215
2216         if (!xstats_names_copy) {
2217                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2218                 return -ENOMEM;
2219         }
2220
2221         if (ids) {
2222                 for (i = 0; i < size; i++) {
2223                         if (ids[i] >= basic_count) {
2224                                 no_ext_stat_requested = 0;
2225                                 break;
2226                         }
2227                 }
2228         }
2229
2230         /* Fill xstats_names_copy structure */
2231         if (ids && no_ext_stat_requested) {
2232                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2233         } else {
2234                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2235                         expected_entries);
2236                 if (ret < 0) {
2237                         free(xstats_names_copy);
2238                         return ret;
2239                 }
2240         }
2241
2242         /* Filter stats */
2243         for (i = 0; i < size; i++) {
2244                 if (ids[i] >= expected_entries) {
2245                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2246                         free(xstats_names_copy);
2247                         return -1;
2248                 }
2249                 xstats_names[i] = xstats_names_copy[ids[i]];
2250         }
2251
2252         free(xstats_names_copy);
2253         return size;
2254 }
2255
2256 int
2257 rte_eth_xstats_get_names(uint16_t port_id,
2258         struct rte_eth_xstat_name *xstats_names,
2259         unsigned int size)
2260 {
2261         struct rte_eth_dev *dev;
2262         int cnt_used_entries;
2263         int cnt_expected_entries;
2264         int cnt_driver_entries;
2265
2266         cnt_expected_entries = get_xstats_count(port_id);
2267         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2268                         (int)size < cnt_expected_entries)
2269                 return cnt_expected_entries;
2270
2271         /* port_id checked in get_xstats_count() */
2272         dev = &rte_eth_devices[port_id];
2273
2274         cnt_used_entries = rte_eth_basic_stats_get_names(
2275                 dev, xstats_names);
2276
2277         if (dev->dev_ops->xstats_get_names != NULL) {
2278                 /* If there are any driver-specific xstats, append them
2279                  * to end of list.
2280                  */
2281                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2282                         dev,
2283                         xstats_names + cnt_used_entries,
2284                         size - cnt_used_entries);
2285                 if (cnt_driver_entries < 0)
2286                         return eth_err(port_id, cnt_driver_entries);
2287                 cnt_used_entries += cnt_driver_entries;
2288         }
2289
2290         return cnt_used_entries;
2291 }
2292
2293
2294 static int
2295 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2296 {
2297         struct rte_eth_dev *dev;
2298         struct rte_eth_stats eth_stats;
2299         unsigned int count = 0, i, q;
2300         uint64_t val, *stats_ptr;
2301         uint16_t nb_rxqs, nb_txqs;
2302         int ret;
2303
2304         ret = rte_eth_stats_get(port_id, &eth_stats);
2305         if (ret < 0)
2306                 return ret;
2307
2308         dev = &rte_eth_devices[port_id];
2309
2310         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2311         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2312
2313         /* global stats */
2314         for (i = 0; i < RTE_NB_STATS; i++) {
2315                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2316                                         rte_stats_strings[i].offset);
2317                 val = *stats_ptr;
2318                 xstats[count++].value = val;
2319         }
2320
2321         /* per-rxq stats */
2322         for (q = 0; q < nb_rxqs; q++) {
2323                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2324                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2325                                         rte_rxq_stats_strings[i].offset +
2326                                         q * sizeof(uint64_t));
2327                         val = *stats_ptr;
2328                         xstats[count++].value = val;
2329                 }
2330         }
2331
2332         /* per-txq stats */
2333         for (q = 0; q < nb_txqs; q++) {
2334                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2335                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2336                                         rte_txq_stats_strings[i].offset +
2337                                         q * sizeof(uint64_t));
2338                         val = *stats_ptr;
2339                         xstats[count++].value = val;
2340                 }
2341         }
2342         return count;
2343 }
2344
2345 /* retrieve ethdev extended statistics */
2346 int
2347 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2348                          uint64_t *values, unsigned int size)
2349 {
2350         unsigned int no_basic_stat_requested = 1;
2351         unsigned int no_ext_stat_requested = 1;
2352         unsigned int num_xstats_filled;
2353         unsigned int basic_count;
2354         uint16_t expected_entries;
2355         struct rte_eth_dev *dev;
2356         unsigned int i;
2357         int ret;
2358
2359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2360         ret = get_xstats_count(port_id);
2361         if (ret < 0)
2362                 return ret;
2363         expected_entries = (uint16_t)ret;
2364         struct rte_eth_xstat xstats[expected_entries];
2365         dev = &rte_eth_devices[port_id];
2366         basic_count = get_xstats_basic_count(dev);
2367
2368         /* Return max number of stats if no ids given */
2369         if (!ids) {
2370                 if (!values)
2371                         return expected_entries;
2372                 else if (values && size < expected_entries)
2373                         return expected_entries;
2374         }
2375
2376         if (ids && !values)
2377                 return -EINVAL;
2378
2379         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2380                 uint64_t ids_copy[size];
2381
2382
2383                 for (i = 0; i < size; i++) {
2384                         if (ids[i] < basic_count) {
2385                                 no_basic_stat_requested = 0;
2386                                 break;
2387                         }
2388
2389                         /*
2390                          * Convert ids to the xstats ids that the PMD knows;
2391                          * the ids seen by the user cover basic + extended stats.
2392                          */
2393                         ids_copy[i] = ids[i] - basic_count;
2394                 }
2395
2396                 if (no_basic_stat_requested)
2397                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2398                                         values, size);
2399         }
2400
2401         if (ids) {
2402                 for (i = 0; i < size; i++) {
2403                         if (ids[i] >= basic_count) {
2404                                 no_ext_stat_requested = 0;
2405                                 break;
2406                         }
2407                 }
2408         }
2409
2410         /* Fill the xstats structure */
2411         if (ids && no_ext_stat_requested)
2412                 ret = rte_eth_basic_stats_get(port_id, xstats);
2413         else
2414                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2415
2416         if (ret < 0)
2417                 return ret;
2418         num_xstats_filled = (unsigned int)ret;
2419
2420         /* Return all stats */
2421         if (!ids) {
2422                 for (i = 0; i < num_xstats_filled; i++)
2423                         values[i] = xstats[i].value;
2424                 return expected_entries;
2425         }
2426
2427         /* Filter stats */
2428         for (i = 0; i < size; i++) {
2429                 if (ids[i] >= expected_entries) {
2430                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2431                         return -1;
2432                 }
2433                 values[i] = xstats[ids[i]].value;
2434         }
2435         return size;
2436 }
2437
2438 int
2439 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2440         unsigned int n)
2441 {
2442         struct rte_eth_dev *dev;
2443         unsigned int count = 0, i;
2444         signed int xcount = 0;
2445         uint16_t nb_rxqs, nb_txqs;
2446         int ret;
2447
2448         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2449
2450         dev = &rte_eth_devices[port_id];
2451
2452         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2453         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2454
2455         /* Return generic statistics */
2456         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2457                 (nb_txqs * RTE_NB_TXQ_STATS);
2458
2459         /* implemented by the driver */
2460         if (dev->dev_ops->xstats_get != NULL) {
2461                 /* Retrieve the xstats from the driver at the end of the
2462                  * xstats struct.
2463                  */
2464                 xcount = (*dev->dev_ops->xstats_get)(dev,
2465                                      xstats ? xstats + count : NULL,
2466                                      (n > count) ? n - count : 0);
2467
2468                 if (xcount < 0)
2469                         return eth_err(port_id, xcount);
2470         }
2471
2472         if (n < count + xcount || xstats == NULL)
2473                 return count + xcount;
2474
2475         /* now fill the xstats structure */
2476         ret = rte_eth_basic_stats_get(port_id, xstats);
2477         if (ret < 0)
2478                 return ret;
2479         count = ret;
2480
2481         for (i = 0; i < count; i++)
2482                 xstats[i].id = i;
2483         /* add an offset to driver-specific stats */
2484         for ( ; i < count + xcount; i++)
2485                 xstats[i].id += count;
2486
2487         return count + xcount;
2488 }
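/*
 * Example (illustrative sketch without full error handling): the usual
 * two-call pattern; query the count with a NULL array, allocate, then
 * fetch names and values, which share the same indexing.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	if (rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, vals, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %"PRIu64"\n",
 *				names[i].name, vals[i].value);
 */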
2489
2490 /* reset ethdev extended statistics */
2491 void
2492 rte_eth_xstats_reset(uint16_t port_id)
2493 {
2494         struct rte_eth_dev *dev;
2495
2496         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2497         dev = &rte_eth_devices[port_id];
2498
2499         /* implemented by the driver */
2500         if (dev->dev_ops->xstats_reset != NULL) {
2501                 (*dev->dev_ops->xstats_reset)(dev);
2502                 return;
2503         }
2504
2505         /* fallback to default */
2506         rte_eth_stats_reset(port_id);
2507 }
2508
2509 static int
2510 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2511                 uint8_t is_rx)
2512 {
2513         struct rte_eth_dev *dev;
2514
2515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2516
2517         dev = &rte_eth_devices[port_id];
2518
2519         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2520
2521         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2522                 return -EINVAL;
2523
2524         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2525                 return -EINVAL;
2526
2527         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2528                 return -EINVAL;
2529
2530         return (*dev->dev_ops->queue_stats_mapping_set)
2531                         (dev, queue_id, stat_idx, is_rx);
2532 }
2533
2534
2535 int
2536 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2537                 uint8_t stat_idx)
2538 {
2539         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2540                                                 stat_idx, STAT_QMAP_TX));
2541 }
2542
2543
2544 int
2545 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2546                 uint8_t stat_idx)
2547 {
2548         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2549                                                 stat_idx, STAT_QMAP_RX));
2550 }
2551
2552 int
2553 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2554 {
2555         struct rte_eth_dev *dev;
2556
2557         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2558         dev = &rte_eth_devices[port_id];
2559
2560         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2561         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2562                                                         fw_version, fw_size));
2563 }
2564
2565 void
2566 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2567 {
2568         struct rte_eth_dev *dev;
2569         const struct rte_eth_desc_lim lim = {
2570                 .nb_max = UINT16_MAX,
2571                 .nb_min = 0,
2572                 .nb_align = 1,
2573         };
2574
2575         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2576         dev = &rte_eth_devices[port_id];
2577
2578         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2579         dev_info->rx_desc_lim = lim;
2580         dev_info->tx_desc_lim = lim;
2581         dev_info->device = dev->device;
2582
2583         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2584         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2585         dev_info->driver_name = dev->device->driver->name;
2586         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2587         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2588
2589         dev_info->dev_flags = &dev->data->dev_flags;
2590 }
2591
2592 int
2593 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2594                                  uint32_t *ptypes, int num)
2595 {
2596         int i, j;
2597         struct rte_eth_dev *dev;
2598         const uint32_t *all_ptypes;
2599
2600         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2601         dev = &rte_eth_devices[port_id];
2602         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2603         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2604
2605         if (!all_ptypes)
2606                 return 0;
2607
2608         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2609                 if (all_ptypes[i] & ptype_mask) {
2610                         if (j < num)
2611                                 ptypes[j] = all_ptypes[i];
2612                         j++;
2613                 }
2614
2615         return j;
2616 }
2617
2618 void
2619 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2620 {
2621         struct rte_eth_dev *dev;
2622
2623         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2624         dev = &rte_eth_devices[port_id];
2625         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2626 }
2627
2628
2629 int
2630 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2631 {
2632         struct rte_eth_dev *dev;
2633
2634         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2635
2636         dev = &rte_eth_devices[port_id];
2637         *mtu = dev->data->mtu;
2638         return 0;
2639 }
2640
2641 int
2642 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2643 {
2644         int ret;
2645         struct rte_eth_dev *dev;
2646
2647         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2648         dev = &rte_eth_devices[port_id];
2649         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2650
2651         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2652         if (!ret)
2653                 dev->data->mtu = mtu;
2654
2655         return eth_err(port_id, ret);
2656 }
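/*
 * Example (illustrative sketch): raising the MTU for jumbo frames. The
 * cached value returned by rte_eth_dev_get_mtu() is only updated on
 * success, as implemented above.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *	    rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *		printf("port %u MTU is now %u\n", port_id, mtu);
 */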
2657
2658 int
2659 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2660 {
2661         struct rte_eth_dev *dev;
2662         int ret;
2663
2664         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2665         dev = &rte_eth_devices[port_id];
2666         if (!(dev->data->dev_conf.rxmode.offloads &
2667               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2668                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
2669                         port_id);
2670                 return -ENOSYS;
2671         }
2672
2673         if (vlan_id > 4095) {
2674                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
2675                         port_id, vlan_id);
2676                 return -EINVAL;
2677         }
2678         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2679
2680         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2681         if (ret == 0) {
2682                 struct rte_vlan_filter_conf *vfc;
2683                 int vidx;
2684                 int vbit;
2685
2686                 vfc = &dev->data->vlan_filter_conf;
2687                 vidx = vlan_id / 64;
2688                 vbit = vlan_id % 64;
2689
2690                 if (on)
2691                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2692                 else
2693                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2694         }
2695
2696         return eth_err(port_id, ret);
2697 }
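/*
 * Example (illustrative sketch): admitting only VLAN 100. This requires
 * DEV_RX_OFFLOAD_VLAN_FILTER to have been set in rxmode.offloads when the
 * port was configured, otherwise the call above fails with -ENOSYS.
 *
 *	int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 *	if (ret < 0)
 *		printf("cannot add VLAN filter: %s\n", rte_strerror(-ret));
 */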
2698
2699 int
2700 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2701                                     int on)
2702 {
2703         struct rte_eth_dev *dev;
2704
2705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2706         dev = &rte_eth_devices[port_id];
2707         if (rx_queue_id >= dev->data->nb_rx_queues) {
2708                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
2709                 return -EINVAL;
2710         }
2711
2712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2713         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2714
2715         return 0;
2716 }
2717
2718 int
2719 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2720                                 enum rte_vlan_type vlan_type,
2721                                 uint16_t tpid)
2722 {
2723         struct rte_eth_dev *dev;
2724
2725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2726         dev = &rte_eth_devices[port_id];
2727         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2728
2729         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2730                                                                tpid));
2731 }
2732
2733 int
2734 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2735 {
2736         struct rte_eth_dev *dev;
2737         int ret = 0;
2738         int mask = 0;
2739         int cur, org = 0;
2740         uint64_t orig_offloads;
2741
2742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2743         dev = &rte_eth_devices[port_id];
2744
2745         /* save original values in case of failure */
2746         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2747
2748         /* check which options were changed by the application */
2749         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2750         org = !!(dev->data->dev_conf.rxmode.offloads &
2751                  DEV_RX_OFFLOAD_VLAN_STRIP);
2752         if (cur != org) {
2753                 if (cur)
2754                         dev->data->dev_conf.rxmode.offloads |=
2755                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2756                 else
2757                         dev->data->dev_conf.rxmode.offloads &=
2758                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2759                 mask |= ETH_VLAN_STRIP_MASK;
2760         }
2761
2762         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2763         org = !!(dev->data->dev_conf.rxmode.offloads &
2764                  DEV_RX_OFFLOAD_VLAN_FILTER);
2765         if (cur != org) {
2766                 if (cur)
2767                         dev->data->dev_conf.rxmode.offloads |=
2768                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2769                 else
2770                         dev->data->dev_conf.rxmode.offloads &=
2771                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2772                 mask |= ETH_VLAN_FILTER_MASK;
2773         }
2774
2775         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2776         org = !!(dev->data->dev_conf.rxmode.offloads &
2777                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2778         if (cur != org) {
2779                 if (cur)
2780                         dev->data->dev_conf.rxmode.offloads |=
2781                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2782                 else
2783                         dev->data->dev_conf.rxmode.offloads &=
2784                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2785                 mask |= ETH_VLAN_EXTEND_MASK;
2786         }
2787
2788         /* no change */
2789         if (mask == 0)
2790                 return ret;
2791
2792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2793         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2794         if (ret) {
2795                 /* hit an error, restore the original values */
2796                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2797         }
2798
2799         return eth_err(port_id, ret);
2800 }
2801
2802 int
2803 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2804 {
2805         struct rte_eth_dev *dev;
2806         int ret = 0;
2807
2808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2809         dev = &rte_eth_devices[port_id];
2810
2811         if (dev->data->dev_conf.rxmode.offloads &
2812             DEV_RX_OFFLOAD_VLAN_STRIP)
2813                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2814
2815         if (dev->data->dev_conf.rxmode.offloads &
2816             DEV_RX_OFFLOAD_VLAN_FILTER)
2817                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2818
2819         if (dev->data->dev_conf.rxmode.offloads &
2820             DEV_RX_OFFLOAD_VLAN_EXTEND)
2821                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2822
2823         return ret;
2824 }
2825
2826 int
2827 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2828 {
2829         struct rte_eth_dev *dev;
2830
2831         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2832         dev = &rte_eth_devices[port_id];
2833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2834
2835         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2836 }
2837
2838 int
2839 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2840 {
2841         struct rte_eth_dev *dev;
2842
2843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2844         dev = &rte_eth_devices[port_id];
2845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2846         memset(fc_conf, 0, sizeof(*fc_conf));
2847         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2848 }
2849
2850 int
2851 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2852 {
2853         struct rte_eth_dev *dev;
2854
2855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2856         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2857                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
2858                 return -EINVAL;
2859         }
2860
2861         dev = &rte_eth_devices[port_id];
2862         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2863         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2864 }
2865
2866 int
2867 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2868                                    struct rte_eth_pfc_conf *pfc_conf)
2869 {
2870         struct rte_eth_dev *dev;
2871
2872         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2873         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2874                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
2875                 return -EINVAL;
2876         }
2877
2878         dev = &rte_eth_devices[port_id];
2879         /* High water / low water validation is device-specific */
2880         if (*dev->dev_ops->priority_flow_ctrl_set)
2881                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2882                                         (dev, pfc_conf));
2883         return -ENOTSUP;
2884 }
2885
2886 static int
2887 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2888                         uint16_t reta_size)
2889 {
2890         uint16_t i, num;
2891
2892         if (!reta_conf)
2893                 return -EINVAL;
2894
2895         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2896         for (i = 0; i < num; i++) {
2897                 if (reta_conf[i].mask)
2898                         return 0;
2899         }
2900
2901         return -EINVAL;
2902 }
2903
2904 static int
2905 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2906                          uint16_t reta_size,
2907                          uint16_t max_rxq)
2908 {
2909         uint16_t i, idx, shift;
2910
2911         if (!reta_conf)
2912                 return -EINVAL;
2913
2914         if (max_rxq == 0) {
2915                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
2916                 return -EINVAL;
2917         }
2918
2919         for (i = 0; i < reta_size; i++) {
2920                 idx = i / RTE_RETA_GROUP_SIZE;
2921                 shift = i % RTE_RETA_GROUP_SIZE;
2922                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2923                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2924                         RTE_ETHDEV_LOG(ERR,
2925                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
2926                                 idx, shift,
2927                                 reta_conf[idx].reta[shift], max_rxq);
2928                         return -EINVAL;
2929                 }
2930         }
2931
2932         return 0;
2933 }
2934
2935 int
2936 rte_eth_dev_rss_reta_update(uint16_t port_id,
2937                             struct rte_eth_rss_reta_entry64 *reta_conf,
2938                             uint16_t reta_size)
2939 {
2940         struct rte_eth_dev *dev;
2941         int ret;
2942
2943         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2944         /* Check mask bits */
2945         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2946         if (ret < 0)
2947                 return ret;
2948
2949         dev = &rte_eth_devices[port_id];
2950
2951         /* Check entry value */
2952         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2953                                 dev->data->nb_rx_queues);
2954         if (ret < 0)
2955                 return ret;
2956
2957         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2958         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2959                                                              reta_size));
2960 }
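/*
 * Example (illustrative sketch): spreading the redirection table evenly
 * across nb_rx_queues. dev_info.reta_size is assumed to be a multiple of
 * RTE_RETA_GROUP_SIZE, matching the mask/entry layout checked above.
 *
 *	struct rte_eth_rss_reta_entry64 reta[dev_info.reta_size /
 *					     RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, dev_info.reta_size);
 */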
2961
2962 int
2963 rte_eth_dev_rss_reta_query(uint16_t port_id,
2964                            struct rte_eth_rss_reta_entry64 *reta_conf,
2965                            uint16_t reta_size)
2966 {
2967         struct rte_eth_dev *dev;
2968         int ret;
2969
2970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2971
2972         /* Check mask bits */
2973         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2974         if (ret < 0)
2975                 return ret;
2976
2977         dev = &rte_eth_devices[port_id];
2978         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2979         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2980                                                             reta_size));
2981 }
2982
2983 int
2984 rte_eth_dev_rss_hash_update(uint16_t port_id,
2985                             struct rte_eth_rss_conf *rss_conf)
2986 {
2987         struct rte_eth_dev *dev;
2988         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
2989
2990         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2991         dev = &rte_eth_devices[port_id];
2992         rte_eth_dev_info_get(port_id, &dev_info);
2993         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
2994             dev_info.flow_type_rss_offloads) {
2995                 RTE_ETHDEV_LOG(ERR,
2996                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
2997                         port_id, rss_conf->rss_hf,
2998                         dev_info.flow_type_rss_offloads);
2999                 return -EINVAL;
3000         }
3001         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3002         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3003                                                                  rss_conf));
3004 }
3005
3006 int
3007 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3008                               struct rte_eth_rss_conf *rss_conf)
3009 {
3010         struct rte_eth_dev *dev;
3011
3012         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3013         dev = &rte_eth_devices[port_id];
3014         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3015         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3016                                                                    rss_conf));
3017 }
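
/*
 * Usage sketch (illustrative only, not part of this library): intersect the
 * requested hash functions with what the port reports as supported before
 * calling the update, so the -EINVAL path above is never hit. Passing a
 * NULL rss_key leaves the device key unchanged.
 */
static int __rte_unused
example_rss_hash_narrow(uint16_t port_id, uint64_t wanted_hf)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Keep only the hash functions the device can compute. */
	rss_conf.rss_hf = wanted_hf & dev_info.flow_type_rss_offloads;

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}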
3018
3019 int
3020 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3021                                 struct rte_eth_udp_tunnel *udp_tunnel)
3022 {
3023         struct rte_eth_dev *dev;
3024
3025         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3026         if (udp_tunnel == NULL) {
3027                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3028                 return -EINVAL;
3029         }
3030
3031         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3032                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3033                 return -EINVAL;
3034         }
3035
3036         dev = &rte_eth_devices[port_id];
3037         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3038         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3039                                                                 udp_tunnel));
3040 }
3041
3042 int
3043 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3044                                    struct rte_eth_udp_tunnel *udp_tunnel)
3045 {
3046         struct rte_eth_dev *dev;
3047
3048         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3049         dev = &rte_eth_devices[port_id];
3050
3051         if (udp_tunnel == NULL) {
3052                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3053                 return -EINVAL;
3054         }
3055
3056         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3057                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3058                 return -EINVAL;
3059         }
3060
3061         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3062         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3063                                                                 udp_tunnel));
3064 }
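
/*
 * Usage sketch (illustrative only, not part of this library): register the
 * IANA-assigned VXLAN port so the device parses UDP/4789 payloads as VXLAN.
 * Returns -ENOTSUP on ports whose driver lacks udp_tunnel_port_add.
 */
static int __rte_unused
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}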
3065
3066 int
3067 rte_eth_led_on(uint16_t port_id)
3068 {
3069         struct rte_eth_dev *dev;
3070
3071         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3072         dev = &rte_eth_devices[port_id];
3073         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3074         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3075 }
3076
3077 int
3078 rte_eth_led_off(uint16_t port_id)
3079 {
3080         struct rte_eth_dev *dev;
3081
3082         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3083         dev = &rte_eth_devices[port_id];
3084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3085         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3086 }
3087
3088 /*
3089  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3090  * an empty spot.
3091  */
3092 static int
3093 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3094 {
3095         struct rte_eth_dev_info dev_info;
3096         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3097         unsigned i;
3098
3099         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3100         rte_eth_dev_info_get(port_id, &dev_info);
3101
3102         for (i = 0; i < dev_info.max_mac_addrs; i++)
3103                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
3104                         return i;
3105
3106         return -1;
3107 }
3108
3109 static const struct ether_addr null_mac_addr;
3110
3111 int
3112 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
3113                         uint32_t pool)
3114 {
3115         struct rte_eth_dev *dev;
3116         int index;
3117         uint64_t pool_mask;
3118         int ret;
3119
3120         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3121         dev = &rte_eth_devices[port_id];
3122         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3123
3124         if (is_zero_ether_addr(addr)) {
3125                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3126                         port_id);
3127                 return -EINVAL;
3128         }
3129         if (pool >= ETH_64_POOLS) {
3130                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3131                 return -EINVAL;
3132         }
3133
3134         index = get_mac_addr_index(port_id, addr);
3135         if (index < 0) {
3136                 index = get_mac_addr_index(port_id, &null_mac_addr);
3137                 if (index < 0) {
3138                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3139                                 port_id);
3140                         return -ENOSPC;
3141                 }
3142         } else {
3143                 pool_mask = dev->data->mac_pool_sel[index];
3144
3145                 /* If both MAC address and pool are already there, do nothing */
3146                 if (pool_mask & (1ULL << pool))
3147                         return 0;
3148         }
3149
3150         /* Update NIC */
3151         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3152
3153         if (ret == 0) {
3154                 /* Update address in NIC data structure */
3155                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3156
3157                 /* Update pool bitmap in NIC data structure */
3158                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3159         }
3160
3161         return eth_err(port_id, ret);
3162 }
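
/*
 * Usage sketch (illustrative only, not part of this library): accept
 * traffic for one extra, locally administered MAC address on pool 0.
 * A second call with the same address and pool returns 0 without
 * touching the hardware, as implemented above.
 */
static int __rte_unused
example_add_secondary_mac(uint16_t port_id)
{
	struct ether_addr extra = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_mac_addr_add(port_id, &extra, 0);
}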
3163
3164 int
3165 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
3166 {
3167         struct rte_eth_dev *dev;
3168         int index;
3169
3170         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3171         dev = &rte_eth_devices[port_id];
3172         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3173
3174         index = get_mac_addr_index(port_id, addr);
3175         if (index == 0) {
3176                 RTE_ETHDEV_LOG(ERR,
3177                         "Port %u: Cannot remove default MAC address\n",
3178                         port_id);
3179                 return -EADDRINUSE;
3180         } else if (index < 0)
3181                 return 0;  /* Do nothing if address wasn't found */
3182
3183         /* Update NIC */
3184         (*dev->dev_ops->mac_addr_remove)(dev, index);
3185
3186         /* Update address in NIC data structure */
3187         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3188
3189         /* reset pool bitmap */
3190         dev->data->mac_pool_sel[index] = 0;
3191
3192         return 0;
3193 }
3194
3195 int
3196 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3197 {
3198         struct rte_eth_dev *dev;
3199         int ret;
3200
3201         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3202
3203         if (!is_valid_assigned_ether_addr(addr))
3204                 return -EINVAL;
3205
3206         dev = &rte_eth_devices[port_id];
3207         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3208
3209         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3210         if (ret < 0)
3211                 return ret;
3212
3213         /* Update default address in NIC data structure */
3214         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3215
3216         return 0;
3217 }
3218
3219
3220 /*
3221  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3222  * an empty spot.
3223  */
3224 static int
3225 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3226 {
3227         struct rte_eth_dev_info dev_info;
3228         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3229         unsigned i;
3230
3231         rte_eth_dev_info_get(port_id, &dev_info);
3232         if (!dev->data->hash_mac_addrs)
3233                 return -1;
3234
3235         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3236                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3237                         ETHER_ADDR_LEN) == 0)
3238                         return i;
3239
3240         return -1;
3241 }
3242
3243 int
3244 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3245                                 uint8_t on)
3246 {
3247         int index;
3248         int ret;
3249         struct rte_eth_dev *dev;
3250
3251         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3252
3253         dev = &rte_eth_devices[port_id];
3254         if (is_zero_ether_addr(addr)) {
3255                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3256                         port_id);
3257                 return -EINVAL;
3258         }
3259
3260         index = get_hash_mac_addr_index(port_id, addr);
3261         /* Check if it's already there, and do nothing */
3262         if ((index >= 0) && on)
3263                 return 0;
3264
3265         if (index < 0) {
3266                 if (!on) {
3267                         RTE_ETHDEV_LOG(ERR,
3268                                 "Port %u: the MAC address was not set in UTA\n",
3269                                 port_id);
3270                         return -EINVAL;
3271                 }
3272
3273                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3274                 if (index < 0) {
3275                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3276                                 port_id);
3277                         return -ENOSPC;
3278                 }
3279         }
3280
3281         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3282         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3283         if (ret == 0) {
3284                 /* Update address in NIC data structure */
3285                 if (on)
3286                         ether_addr_copy(addr,
3287                                         &dev->data->hash_mac_addrs[index]);
3288                 else
3289                         ether_addr_copy(&null_mac_addr,
3290                                         &dev->data->hash_mac_addrs[index]);
3291         }
3292
3293         return eth_err(port_id, ret);
3294 }
3295
3296 int
3297 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3298 {
3299         struct rte_eth_dev *dev;
3300
3301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3302
3303         dev = &rte_eth_devices[port_id];
3304
3305         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3306         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3307                                                                        on));
3308 }
3309
3310 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3311                                         uint16_t tx_rate)
3312 {
3313         struct rte_eth_dev *dev;
3314         struct rte_eth_dev_info dev_info;
3315         struct rte_eth_link link;
3316
3317         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3318
3319         dev = &rte_eth_devices[port_id];
3320         rte_eth_dev_info_get(port_id, &dev_info);
3321         link = dev->data->dev_link;
3322
3323         if (queue_idx > dev_info.max_tx_queues) {
3324                 RTE_ETHDEV_LOG(ERR,
3325                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3326                         port_id, queue_idx);
3327                 return -EINVAL;
3328         }
3329
3330         if (tx_rate > link.link_speed) {
3331                 RTE_ETHDEV_LOG(ERR,
3332                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3333                         tx_rate, link.link_speed);
3334                 return -EINVAL;
3335         }
3336
3337         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3338         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3339                                                         queue_idx, tx_rate));
3340 }
3341
3342 int
3343 rte_eth_mirror_rule_set(uint16_t port_id,
3344                         struct rte_eth_mirror_conf *mirror_conf,
3345                         uint8_t rule_id, uint8_t on)
3346 {
3347         struct rte_eth_dev *dev;
3348
3349         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3350         if (mirror_conf->rule_type == 0) {
3351                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3352                 return -EINVAL;
3353         }
3354
3355         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3356                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3357                         ETH_64_POOLS - 1);
3358                 return -EINVAL;
3359         }
3360
3361         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3362              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3363             (mirror_conf->pool_mask == 0)) {
3364                 RTE_ETHDEV_LOG(ERR,
3365                         "Invalid mirror pool, pool mask cannot be 0\n");
3366                 return -EINVAL;
3367         }
3368
3369         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3370             mirror_conf->vlan.vlan_mask == 0) {
3371                 RTE_ETHDEV_LOG(ERR,
3372                         "Invalid VLAN mask, VLAN mask cannot be 0\n");
3373                 return -EINVAL;
3374         }
3375
3376         dev = &rte_eth_devices[port_id];
3377         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3378
3379         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3380                                                 mirror_conf, rule_id, on));
3381 }
3382
3383 int
3384 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3385 {
3386         struct rte_eth_dev *dev;
3387
3388         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3389
3390         dev = &rte_eth_devices[port_id];
3391         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3392
3393         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3394                                                                    rule_id));
3395 }
3396
3397 RTE_INIT(eth_dev_init_cb_lists)
3398 {
3399         int i;
3400
3401         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3402                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3403 }
3404
3405 int
3406 rte_eth_dev_callback_register(uint16_t port_id,
3407                         enum rte_eth_event_type event,
3408                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3409 {
3410         struct rte_eth_dev *dev;
3411         struct rte_eth_dev_callback *user_cb;
3412         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3413         uint16_t last_port;
3414
3415         if (!cb_fn)
3416                 return -EINVAL;
3417
3418         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3419                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3420                 return -EINVAL;
3421         }
3422
3423         if (port_id == RTE_ETH_ALL) {
3424                 next_port = 0;
3425                 last_port = RTE_MAX_ETHPORTS - 1;
3426         } else {
3427                 next_port = last_port = port_id;
3428         }
3429
3430         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3431
3432         do {
3433                 dev = &rte_eth_devices[next_port];
3434
3435                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3436                         if (user_cb->cb_fn == cb_fn &&
3437                                 user_cb->cb_arg == cb_arg &&
3438                                 user_cb->event == event) {
3439                                 break;
3440                         }
3441                 }
3442
3443                 /* create a new callback. */
3444                 if (user_cb == NULL) {
3445                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3446                                 sizeof(struct rte_eth_dev_callback), 0);
3447                         if (user_cb != NULL) {
3448                                 user_cb->cb_fn = cb_fn;
3449                                 user_cb->cb_arg = cb_arg;
3450                                 user_cb->event = event;
3451                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3452                                                   user_cb, next);
3453                         } else {
3454                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3455                                 rte_eth_dev_callback_unregister(port_id, event,
3456                                                                 cb_fn, cb_arg);
3457                                 return -ENOMEM;
3458                         }
3459
3460                 }
3461         } while (++next_port <= last_port);
3462
3463         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3464         return 0;
3465 }
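
/*
 * Usage sketch (illustrative only, not part of this library): log link
 * state changes on every port. RTE_ETH_ALL makes the loop above install
 * the callback on all RTE_MAX_ETHPORTS slots, including ports probed later.
 */
static int __rte_unused
example_on_link_change(uint16_t port_id, enum rte_eth_event_type event,
		       void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	RTE_ETHDEV_LOG(INFO, "Port %u: link state changed\n", port_id);
	return 0;
}

static int __rte_unused
example_watch_all_links(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_on_link_change, NULL);
}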
3466
3467 int
3468 rte_eth_dev_callback_unregister(uint16_t port_id,
3469                         enum rte_eth_event_type event,
3470                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3471 {
3472         int ret;
3473         struct rte_eth_dev *dev;
3474         struct rte_eth_dev_callback *cb, *next;
3475         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3476         uint16_t last_port;
3477
3478         if (!cb_fn)
3479                 return -EINVAL;
3480
3481         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3482                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3483                 return -EINVAL;
3484         }
3485
3486         if (port_id == RTE_ETH_ALL) {
3487                 next_port = 0;
3488                 last_port = RTE_MAX_ETHPORTS - 1;
3489         } else {
3490                 next_port = last_port = port_id;
3491         }
3492
3493         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3494
3495         do {
3496                 dev = &rte_eth_devices[next_port];
3497                 ret = 0;
3498                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3499                      cb = next) {
3500
3501                         next = TAILQ_NEXT(cb, next);
3502
3503                         if (cb->cb_fn != cb_fn || cb->event != event ||
3504                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3505                                 continue;
3506
3507                         /*
3508                          * if this callback is not executing right now,
3509                          * then remove it.
3510                          */
3511                         if (cb->active == 0) {
3512                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3513                                 rte_free(cb);
3514                         } else {
3515                                 ret = -EAGAIN;
3516                         }
3517                 }
3518         } while (++next_port <= last_port);
3519
3520         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3521         return ret;
3522 }
3523
3524 int
3525 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3526         enum rte_eth_event_type event, void *ret_param)
3527 {
3528         struct rte_eth_dev_callback *cb_lst;
3529         struct rte_eth_dev_callback dev_cb;
3530         int rc = 0;
3531
3532         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3533         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3534                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3535                         continue;
3536                 dev_cb = *cb_lst;
3537                 cb_lst->active = 1;
3538                 if (ret_param != NULL)
3539                         dev_cb.ret_param = ret_param;
3540
3541                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3542                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3543                                 dev_cb.cb_arg, dev_cb.ret_param);
3544                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3545                 cb_lst->active = 0;
3546         }
3547         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3548         return rc;
3549 }
3550
3551 void
3552 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
3553 {
3554         if (dev == NULL)
3555                 return;
3556
3557         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
3558
3559         dev->state = RTE_ETH_DEV_ATTACHED;
3560 }
3561
3562 int
3563 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3564 {
3565         uint32_t vec;
3566         struct rte_eth_dev *dev;
3567         struct rte_intr_handle *intr_handle;
3568         uint16_t qid;
3569         int rc;
3570
3571         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3572
3573         dev = &rte_eth_devices[port_id];
3574
3575         if (!dev->intr_handle) {
3576                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3577                 return -ENOTSUP;
3578         }
3579
3580         intr_handle = dev->intr_handle;
3581         if (!intr_handle->intr_vec) {
3582                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3583                 return -EPERM;
3584         }
3585
3586         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3587                 vec = intr_handle->intr_vec[qid];
3588                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3589                 if (rc && rc != -EEXIST) {
3590                         RTE_ETHDEV_LOG(ERR,
3591                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3592                                 port_id, qid, op, epfd, vec);
3593                 }
3594         }
3595
3596         return 0;
3597 }
3598
3599 int __rte_experimental
3600 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
3601 {
3602         struct rte_intr_handle *intr_handle;
3603         struct rte_eth_dev *dev;
3604         unsigned int efd_idx;
3605         uint32_t vec;
3606         int fd;
3607
3608         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
3609
3610         dev = &rte_eth_devices[port_id];
3611
3612         if (queue_id >= dev->data->nb_rx_queues) {
3613                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3614                 return -1;
3615         }
3616
3617         if (!dev->intr_handle) {
3618                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3619                 return -1;
3620         }
3621
3622         intr_handle = dev->intr_handle;
3623         if (!intr_handle->intr_vec) {
3624                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3625                 return -1;
3626         }
3627
3628         vec = intr_handle->intr_vec[queue_id];
3629         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
3630                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
3631         fd = intr_handle->efds[efd_idx];
3632
3633         return fd;
3634 }
3635
3636 const struct rte_memzone *
3637 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3638                          uint16_t queue_id, size_t size, unsigned align,
3639                          int socket_id)
3640 {
3641         char z_name[RTE_MEMZONE_NAMESIZE];
3642         const struct rte_memzone *mz;
3643
3644         snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
3645                  dev->data->port_id, queue_id, ring_name);
3646
3647         mz = rte_memzone_lookup(z_name);
3648         if (mz)
3649                 return mz;
3650
3651         return rte_memzone_reserve_aligned(z_name, size, socket_id,
3652                         RTE_MEMZONE_IOVA_CONTIG, align);
3653 }
3654
3655 int __rte_experimental
3656 rte_eth_dev_create(struct rte_device *device, const char *name,
3657         size_t priv_data_size,
3658         ethdev_bus_specific_init ethdev_bus_specific_init,
3659         void *bus_init_params,
3660         ethdev_init_t ethdev_init, void *init_params)
3661 {
3662         struct rte_eth_dev *ethdev;
3663         int retval;
3664
3665         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
3666
3667         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3668                 ethdev = rte_eth_dev_allocate(name);
3669                 if (!ethdev)
3670                         return -ENODEV;
3671
3672                 if (priv_data_size) {
3673                         ethdev->data->dev_private = rte_zmalloc_socket(
3674                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
3675                                 device->numa_node);
3676
3677                         if (!ethdev->data->dev_private) {
3678                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
3679                                 retval = -ENOMEM;
3680                                 goto probe_failed;
3681                         }
3682                 }
3683         } else {
3684                 ethdev = rte_eth_dev_attach_secondary(name);
3685                 if (!ethdev) {
3686                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
3687                                 "ethdev doesn't exist\n");
3688                         return -ENODEV;
3689                 }
3690         }
3691
3692         ethdev->device = device;
3693
3694         if (ethdev_bus_specific_init) {
3695                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
3696                 if (retval) {
3697                         RTE_LOG(ERR, EAL,
3698                                 "ethdev bus specific initialisation failed\n");
3699                         goto probe_failed;
3700                 }
3701         }
3702
3703         retval = ethdev_init(ethdev, init_params);
3704         if (retval) {
3705                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
3706                 goto probe_failed;
3707         }
3708
3709         rte_eth_dev_probing_finish(ethdev);
3710
3711         return retval;
3712
3713 probe_failed:
3714         rte_eth_dev_release_port(ethdev);
3715         return retval;
3716 }
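
/*
 * Usage sketch (illustrative only, not part of this library): the shape of
 * a driver probe built on rte_eth_dev_create. "example_priv" and
 * "example_ethdev_init" are hypothetical driver symbols; a real PMD would
 * also set dev_ops and bus-specific state.
 */
struct example_priv {
	uint64_t flags;
};

static int __rte_unused
example_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
{
	RTE_SET_USED(init_params);
	/* Allocate the mandatory MAC address array for the port. */
	ethdev->data->mac_addrs = rte_zmalloc("example", ETHER_ADDR_LEN, 0);
	if (ethdev->data->mac_addrs == NULL)
		return -ENOMEM;
	return 0;
}

static int __rte_unused
example_probe(struct rte_device *device)
{
	/* Private data is allocated on the device NUMA node by the API. */
	return rte_eth_dev_create(device, device->name,
				  sizeof(struct example_priv),
				  NULL, NULL, example_ethdev_init, NULL);
}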
3717
3718 int  __rte_experimental
3719 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
3720         ethdev_uninit_t ethdev_uninit)
3721 {
3722         int ret;
3723
3724         ethdev = rte_eth_dev_allocated(ethdev->data->name);
3725         if (!ethdev)
3726                 return -ENODEV;
3727
3728         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
3729         if (ethdev_uninit) {
3730                 ret = ethdev_uninit(ethdev);
3731                 if (ret)
3732                         return ret;
3733         }
3734
3735         return rte_eth_dev_release_port(ethdev);
3736 }
3737
3738 int
3739 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3740                           int epfd, int op, void *data)
3741 {
3742         uint32_t vec;
3743         struct rte_eth_dev *dev;
3744         struct rte_intr_handle *intr_handle;
3745         int rc;
3746
3747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3748
3749         dev = &rte_eth_devices[port_id];
3750         if (queue_id >= dev->data->nb_rx_queues) {
3751                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3752                 return -EINVAL;
3753         }
3754
3755         if (!dev->intr_handle) {
3756                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
3757                 return -ENOTSUP;
3758         }
3759
3760         intr_handle = dev->intr_handle;
3761         if (!intr_handle->intr_vec) {
3762                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
3763                 return -EPERM;
3764         }
3765
3766         vec = intr_handle->intr_vec[queue_id];
3767         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3768         if (rc && rc != -EEXIST) {
3769                 RTE_ETHDEV_LOG(ERR,
3770                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
3771                         port_id, queue_id, op, epfd, vec);
3772                 return rc;
3773         }
3774
3775         return 0;
3776 }
3777
3778 int
3779 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3780                            uint16_t queue_id)
3781 {
3782         struct rte_eth_dev *dev;
3783
3784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3785
3786         dev = &rte_eth_devices[port_id];
3787
3788         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3789         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3790                                                                 queue_id));
3791 }
3792
3793 int
3794 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3795                             uint16_t queue_id)
3796 {
3797         struct rte_eth_dev *dev;
3798
3799         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3800
3801         dev = &rte_eth_devices[port_id];
3802
3803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3804         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3805                                                                 queue_id));
3806 }
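
/*
 * Usage sketch (illustrative only, not part of this library; Linux only):
 * the l3fwd-power style sequence of arming queue 0 on the per-thread epoll
 * fd, enabling the interrupt, and blocking until traffic arrives.
 */
static int __rte_unused
example_wait_rx(uint16_t port_id)
{
	struct rte_epoll_event ev;
	int ret;

	ret = rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
					RTE_INTR_EVENT_ADD, NULL);
	if (ret < 0)
		return ret;

	ret = rte_eth_dev_rx_intr_enable(port_id, 0);
	if (ret < 0)
		return ret;

	/* Blocks until the NIC raises the queue-0 interrupt. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
	return rte_eth_dev_rx_intr_disable(port_id, 0);
}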
3807
3808
3809 int
3810 rte_eth_dev_filter_supported(uint16_t port_id,
3811                              enum rte_filter_type filter_type)
3812 {
3813         struct rte_eth_dev *dev;
3814
3815         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3816
3817         dev = &rte_eth_devices[port_id];
3818         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3819         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3820                                 RTE_ETH_FILTER_NOP, NULL);
3821 }
3822
3823 int
3824 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3825                         enum rte_filter_op filter_op, void *arg)
3826 {
3827         struct rte_eth_dev *dev;
3828
3829         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3830
3831         dev = &rte_eth_devices[port_id];
3832         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3833         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3834                                                              filter_op, arg));
3835 }
3836
3837 const struct rte_eth_rxtx_callback *
3838 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3839                 rte_rx_callback_fn fn, void *user_param)
3840 {
3841 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3842         rte_errno = ENOTSUP;
3843         return NULL;
3844 #endif
3845         /* check input parameters */
3846         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3847                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3848                 rte_errno = EINVAL;
3849                 return NULL;
3850         }
3851         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3852
3853         if (cb == NULL) {
3854                 rte_errno = ENOMEM;
3855                 return NULL;
3856         }
3857
3858         cb->fn.rx = fn;
3859         cb->param = user_param;
3860
3861         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3862         /* Add the callbacks in FIFO order. */
3863         struct rte_eth_rxtx_callback *tail =
3864                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3865
3866         if (!tail) {
3867                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3868
3869         } else {
3870                 while (tail->next)
3871                         tail = tail->next;
3872                 tail->next = cb;
3873         }
3874         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3875
3876         return cb;
3877 }
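
/*
 * Usage sketch (illustrative only, not part of this library): a post-RX
 * callback that counts delivered packets into a uint64_t supplied as
 * user_param, e.g. rte_eth_add_rx_callback(port, 0, example_count_rx, &n).
 */
static uint16_t __rte_unused
example_count_rx(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf *pkts[], uint16_t nb_pkts,
		 uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	*(uint64_t *)user_param += nb_pkts;
	/* Returning fewer than nb_pkts would drop the tail packets. */
	return nb_pkts;
}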
3878
3879 const struct rte_eth_rxtx_callback *
3880 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3881                 rte_rx_callback_fn fn, void *user_param)
3882 {
3883 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3884         rte_errno = ENOTSUP;
3885         return NULL;
3886 #endif
3887         /* check input parameters */
3888         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3889                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3890                 rte_errno = EINVAL;
3891                 return NULL;
3892         }
3893
3894         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3895
3896         if (cb == NULL) {
3897                 rte_errno = ENOMEM;
3898                 return NULL;
3899         }
3900
3901         cb->fn.rx = fn;
3902         cb->param = user_param;
3903
3904         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3905         /* Add the callback at first position */
3906         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3907         rte_smp_wmb();
3908         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3909         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3910
3911         return cb;
3912 }
3913
3914 const struct rte_eth_rxtx_callback *
3915 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3916                 rte_tx_callback_fn fn, void *user_param)
3917 {
3918 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3919         rte_errno = ENOTSUP;
3920         return NULL;
3921 #endif
3922         /* check input parameters */
3923         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3924                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3925                 rte_errno = EINVAL;
3926                 return NULL;
3927         }
3928
3929         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3930
3931         if (cb == NULL) {
3932                 rte_errno = ENOMEM;
3933                 return NULL;
3934         }
3935
3936         cb->fn.tx = fn;
3937         cb->param = user_param;
3938
3939         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3940         /* Add the callbacks in FIFO order. */
3941         struct rte_eth_rxtx_callback *tail =
3942                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3943
3944         if (!tail) {
3945                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3946
3947         } else {
3948                 while (tail->next)
3949                         tail = tail->next;
3950                 tail->next = cb;
3951         }
3952         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3953
3954         return cb;
3955 }
3956
3957 int
3958 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3959                 const struct rte_eth_rxtx_callback *user_cb)
3960 {
3961 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3962         return -ENOTSUP;
3963 #endif
3964         /* Check input parameters. */
3965         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3966         if (user_cb == NULL ||
3967                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3968                 return -EINVAL;
3969
3970         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3971         struct rte_eth_rxtx_callback *cb;
3972         struct rte_eth_rxtx_callback **prev_cb;
3973         int ret = -EINVAL;
3974
3975         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3976         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3977         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3978                 cb = *prev_cb;
3979                 if (cb == user_cb) {
3980                         /* Remove the user cb from the callback list. */
3981                         *prev_cb = cb->next;
3982                         ret = 0;
3983                         break;
3984                 }
3985         }
3986         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3987
3988         return ret;
3989 }
3990
3991 int
3992 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3993                 const struct rte_eth_rxtx_callback *user_cb)
3994 {
3995 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3996         return -ENOTSUP;
3997 #endif
3998         /* Check input parameters. */
3999         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4000         if (user_cb == NULL ||
4001                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4002                 return -EINVAL;
4003
4004         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4005         int ret = -EINVAL;
4006         struct rte_eth_rxtx_callback *cb;
4007         struct rte_eth_rxtx_callback **prev_cb;
4008
4009         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4010         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4011         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4012                 cb = *prev_cb;
4013                 if (cb == user_cb) {
4014                         /* Remove the user cb from the callback list. */
4015                         *prev_cb = cb->next;
4016                         ret = 0;
4017                         break;
4018                 }
4019         }
4020         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4021
4022         return ret;
4023 }
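
/*
 * Usage sketch (illustrative only, not part of this library): removal only
 * unlinks the callback from the list; a concurrent tx burst may still be
 * executing it, so the caller must quiesce the datapath before freeing.
 */
static void __rte_unused
example_remove_tx_cb(uint16_t port_id, uint16_t queue_id,
		     const struct rte_eth_rxtx_callback *cb)
{
	if (rte_eth_remove_tx_callback(port_id, queue_id, cb) != 0)
		return;
	/* Synchronize with all lcores using the queue (omitted here),
	 * then release the memory allocated by rte_eth_add_tx_callback().
	 */
	rte_free((void *)(uintptr_t)cb);
}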
4024
4025 int
4026 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4027         struct rte_eth_rxq_info *qinfo)
4028 {
4029         struct rte_eth_dev *dev;
4030
4031         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4032
4033         if (qinfo == NULL)
4034                 return -EINVAL;
4035
4036         dev = &rte_eth_devices[port_id];
4037         if (queue_id >= dev->data->nb_rx_queues) {
4038                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4039                 return -EINVAL;
4040         }
4041
4042         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4043
4044         memset(qinfo, 0, sizeof(*qinfo));
4045         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4046         return 0;
4047 }
4048
4049 int
4050 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4051         struct rte_eth_txq_info *qinfo)
4052 {
4053         struct rte_eth_dev *dev;
4054
4055         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4056
4057         if (qinfo == NULL)
4058                 return -EINVAL;
4059
4060         dev = &rte_eth_devices[port_id];
4061         if (queue_id >= dev->data->nb_tx_queues) {
4062                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4063                 return -EINVAL;
4064         }
4065
4066         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4067
4068         memset(qinfo, 0, sizeof(*qinfo));
4069         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4070
4071         return 0;
4072 }
4073
4074 int
4075 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4076                              struct ether_addr *mc_addr_set,
4077                              uint32_t nb_mc_addr)
4078 {
4079         struct rte_eth_dev *dev;
4080
4081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4082
4083         dev = &rte_eth_devices[port_id];
4084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4085         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4086                                                 mc_addr_set, nb_mc_addr));
4087 }
4088
4089 int
4090 rte_eth_timesync_enable(uint16_t port_id)
4091 {
4092         struct rte_eth_dev *dev;
4093
4094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4095         dev = &rte_eth_devices[port_id];
4096
4097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4098         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4099 }
4100
4101 int
4102 rte_eth_timesync_disable(uint16_t port_id)
4103 {
4104         struct rte_eth_dev *dev;
4105
4106         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4107         dev = &rte_eth_devices[port_id];
4108
4109         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4110         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4111 }
4112
4113 int
4114 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4115                                    uint32_t flags)
4116 {
4117         struct rte_eth_dev *dev;
4118
4119         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4120         dev = &rte_eth_devices[port_id];
4121
4122         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4123         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4124                                 (dev, timestamp, flags));
4125 }
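
/*
 * Usage sketch (illustrative only, not part of this library): enable
 * timesync and fetch the hardware RX timestamp of a PTP frame. The flags
 * argument is driver specific; drivers commonly expect the RX queue index
 * of the timestamped packet.
 */
static int __rte_unused
example_read_ptp_rx_stamp(uint16_t port_id, struct timespec *ts)
{
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret < 0)
		return ret;

	return rte_eth_timesync_read_rx_timestamp(port_id, ts, 0);
}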
4126
4127 int
4128 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4129                                    struct timespec *timestamp)
4130 {
4131         struct rte_eth_dev *dev;
4132
4133         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4134         dev = &rte_eth_devices[port_id];
4135
4136         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4137         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4138                                 (dev, timestamp));
4139 }
4140
4141 int
4142 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4143 {
4144         struct rte_eth_dev *dev;
4145
4146         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4147         dev = &rte_eth_devices[port_id];
4148
4149         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4150         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4151                                                                       delta));
4152 }
4153
4154 int
4155 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4156 {
4157         struct rte_eth_dev *dev;
4158
4159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4160         dev = &rte_eth_devices[port_id];
4161
4162         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4163         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4164                                                                 timestamp));
4165 }
4166
4167 int
4168 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4169 {
4170         struct rte_eth_dev *dev;
4171
4172         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4173         dev = &rte_eth_devices[port_id];
4174
4175         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4176         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4177                                                                 timestamp));
4178 }
4179
4180 int
4181 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4182 {
4183         struct rte_eth_dev *dev;
4184
4185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4186
4187         dev = &rte_eth_devices[port_id];
4188         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4189         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4190 }
4191
4192 int
4193 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4194 {
4195         struct rte_eth_dev *dev;
4196
4197         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4198
4199         dev = &rte_eth_devices[port_id];
4200         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4201         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4202 }
4203
4204 int
4205 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4206 {
4207         struct rte_eth_dev *dev;
4208
4209         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4210
4211         dev = &rte_eth_devices[port_id];
4212         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4213         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4214 }
4215
4216 int
4217 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4218 {
4219         struct rte_eth_dev *dev;
4220
4221         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4222
4223         dev = &rte_eth_devices[port_id];
4224         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4225         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4226 }
4227
4228 int __rte_experimental
4229 rte_eth_dev_get_module_info(uint16_t port_id,
4230                             struct rte_eth_dev_module_info *modinfo)
4231 {
4232         struct rte_eth_dev *dev;
4233
4234         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4235
4236         dev = &rte_eth_devices[port_id];
4237         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4238         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4239 }
4240
4241 int __rte_experimental
4242 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4243                               struct rte_dev_eeprom_info *info)
4244 {
4245         struct rte_eth_dev *dev;
4246
4247         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4248
4249         dev = &rte_eth_devices[port_id];
4250         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4251         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4252 }
4253
4254 int
4255 rte_eth_dev_get_dcb_info(uint16_t port_id,
4256                              struct rte_eth_dcb_info *dcb_info)
4257 {
4258         struct rte_eth_dev *dev;
4259
4260         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4261
4262         dev = &rte_eth_devices[port_id];
4263         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4264
4265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4266         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4267 }
4268
4269 int
4270 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4271                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4272 {
4273         struct rte_eth_dev *dev;
4274
4275         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4276         if (l2_tunnel == NULL) {
4277                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4278                 return -EINVAL;
4279         }
4280
4281         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4282                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4283                 return -EINVAL;
4284         }
4285
4286         dev = &rte_eth_devices[port_id];
4287         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4288                                 -ENOTSUP);
4289         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4290                                                                 l2_tunnel));
4291 }
4292
4293 int
4294 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4295                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4296                                   uint32_t mask,
4297                                   uint8_t en)
4298 {
4299         struct rte_eth_dev *dev;
4300
4301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4302
4303         if (l2_tunnel == NULL) {
4304                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4305                 return -EINVAL;
4306         }
4307
4308         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4309                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4310                 return -EINVAL;
4311         }
4312
4313         if (mask == 0) {
4314                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4315                 return -EINVAL;
4316         }
4317
4318         dev = &rte_eth_devices[port_id];
4319         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4320                                 -ENOTSUP);
4321         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4322                                                         l2_tunnel, mask, en));
4323 }
4324
4325 static void
4326 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4327                            const struct rte_eth_desc_lim *desc_lim)
4328 {
4329         if (desc_lim->nb_align != 0)
4330                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4331
4332         if (desc_lim->nb_max != 0)
4333                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4334
4335         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4336 }
4337
4338 int
4339 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4340                                  uint16_t *nb_rx_desc,
4341                                  uint16_t *nb_tx_desc)
4342 {
4343         struct rte_eth_dev *dev;
4344         struct rte_eth_dev_info dev_info;
4345
4346         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4347
4348         dev = &rte_eth_devices[port_id];
4349         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4350
4351         rte_eth_dev_info_get(port_id, &dev_info);
4352
4353         if (nb_rx_desc != NULL)
4354                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4355
4356         if (nb_tx_desc != NULL)
4357                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4358
4359         return 0;
4360 }
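
/*
 * Usage sketch (illustrative only, not part of this library): clamp
 * requested ring sizes to the device limits before queue setup; after the
 * call the values respect nb_min, nb_max and nb_align from dev_info.
 */
static int __rte_unused
example_clamp_ring_sizes(uint16_t port_id, uint16_t *nb_rxd, uint16_t *nb_txd)
{
	*nb_rxd = 4096; /* may exceed what the driver supports */
	*nb_txd = 4096;

	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}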
4361
4362 int
4363 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4364 {
4365         struct rte_eth_dev *dev;
4366
4367         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4368
4369         if (pool == NULL)
4370                 return -EINVAL;
4371
4372         dev = &rte_eth_devices[port_id];
4373
4374         if (*dev->dev_ops->pool_ops_supported == NULL)
4375                 return 1; /* all pools are supported */
4376
4377         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4378 }
4379
4380 /**
4381  * A set of values to describe the possible states of a switch domain.
4382  */
4383 enum rte_eth_switch_domain_state {
4384         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
4385         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
4386 };
4387
4388 /**
4389  * Array of switch domains available for allocation. Array is sized to
4390  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
4391  * ethdev ports in a single process.
4392  */
4393 struct rte_eth_dev_switch {
4394         enum rte_eth_switch_domain_state state;
4395 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
4396
4397 int __rte_experimental
4398 rte_eth_switch_domain_alloc(uint16_t *domain_id)
4399 {
4400         unsigned int i;
4401
4402         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
4403
4404         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
4405                 i < RTE_MAX_ETHPORTS; i++) {
4406                 if (rte_eth_switch_domains[i].state ==
4407                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
4408                         rte_eth_switch_domains[i].state =
4409                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
4410                         *domain_id = i;
4411                         return 0;
4412                 }
4413         }
4414
4415         return -ENOSPC;
4416 }
4417
4418 int __rte_experimental
4419 rte_eth_switch_domain_free(uint16_t domain_id)
4420 {
4421         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
4422                 domain_id >= RTE_MAX_ETHPORTS)
4423                 return -EINVAL;
4424
4425         if (rte_eth_switch_domains[domain_id].state !=
4426                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
4427                 return -EINVAL;
4428
4429         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
4430
4431         return 0;
4432 }
4433
4434 typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
4435
4436 static int
4437 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
4438 {
4439         int state;
4440         struct rte_kvargs_pair *pair;
4441         char *letter;
4442
4443         arglist->str = strdup(str_in);
4444         if (arglist->str == NULL)
4445                 return -ENOMEM;
4446
4447         letter = arglist->str;
4448         state = 0;
4449         arglist->count = 0;
4450         pair = &arglist->pairs[0];
4451         while (1) {
4452                 switch (state) {
4453                 case 0: /* Initial */
4454                         if (*letter == '=')
4455                                 return -EINVAL;
4456                         else if (*letter == '\0')
4457                                 return 0;
4458
4459                         state = 1;
4460                         pair->key = letter;
4461                         /* fall-thru */
4462
4463                 case 1: /* Parsing key */
4464                         if (*letter == '=') {
4465                                 *letter = '\0';
4466                                 pair->value = letter + 1;
4467                                 state = 2;
4468                         } else if (*letter == ',' || *letter == '\0')
4469                                 return -EINVAL;
4470                         break;
4471
4472
4473                 case 2: /* Parsing value */
4474                         if (*letter == '[')
4475                                 state = 3;
4476                         else if (*letter == ',') {
4477                                 *letter = '\0';
4478                                 arglist->count++;
4479                                 pair = &arglist->pairs[arglist->count];
4480                                 state = 0;
4481                         } else if (*letter == '\0') {
4482                                 letter--;
4483                                 arglist->count++;
4484                                 pair = &arglist->pairs[arglist->count];
4485                                 state = 0;
4486                         }
4487                         break;
4488
4489                 case 3: /* Parsing list */
4490                         if (*letter == ']')
4491                                 state = 2;
4492                         else if (*letter == '\0')
4493                                 return -EINVAL;
4494                         break;
4495                 }
4496                 letter++;
4497         }
4498 }
4499
4500 static int
4501 rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
4502         void *data)
4503 {
4504         char *str_start;
4505         int state;
4506         int result;
4507
4508         if (*str != '[')
4509                 /* Single element, not a list */
4510                 return callback(str, data);
4511
4512         /* Sanity check, then strip the brackets */
4513         str_start = &str[strlen(str) - 1];
4514         if (*str_start != ']') {
4515                 RTE_LOG(ERR, EAL, "(%s): List does not end with ']'\n", str);
4516                 return -EINVAL;
4517         }
4518         str++;
4519         *str_start = '\0';
4520
4521         /* Process list elements */
4522         state = 0;
4523         while (1) {
4524                 if (state == 0) {
4525                         if (*str == '\0')
4526                                 break;
4527                         if (*str != ',') {
4528                                 str_start = str;
4529                                 state = 1;
4530                         }
4531                 } else if (state == 1) {
4532                         if (*str == ',' || *str == '\0') {
4533                                 if (str > str_start) {
4534                                         /* Non-empty string fragment */
4535                                         *str = '\0';
4536                                         result = callback(str_start, data);
4537                                         if (result < 0)
4538                                                 return result;
4539                                 }
4540                                 state = 0;
4541                         }
4542                 }
4543                 str++;
4544         }
4545         return 0;
4546 }
4547
4548 static int
4549 rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
4550         const uint16_t max_list)
4551 {
4552         uint16_t lo, hi, val;
4553         int result;
4554
4555         result = sscanf(str, "%hu-%hu", &lo, &hi);
4556         if (result == 1) {
4557                 if (*len_list >= max_list)
4558                         return -ENOMEM;
4559                 list[(*len_list)++] = lo;
4560         } else if (result == 2) {
4561                 if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
4562                         return -EINVAL;
4563                 for (val = lo; val <= hi; val++) {
4564                         if (*len_list >= max_list)
4565                                 return -ENOMEM;
4566                         list[(*len_list)++] = val;
4567                 }
4568         } else
4569                 return -EINVAL;
4570         return 0;
4571 }
4572
4573
4574 static int
4575 rte_eth_devargs_parse_representor_ports(char *str, void *data)
4576 {
4577         struct rte_eth_devargs *eth_da = data;
4578
4579         return rte_eth_devargs_process_range(str, eth_da->representor_ports,
4580                 &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
4581 }
4582
4583 int __rte_experimental
4584 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
4585 {
4586         struct rte_kvargs args;
4587         struct rte_kvargs_pair *pair;
4588         unsigned int i;
4589         int result = 0;
4590
4591         memset(eth_da, 0, sizeof(*eth_da));
4592
4593         result = rte_eth_devargs_tokenise(&args, dargs);
4594         if (result < 0)
4595                 goto parse_cleanup;
4596
4597         for (i = 0; i < args.count; i++) {
4598                 pair = &args.pairs[i];
4599                 if (strcmp("representor", pair->key) == 0) {
4600                         result = rte_eth_devargs_parse_list(pair->value,
4601                                 rte_eth_devargs_parse_representor_ports,
4602                                 eth_da);
4603                         if (result < 0)
4604                                 goto parse_cleanup;
4605                 }
4606         }
4607
4608 parse_cleanup:
4609         if (args.str)
4610                 free(args.str);
4611
4612         return result;
4613 }
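
/*
 * Usage sketch (illustrative only, not part of this library): parse a
 * representor list in the bracketed range form accepted by the tokeniser
 * above; "representor=[0-3]" yields ports 0, 1, 2, 3.
 */
static int __rte_unused
example_parse_representors(void)
{
	struct rte_eth_devargs da;
	int ret;

	ret = rte_eth_devargs_parse("representor=[0-3]", &da);
	if (ret < 0)
		return ret;
	/* da.nb_representor_ports == 4; da.representor_ports[0..3] = 0..3 */
	return 0;
}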
4614
4615 RTE_INIT(ethdev_init_log)
4616 {
4617         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
4618         if (rte_eth_dev_logtype >= 0)
4619                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
4620 }