/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_INTERNAL_ARG		"internal"
#define ETH_RING_INTERNAL_ARG_MAX_LEN	19 /* "0x..16chars..\0" */

static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};

struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address;
	enum dev_action action;
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_ring_logtype, pmd.net.ring, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}
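
/*
 * TX burst: enqueue up to nb_bufs mbuf pointers onto the queue's ring.
 * Mirrors the RX path: plain increment for single-producer rings,
 * atomic add otherwise.
 */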
static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts.cnt += nb_tx;
	else
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
	return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
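
/*
 * Queue setup is trivial for this PMD: the backing rings were supplied
 * at device creation, so setup only points the ethdev queue slot at
 * the matching pre-populated ring_queue entry in dev_private.
 */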
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];

	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}
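
/*
 * Aggregate the per-queue packet counters into the device-level stats.
 * Per-queue entries are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS, the
 * size of the q_ipackets/q_opackets arrays.
 */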
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct rte_ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
	return 0;
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
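
/*
 * Release device resources. Only the primary process may free the
 * rings; secondary processes return early. Rings are freed only when
 * this driver created them (DEV_CREATE), not when it merely attached.
 */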
static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev_stop(dev);

	internals = dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues because
		 * they are the same used in tx_queues
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			r = dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};
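
/*
 * Common back end for device creation: allocate the queue arrays,
 * dev_private and the ethdev entry on the requested NUMA node, wire
 * the supplied rings into the queue structures, and finish probing.
 * Returns the new port id, or -1 with rte_errno set on failure.
 */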
static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
			sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
			sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}
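
/*
 * Public API: create an ethdev that polls the caller-supplied rings.
 * A pointer to the on-stack args struct is passed to the probe
 * callback through the "internal" devarg, then verified via the
 * self-address check in parse_internal_args().
 *
 * Usage sketch (names are illustrative, not part of this file):
 *
 *	struct rte_ring *r = rte_ring_create("r0", 1024, rte_socket_id(),
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port = rte_eth_from_rings("demo", &r, 1, &r, 1,
 *			rte_socket_id());
 */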
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32];
	char ring_name[RTE_RING_NAMESIZE];
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	snprintf(args_str, sizeof(args_str), "%s=%p",
			ETH_RING_INTERNAL_ARG, &args);

	ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
	if (ret >= (int)sizeof(ring_name)) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
	if (ret) {
		rte_errno = ENODEV;
		return -1;
	}

	return port_id;
}
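
/*
 * Convenience wrapper: expose a single ring as both the RX and TX
 * queue of a new ethdev, on the ring's own NUMA socket when known.
 */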
int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
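
/*
 * Create or look up the backing rings for a vdev-created port. With
 * DEV_CREATE new rings are allocated; with DEV_ATTACH rings of the
 * same name ("ETH_RXTX<n>_<name>") must already exist.
 */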
static int
eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* rx and tx are so-called from point of view of first port.
	 * They are inverted from the point of view of second port
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		int cc;

		cc = snprintf(rng_name, sizeof(rng_name),
				"ETH_RXTX%u_%s", i, name);
		if (cc >= (int)sizeof(rng_name)) {
			rte_errno = ENAMETOOLONG;
			return -1;
		}

		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
		numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}

struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};
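
/*
 * Parse one "name:node:action" triple from the nodeaction devarg into
 * the next free slot of the node_action_list.
 */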
static int parse_kvlist(const char *key __rte_unused,
			const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		PMD_LOG(WARNING, "could not parse node value from %s",
			name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		PMD_LOG(WARNING, "could not parse action value from %s",
			node);
		goto out;
	}

	*action = '\0';
	action++;

	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		PMD_LOG(WARNING,
			"node value %s is unparseable as a number", node);
		goto out;
	}

	strlcpy(info->list[info->count].name, name,
		sizeof(info->list[info->count].name));

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}
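
/*
 * Recover the ring_internal_args pointer that rte_eth_from_rings()
 * encoded as "%p" in the "internal" devarg, and sanity-check it
 * against the embedded self-address before trusting it.
 */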
static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;
	int ret, n;

	/* make sure 'value' is valid pointer length */
	if (strnlen(value, ETH_RING_INTERNAL_ARG_MAX_LEN) >=
			ETH_RING_INTERNAL_ARG_MAX_LEN) {
		PMD_LOG(ERR, "Error parsing internal args, argument is too long");
		return -1;
	}

	ret = sscanf(value, "%p%n", &args, &n);
	if (ret == 0 || (size_t)n != strlen(value)) {
		PMD_LOG(ERR, "Error parsing internal args");
		return -1;
	}

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}
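
/*
 * Probe entry point for the net_ring vdev. Three paths: no devargs ->
 * create rings on the caller's socket; "internal" -> adopt the rings
 * passed by rte_eth_from_rings(); "nodeaction" -> create or attach
 * per the parsed name:node:action triples.
 */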
static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	PMD_LOG(INFO, "Initializing pmd_ring for %s", name);

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			PMD_LOG(INFO,
				"Attach to pmd_ring for %s", name);
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
					DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			PMD_LOG(INFO,
				"Ignoring unsupported parameters when creating rings-backed ethernet device");
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
					DEV_CREATE, &eth_dev);
			if (ret == -1) {
				PMD_LOG(INFO,
					"Attach to pmd_ring for %s",
					name);
				ret = eth_dev_ring_create(name, dev,
						rte_socket_id(),
						DEV_ATTACH, &eth_dev);
			}

			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name, dev,
					internal_args->rx_queues,
					internal_args->nb_rx_queues,
					internal_args->tx_queues,
					internal_args->nb_tx_queues,
					internal_args->numa_node,
					DEV_ATTACH,
					&eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					sizeof(struct node_action_list) +
					(sizeof(struct node_action_pair) * ret),
					0);
			if (!info)
				goto out_free;

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
					parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
						dev,
						info->list[info->count].node,
						info->list[info->count].action,
						&eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					PMD_LOG(INFO,
						"Attach to pmd_ring for %s",
						name);
					ret = eth_dev_ring_create(name, dev,
							info->list[info->count].node,
							DEV_ATTACH,
							&eth_dev);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}
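
/*
 * Remove entry point: close the port and release the ethdev entry.
 * Returns 0 when the port was already released.
 */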
static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");