/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_INTERNAL_ARG		"internal"

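/*
 * Device arguments accepted by this PMD (see the PARAM_STRING registration
 * at the bottom of this file):
 *   nodeaction=name:node:action - create ("CREATE") or look up ("ATTACH")
 *                                 the backing rings on the given NUMA node.
 *   internal=<pointer>          - internal use only: carries the address of
 *                                 a ring_internal_args structure from
 *                                 rte_eth_from_rings() into the probe path.
 */
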
static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};

struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct ether_addr address;
	enum dev_action action;
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};

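/*
 * Burst RX/TX. Per-queue statistics are kept directly in the ring_queue:
 * when the ring was created single-consumer (RING_F_SC_DEQ) or
 * single-producer (RING_F_SP_ENQ), only one thread can be inside the burst
 * function at a time, so the counters can be bumped with plain additions;
 * otherwise an atomic add is required. Mbufs that cannot be enqueued on TX
 * are counted as errors and remain owned by the caller.
 */
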
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SP_ENQ) {
		r->tx_pkts.cnt += nb_tx;
		r->err_pkts.cnt += nb_bufs - nb_tx;
	} else {
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
		rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
	}
	return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] =
			&internals->rx_ring_queues[rx_queue_id];
	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] =
			&internals->tx_ring_queues[tx_queue_id];
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;
}

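/*
 * Per-queue counters are exported through the fixed-size q_ipackets /
 * q_opackets / q_errors arrays of rte_eth_stats, so both loops below are
 * clamped to RTE_ETHDEV_QUEUE_STAT_CNTRS as well as to the configured
 * queue counts.
 */
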
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
		tx_total += stats->q_opackets[i];
		tx_err_total += stats->q_errors[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->oerrors = tx_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
		internal->tx_ring_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
	return 0;
}

static void
eth_queue_release(void *q __rte_unused) { ; }

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};

static struct rte_vdev_driver pmd_ring_drv;

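/*
 * Common back end for both the public rte_eth_from_rings() API and the
 * vdev probe path: allocate the ethdev, wrap the caller-supplied rings in
 * ring_queue structures and wire up the burst functions. Returns the port
 * id on success, -1 (with rte_errno set) on failure.
 */
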
static int
do_eth_dev_ring_create(const char *name,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring * const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	rx_queues_local = rte_zmalloc_socket(name,
			sizeof(void *) * nb_rx_queues, 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_zmalloc_socket(name,
			sizeof(void *) * nb_tx_queues, 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the rings are local per-process
	 */
	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;
	data->kdrv = RTE_KDRV_NONE;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(data);
	rte_free(internals);

	return -1;
}

int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32] = { 0 };
	char ring_name[32] = { 0 };
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	snprintf(args_str, sizeof(args_str), "%s=%p",
			ETH_RING_INTERNAL_ARG, &args);
	snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_eth_dev_get_port_by_name(ring_name, &port_id);

	return port_id;
}

int
rte_eth_from_ring(struct rte_ring *r)
{
	/* use the same ring for rx and tx, giving a single-queue loopback */
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}

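/*
 * Usage sketch (not part of this driver): an application that already owns
 * rte_rings can wrap them in an ethdev and then drive them through the
 * normal rte_eth_* API. The names below ("rx_ring", "tx_ring",
 * "ring_port") are illustrative only.
 *
 *	struct rte_ring *rx = rte_ring_create("rx_ring", 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	struct rte_ring *tx = rte_ring_create("tx_ring", 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port = rte_eth_from_rings("ring_port", &rx, 1, &tx, 1,
 *			rte_socket_id());
 *	// if port >= 0, configure and start it with rte_eth_dev_configure()
 *	// etc.; rte_eth_tx_burst(port, 0, ...) then enqueues onto "tx_ring"
 *	// and rte_eth_rx_burst(port, 0, ...) dequeues from "rx_ring".
 */
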
static int
eth_dev_ring_create(const char *name, const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* rx and tx are so-called from point of view of first port.
	 * They are inverted from the point of view of second port
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
			numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}

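/*
 * Because the rings above get deterministic names ("ETH_RXTX<i>_<name>"),
 * DEV_CREATE and DEV_ATTACH pair up: a later probe with action ATTACH and
 * the same name (for example from a secondary process) finds the existing
 * rings via rte_ring_lookup() instead of creating new ones.
 */
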
struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};

static int
parse_kvlist(const char *key __rte_unused, const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret = -EINVAL;
	char *name, *action, *node, *end;

	name = strdup(value);
	if (!name) {
		RTE_LOG(WARNING, PMD,
			"command line parameter is empty for ring pmd!\n");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		RTE_LOG(WARNING, PMD, "could not parse node value from %s\n",
			name);
		goto out;
	}
	*node++ = '\0';

	action = strchr(node, ':');
	if (!action) {
		RTE_LOG(WARNING, PMD, "could not parse action value from %s\n",
			node);
		goto out;
	}
	*action++ = '\0';

	/* sanity-check the parsed action and node values */
	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);
	if ((errno != 0) || (*end != '\0')) {
		RTE_LOG(WARNING, PMD,
			"node value %s is unparseable as a number\n", node);
		goto out;
	}

	snprintf(info->list[info->count].name,
		sizeof(info->list[info->count].name), "%s", name);
	info->count++;
	ret = 0;
out:
	free(name);
	return ret;
}

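/*
 * rte_eth_from_rings() formats the address of its stack-allocated
 * ring_internal_args structure into the "internal=<ptr>" devarg string;
 * parse_internal_args() scans that pointer back out. The ->addr
 * self-reference is the sanity check that the recovered pointer really is
 * such a structure. This only works because rte_vdev_init() runs in the
 * same address space as the caller.
 */
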
static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;

	if (sscanf(value, "%p", &args) != 1 || args == NULL)
		return -1;

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}

static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

	if (params == NULL || params[0] == '\0') {
		/* no devargs: create new rings, fall back to attach */
		ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			RTE_LOG(INFO, PMD,
				"Attach to pmd_ring for %s\n", name);
			ret = eth_dev_ring_create(name, rte_socket_id(),
					DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			RTE_LOG(INFO, PMD,
				"Ignoring unsupported parameters when creating"
				" rings-backed ethernet device\n");
			ret = eth_dev_ring_create(name, rte_socket_id(),
					DEV_CREATE, &eth_dev);
			if (ret == -1) {
				RTE_LOG(INFO, PMD,
					"Attach to pmd_ring for %s\n", name);
				ret = eth_dev_ring_create(name,
						rte_socket_id(),
						DEV_ATTACH, &eth_dev);
			}
			if (eth_dev)
				eth_dev->device = &dev->device;
			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			/* attach to rings handed over by rte_eth_from_rings() */
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name,
					internal_args->rx_queues,
					internal_args->nb_rx_queues,
					internal_args->tx_queues,
					internal_args->nb_tx_queues,
					internal_args->numa_node,
					DEV_ATTACH,
					&eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			/* one device per nodeaction=name:node:action entry */
			ret = rte_kvargs_count(kvlist,
					ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					sizeof(struct node_action_list) +
					(sizeof(struct node_action_pair) * ret),
					0);
			if (!info)
				goto out_free;

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist,
					ETH_RING_NUMA_NODE_ACTION_ARG,
					parse_kvlist, info);
			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total;
					info->count++) {
				ret = eth_dev_ring_create(
					info->list[info->count].name,
					info->list[info->count].node,
					info->list[info->count].action,
					&eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action ==
						DEV_CREATE)) {
					RTE_LOG(INFO, PMD,
						"Attach to pmd_ring for %s\n",
						name);
					ret = eth_dev_ring_create(name,
						info->list[info->count].node,
						DEV_ATTACH, &eth_dev);
				}
			}
		}
	}

	if (eth_dev)
		eth_dev->device = &dev->device;

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}

static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;

	RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_dev_stop(eth_dev);

	internals = eth_dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues
		 * because they are the same ones used in tx_queues
		 */
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			r = eth_dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	rte_free(eth_dev->data->rx_queues);
	rte_free(eth_dev->data->tx_queues);
	rte_free(eth_dev->data->dev_private);

	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");