/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_INTERNAL_ARG		"internal"
static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};
struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};
struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address;
	enum dev_action action;
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
RTE_LOG_REGISTER(eth_ring_logtype, pmd.net.ring, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
/* RX burst: dequeue mbuf pointers directly from the backing ring */
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}
/* TX burst: enqueue mbuf pointers directly onto the backing ring */
static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts.cnt += nb_tx;
	else
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
	return nb_tx;
}
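/*
 * Illustrative note (not part of the driver; the ring name is hypothetical):
 * a ring created with both single-producer and single-consumer flags, as
 * eth_dev_ring_create() does further down, lets the burst handlers above take
 * the plain (non-atomic) counter-update path.
 *
 *	struct rte_ring *r = rte_ring_create("example_ring", 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 */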
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];

	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;

	return 0;
}
static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct rte_ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
	return 0;
}

static void
eth_queue_release(void *q __rte_unused) { ; }

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};
static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
			sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
			sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32];
	char ring_name[RTE_RING_NAMESIZE];
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	/* pass the internal args struct to the probe callback by address */
	snprintf(args_str, sizeof(args_str), "%s=%p",
			ETH_RING_INTERNAL_ARG, &args);

	ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
	if (ret >= (int)sizeof(ring_name)) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
	if (ret) {
		rte_errno = ENODEV;
		return -1;
	}

	return port_id;
}
int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
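/*
 * Illustrative usage sketch (not part of the driver; names are hypothetical):
 * an application can wrap an existing ring in an ethdev port and then drive
 * it with the normal burst API after the usual configure/queue-setup/start
 * sequence.
 *
 *	struct rte_ring *r = rte_ring_create("exchange", 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port_id = rte_eth_from_ring(r);
 *
 *	struct rte_mbuf *bufs[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, bufs, 32);
 */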
static int
eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* rx and tx are so-called from point of view of first port.
	 * They are inverted from the point of view of second port
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		int cc;

		cc = snprintf(rng_name, sizeof(rng_name),
				"ETH_RXTX%u_%s", i, name);
		if (cc >= (int)sizeof(rng_name)) {
			rte_errno = ENAMETOOLONG;
			return -1;
		}

		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
		numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}
struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};
static int parse_kvlist(const char *key __rte_unused,
			const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		PMD_LOG(WARNING, "could not parse node value from %s",
			name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		PMD_LOG(WARNING, "could not parse action value from %s",
			node);
		goto out;
	}

	*action = '\0';
	action++;

	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		PMD_LOG(WARNING,
			"node value %s is unparseable as a number", node);
		goto out;
	}

	strlcpy(info->list[info->count].name, name,
		sizeof(info->list[info->count].name));

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}
static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;

	sscanf(value, "%p", &args);

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}
static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	PMD_LOG(INFO, "Initializing pmd_ring for %s", name);

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			PMD_LOG(INFO,
				"Attach to pmd_ring for %s", name);
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			PMD_LOG(INFO,
				"Ignoring unsupported parameters when creating rings-backed ethernet device");
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_CREATE, &eth_dev);
			if (ret == -1) {
				PMD_LOG(INFO,
					"Attach to pmd_ring for %s",
					name);
				ret = eth_dev_ring_create(name, dev, rte_socket_id(),
							  DEV_ATTACH, &eth_dev);
			}
			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name, dev,
				internal_args->rx_queues,
				internal_args->nb_rx_queues,
				internal_args->tx_queues,
				internal_args->nb_tx_queues,
				internal_args->numa_node,
				DEV_ATTACH,
				&eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info)
				goto out_free;

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
							  dev,
							  info->list[info->count].node,
							  info->list[info->count].action,
							  &eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					PMD_LOG(INFO,
						"Attach to pmd_ring for %s",
						name);
					ret = eth_dev_ring_create(name, dev,
							info->list[info->count].node,
							DEV_ATTACH,
							&eth_dev);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}
static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_stop(eth_dev);

	internals = eth_dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues because
		 * they are the same used in tx_queues
		 */
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			r = eth_dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because part of dev_private */
	eth_dev->data->mac_addrs = NULL;
	rte_eth_dev_release_port(eth_dev);
	return 0;
}
static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
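/*
 * Illustrative EAL command lines for this vdev (device and ring names are
 * hypothetical, shown only as a sketch of the nodeaction=name:node:action
 * format registered above):
 *
 *	--vdev=net_ring0
 *		create a port backed by newly created rings on the local socket
 *	--vdev=net_ring0,nodeaction=r0:0:CREATE
 *		create rings named "ETH_RXTX<n>_r0" on NUMA node 0
 *	--vdev=net_ring1,nodeaction=r0:0:ATTACH
 *		attach a second port to those same rings
 */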