/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 * All rights reserved.
 */
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
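/*
 * Usage sketch (illustrative, not part of the driver): the kvargs above are
 * passed as vdev device arguments on the EAL command line, e.g.
 *
 *   ./dpdk-app --vdev=net_null0,size=128,copy=1
 *
 * "size" sets the synthetic packet length in bytes and "copy" (0/1) selects
 * the memcpy-based Rx/Tx paths below. The instance name "net_null0" and the
 * application name are hypothetical.
 */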
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};
struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
};
struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40]; /**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
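/*
 * Rx burst, copy mode: as eth_null_rx, but also memcpy() the queue's
 * (initially zeroed) dummy_packet into each mbuf so the payload is defined.
 */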
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
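/*
 * Tx burst, no-copy mode: drop every packet by freeing the mbufs back to
 * their mempool, counting them as transmitted.
 */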
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
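/*
 * Tx burst, copy mode: memcpy() each packet into the queue's dummy_packet
 * buffer before freeing it, so transmit costs roughly one copy per packet.
 */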
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
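	/* Default 40-byte RSS key; this looks like the well-known sample key
	 * from Microsoft's RSS documentation, commonly used as a default.
	 */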
	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};
	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;
	/* now put it all together:
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data,
	 * - point eth_dev_data to internals,
	 * - and point the eth_dev structure to the new eth_dev_data structure.
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process.
	 */
	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -EINVAL;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -EINVAL;

	return 0;
}
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}