/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
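/*
 * Device arguments accepted by this PMD:
 *   size  - length in bytes of the fake packets produced on Rx
 *   copy  - when non-zero, packet data is copied on Rx/Tx instead of the
 *           mbufs being left untouched
 *   no-rx - when set to 1, the Rx burst never returns packets (Tx-only mode)
 */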
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"
static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};
struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
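/*
 * Rx burst used when the "copy" option is set: same as eth_null_rx, but the
 * queue's dummy buffer is copied into each mbuf before it is returned, so the
 * packet data is actually written.
 */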
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
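/*
 * Rx burst used when the "no-rx" option is set: never returns any packets,
 * which turns the port into a Tx-only device.
 */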
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}
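/*
 * Default Tx burst: every packet handed to the driver is immediately freed
 * and counted as transmitted.
 */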
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
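/*
 * Tx burst used when the "copy" option is set: packet data is copied into the
 * queue's dummy buffer before the mbuf is freed, so the transmitted data is
 * actually read.
 */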
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
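/*
 * Tx queue setup mirrors the Rx path: the queue points at its per-port
 * null_queue slot and gets its own zero-filled dummy buffer of packet_size
 * bytes.
 */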
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
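/*
 * Create the null ethdev: allocate the device and its private data, seed the
 * MAC address and RSS defaults, wire up the ops table and pick the Rx/Tx
 * burst functions according to the "copy" and "no-rx" options.
 */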
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we reuse the data element of the originally allocated eth_dev,
	 * so the null queues stay local to this process.
	 */
	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -EINVAL;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -EINVAL;

	return 0;
}
static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -EINVAL;

	*(unsigned int *)extra_args = no_rx;

	return 0;
}
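/*
 * vdev probe entry point: in a secondary process, attach to the existing port
 * and only re-select the burst functions; in the primary process, parse the
 * devargs and create a new port.
 */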
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"The %s and %s arguments cannot be used at the same time",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -1;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because it is part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");
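/*
 * Example usage (typical EAL --vdev syntax; the exact application command
 * line may differ):
 *
 *   dpdk-testpmd --vdev=net_null0,size=128,copy=1 -- -i
 *   dpdk-testpmd --vdev=net_null0,no-rx=1 -- -i
 *
 * "copy" and "no-rx" cannot be combined (see rte_pmd_null_probe()).
 */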
RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}