/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"
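
/*
 * Usage sketch (illustrative): the driver is instantiated through an EAL
 * --vdev argument using the keys above, e.g.
 *
 *	--vdev=net_null0,size=64,copy=1,no-rx=0
 *
 * "size" sets the dummy packet length, "copy" enables payload copies on
 * Rx/Tx, and "no-rx" turns the port into a pure Tx sink.
 */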

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_null_logtype, pmd.net.null, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

/* Rx burst: allocate nb_bufs mbufs from the queue's mempool and present
 * them as received packets; the payload is left untouched.
 */
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
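
/*
 * Application-side sketch (illustrative): a burst read from a null port
 * yields either nb freshly allocated packet_size-byte mbufs or nothing at
 * all when the bulk allocation fails:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	// nb == 32 (payload uninitialized) or 0 if the mempool ran dry
 */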

/* Rx burst for copy mode: same as eth_null_rx() but each mbuf payload is
 * filled from the per-queue dummy packet.
 */
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

/* Rx burst for no-rx mode: the port never receives anything. */
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

/* Tx burst: drop every packet, counting it as sent. */
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

/* Tx burst for copy mode: copy each payload into the per-queue dummy
 * packet before dropping it, so the copy cost is actually paid.
 */
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}
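
/*
 * Application-side sketch (illustrative): the per-queue counters above
 * surface through the standard stats API:
 *
 *	struct rte_eth_stats stats;
 *	rte_eth_stats_get(port_id, &stats);
 *	// stats.ipackets/opackets sum the per-queue rx_pkts/tx_pkts
 */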

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
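
/*
 * Application-side sketch (illustrative): the RSS callbacks above back the
 * generic ethdev API, e.g. reading back the 40-byte hash key:
 *
 *	uint8_t key[40];
 *	struct rte_eth_rss_conf conf = { .rss_key = key, .rss_key_len = 40 };
 *	rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 */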

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	/* mac_addrs must not be freed alone because it is part of dev_private */
	dev->data->mac_addrs = NULL;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -EINVAL;

	return 0;
}

static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -EINVAL;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -EINVAL;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Arguments %s and %s cannot be used at the same time",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -1;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");
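
/*
 * Example invocation (illustrative): two null ports driven by testpmd,
 * with copy mode enabled so the per-packet memcpy cost is actually
 * measured:
 *
 *	dpdk-testpmd --vdev=net_null0,copy=1 --vdev=net_null1,copy=1 -- -i
 */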