1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) IGEL Co.,Ltd.
7 #include <rte_ethdev_driver.h>
8 #include <rte_ethdev_vdev.h>
9 #include <rte_malloc.h>
10 #include <rte_memcpy.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_kvargs.h>
13 #include <rte_spinlock.h>
/* Device argument names accepted by this vdev (e.g. --vdev=net_null0,size=64). */
#define ETH_NULL_PACKET_SIZE_ARG "size"
#define ETH_NULL_PACKET_COPY_ARG "copy"
#define ETH_NULL_PACKET_NO_RX_ARG "no-rx"

/* Defaults applied when a devarg is absent: 64-byte packets, copy and no-rx off. */
static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;
23 static const char *valid_arguments[] = {
24 ETH_NULL_PACKET_SIZE_ARG,
25 ETH_NULL_PACKET_COPY_ARG,
26 ETH_NULL_PACKET_NO_RX_ARG,
33 struct pmd_internals *internals;
35 struct rte_mempool *mb_pool;
36 struct rte_mbuf *dummy_packet;
38 rte_atomic64_t rx_pkts;
39 rte_atomic64_t tx_pkts;
/*
 * Devarg values parsed in probe before the ethdev is created.
 * no_rx is required: rte_pmd_null_probe() reads/writes args.no_rx.
 */
struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};
48 struct pmd_internals {
49 unsigned int packet_size;
50 unsigned int packet_copy;
54 struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
55 struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
57 struct rte_ether_addr eth_addr;
58 /** Bit mask of RSS offloads, the bit offset also means flow type */
59 uint64_t flow_type_rss_offloads;
61 rte_spinlock_t rss_lock;
64 struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
67 uint8_t rss_key[40]; /**< 40-byte hash key. */
69 static struct rte_eth_link pmd_link = {
70 .link_speed = ETH_SPEED_NUM_10G,
71 .link_duplex = ETH_LINK_FULL_DUPLEX,
72 .link_status = ETH_LINK_DOWN,
73 .link_autoneg = ETH_LINK_FIXED,
/* Driver log type, registered under "pmd.net.null" with default level NOTICE. */
RTE_LOG_REGISTER(eth_null_logtype, pmd.net.null, NOTICE);

/* Driver-local logging helper; prefixes every message with the function name. */
#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
83 eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
86 struct null_queue *h = q;
87 unsigned int packet_size;
89 if ((q == NULL) || (bufs == NULL))
92 packet_size = h->internals->packet_size;
93 if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
96 for (i = 0; i < nb_bufs; i++) {
97 bufs[i]->data_len = (uint16_t)packet_size;
98 bufs[i]->pkt_len = packet_size;
99 bufs[i]->port = h->internals->port_id;
102 rte_atomic64_add(&(h->rx_pkts), i);
108 eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
111 struct null_queue *h = q;
112 unsigned int packet_size;
114 if ((q == NULL) || (bufs == NULL))
117 packet_size = h->internals->packet_size;
118 if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
121 for (i = 0; i < nb_bufs; i++) {
122 rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
124 bufs[i]->data_len = (uint16_t)packet_size;
125 bufs[i]->pkt_len = packet_size;
126 bufs[i]->port = h->internals->port_id;
129 rte_atomic64_add(&(h->rx_pkts), i);
135 eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
136 uint16_t nb_bufs __rte_unused)
142 eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
145 struct null_queue *h = q;
147 if ((q == NULL) || (bufs == NULL))
150 for (i = 0; i < nb_bufs; i++)
151 rte_pktmbuf_free(bufs[i]);
153 rte_atomic64_add(&(h->tx_pkts), i);
159 eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
162 struct null_queue *h = q;
163 unsigned int packet_size;
165 if ((q == NULL) || (bufs == NULL))
168 packet_size = h->internals->packet_size;
169 for (i = 0; i < nb_bufs; i++) {
170 rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
172 rte_pktmbuf_free(bufs[i]);
175 rte_atomic64_add(&(h->tx_pkts), i);
181 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
187 eth_dev_start(struct rte_eth_dev *dev)
192 dev->data->dev_link.link_status = ETH_LINK_UP;
197 eth_dev_stop(struct rte_eth_dev *dev)
202 dev->data->dev_link.link_status = ETH_LINK_DOWN;
206 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
207 uint16_t nb_rx_desc __rte_unused,
208 unsigned int socket_id __rte_unused,
209 const struct rte_eth_rxconf *rx_conf __rte_unused,
210 struct rte_mempool *mb_pool)
212 struct rte_mbuf *dummy_packet;
213 struct pmd_internals *internals;
214 unsigned int packet_size;
216 if ((dev == NULL) || (mb_pool == NULL))
219 internals = dev->data->dev_private;
221 if (rx_queue_id >= dev->data->nb_rx_queues)
224 packet_size = internals->packet_size;
226 internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
227 dev->data->rx_queues[rx_queue_id] =
228 &internals->rx_null_queues[rx_queue_id];
229 dummy_packet = rte_zmalloc_socket(NULL,
230 packet_size, 0, dev->data->numa_node);
231 if (dummy_packet == NULL)
234 internals->rx_null_queues[rx_queue_id].internals = internals;
235 internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;
241 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
242 uint16_t nb_tx_desc __rte_unused,
243 unsigned int socket_id __rte_unused,
244 const struct rte_eth_txconf *tx_conf __rte_unused)
246 struct rte_mbuf *dummy_packet;
247 struct pmd_internals *internals;
248 unsigned int packet_size;
253 internals = dev->data->dev_private;
255 if (tx_queue_id >= dev->data->nb_tx_queues)
258 packet_size = internals->packet_size;
260 dev->data->tx_queues[tx_queue_id] =
261 &internals->tx_null_queues[tx_queue_id];
262 dummy_packet = rte_zmalloc_socket(NULL,
263 packet_size, 0, dev->data->numa_node);
264 if (dummy_packet == NULL)
267 internals->tx_null_queues[tx_queue_id].internals = internals;
268 internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
274 eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
280 eth_dev_info(struct rte_eth_dev *dev,
281 struct rte_eth_dev_info *dev_info)
283 struct pmd_internals *internals;
285 if ((dev == NULL) || (dev_info == NULL))
288 internals = dev->data->dev_private;
289 dev_info->max_mac_addrs = 1;
290 dev_info->max_rx_pktlen = (uint32_t)-1;
291 dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
292 dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
293 dev_info->min_rx_bufsize = 0;
294 dev_info->reta_size = internals->reta_size;
295 dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
301 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
303 unsigned int i, num_stats;
304 unsigned long rx_total = 0, tx_total = 0;
305 const struct pmd_internals *internal;
307 if ((dev == NULL) || (igb_stats == NULL))
310 internal = dev->data->dev_private;
311 num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
312 RTE_MIN(dev->data->nb_rx_queues,
313 RTE_DIM(internal->rx_null_queues)));
314 for (i = 0; i < num_stats; i++) {
315 igb_stats->q_ipackets[i] =
316 internal->rx_null_queues[i].rx_pkts.cnt;
317 rx_total += igb_stats->q_ipackets[i];
320 num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
321 RTE_MIN(dev->data->nb_tx_queues,
322 RTE_DIM(internal->tx_null_queues)));
323 for (i = 0; i < num_stats; i++) {
324 igb_stats->q_opackets[i] =
325 internal->tx_null_queues[i].tx_pkts.cnt;
326 tx_total += igb_stats->q_opackets[i];
329 igb_stats->ipackets = rx_total;
330 igb_stats->opackets = tx_total;
336 eth_stats_reset(struct rte_eth_dev *dev)
339 struct pmd_internals *internal;
344 internal = dev->data->dev_private;
345 for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
346 internal->rx_null_queues[i].rx_pkts.cnt = 0;
347 for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
348 internal->tx_null_queues[i].tx_pkts.cnt = 0;
354 eth_queue_release(void *q)
356 struct null_queue *nq;
362 rte_free(nq->dummy_packet);
366 eth_link_update(struct rte_eth_dev *dev __rte_unused,
367 int wait_to_complete __rte_unused) { return 0; }
370 eth_rss_reta_update(struct rte_eth_dev *dev,
371 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
374 struct pmd_internals *internal = dev->data->dev_private;
376 if (reta_size != internal->reta_size)
379 rte_spinlock_lock(&internal->rss_lock);
381 /* Copy RETA table */
382 for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
383 internal->reta_conf[i].mask = reta_conf[i].mask;
384 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
385 if ((reta_conf[i].mask >> j) & 0x01)
386 internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
389 rte_spinlock_unlock(&internal->rss_lock);
395 eth_rss_reta_query(struct rte_eth_dev *dev,
396 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
399 struct pmd_internals *internal = dev->data->dev_private;
401 if (reta_size != internal->reta_size)
404 rte_spinlock_lock(&internal->rss_lock);
406 /* Copy RETA table */
407 for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
408 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
409 if ((reta_conf[i].mask >> j) & 0x01)
410 reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
413 rte_spinlock_unlock(&internal->rss_lock);
419 eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
421 struct pmd_internals *internal = dev->data->dev_private;
423 rte_spinlock_lock(&internal->rss_lock);
425 if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
426 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
427 rss_conf->rss_hf & internal->flow_type_rss_offloads;
429 if (rss_conf->rss_key)
430 rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);
432 rte_spinlock_unlock(&internal->rss_lock);
438 eth_rss_hash_conf_get(struct rte_eth_dev *dev,
439 struct rte_eth_rss_conf *rss_conf)
441 struct pmd_internals *internal = dev->data->dev_private;
443 rte_spinlock_lock(&internal->rss_lock);
445 rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
446 if (rss_conf->rss_key)
447 rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
449 rte_spinlock_unlock(&internal->rss_lock);
455 eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
456 __rte_unused struct rte_ether_addr *addr)
462 eth_dev_close(struct rte_eth_dev *dev)
464 PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
467 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
470 /* mac_addrs must not be freed alone because part of dev_private */
471 dev->data->mac_addrs = NULL;
476 static const struct eth_dev_ops ops = {
477 .dev_close = eth_dev_close,
478 .dev_start = eth_dev_start,
479 .dev_stop = eth_dev_stop,
480 .dev_configure = eth_dev_configure,
481 .dev_infos_get = eth_dev_info,
482 .rx_queue_setup = eth_rx_queue_setup,
483 .tx_queue_setup = eth_tx_queue_setup,
484 .rx_queue_release = eth_queue_release,
485 .tx_queue_release = eth_queue_release,
486 .mtu_set = eth_mtu_set,
487 .link_update = eth_link_update,
488 .mac_addr_set = eth_mac_address_set,
489 .stats_get = eth_stats_get,
490 .stats_reset = eth_stats_reset,
491 .reta_update = eth_rss_reta_update,
492 .reta_query = eth_rss_reta_query,
493 .rss_hash_update = eth_rss_hash_update,
494 .rss_hash_conf_get = eth_rss_hash_conf_get
498 eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
500 const unsigned int nb_rx_queues = 1;
501 const unsigned int nb_tx_queues = 1;
502 struct rte_eth_dev_data *data;
503 struct pmd_internals *internals = NULL;
504 struct rte_eth_dev *eth_dev = NULL;
506 static const uint8_t default_rss_key[40] = {
507 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
508 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
509 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
510 0xBE, 0xAC, 0x01, 0xFA
513 if (dev->device.numa_node == SOCKET_ID_ANY)
514 dev->device.numa_node = rte_socket_id();
516 PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
517 dev->device.numa_node);
519 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
523 /* now put it all together
524 * - store queue data in internals,
525 * - store numa_node info in ethdev data
526 * - point eth_dev_data to internals
527 * - and point eth_dev structure to new eth_dev_data structure
529 /* NOTE: we'll replace the data element, of originally allocated eth_dev
530 * so the nulls are local per-process */
532 internals = eth_dev->data->dev_private;
533 internals->packet_size = args->packet_size;
534 internals->packet_copy = args->packet_copy;
535 internals->no_rx = args->no_rx;
536 internals->port_id = eth_dev->data->port_id;
537 rte_eth_random_addr(internals->eth_addr.addr_bytes);
539 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
540 internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
542 rte_memcpy(internals->rss_key, default_rss_key, 40);
544 data = eth_dev->data;
545 data->nb_rx_queues = (uint16_t)nb_rx_queues;
546 data->nb_tx_queues = (uint16_t)nb_tx_queues;
547 data->dev_link = pmd_link;
548 data->mac_addrs = &internals->eth_addr;
549 data->promiscuous = 1;
550 data->all_multicast = 1;
552 eth_dev->dev_ops = &ops;
554 /* finally assign rx and tx ops */
555 if (internals->packet_copy) {
556 eth_dev->rx_pkt_burst = eth_null_copy_rx;
557 eth_dev->tx_pkt_burst = eth_null_copy_tx;
558 } else if (internals->no_rx) {
559 eth_dev->rx_pkt_burst = eth_null_no_rx;
560 eth_dev->tx_pkt_burst = eth_null_tx;
562 eth_dev->rx_pkt_burst = eth_null_rx;
563 eth_dev->tx_pkt_burst = eth_null_tx;
566 rte_eth_dev_probing_finish(eth_dev);
571 get_packet_size_arg(const char *key __rte_unused,
572 const char *value, void *extra_args)
574 const char *a = value;
575 unsigned int *packet_size = extra_args;
577 if ((value == NULL) || (extra_args == NULL))
580 *packet_size = (unsigned int)strtoul(a, NULL, 0);
581 if (*packet_size == UINT_MAX)
588 get_packet_copy_arg(const char *key __rte_unused,
589 const char *value, void *extra_args)
591 const char *a = value;
592 unsigned int *packet_copy = extra_args;
594 if ((value == NULL) || (extra_args == NULL))
597 *packet_copy = (unsigned int)strtoul(a, NULL, 0);
598 if (*packet_copy == UINT_MAX)
605 get_packet_no_rx_arg(const char *key __rte_unused,
606 const char *value, void *extra_args)
608 const char *a = value;
611 if (value == NULL || extra_args == NULL)
614 no_rx = (unsigned int)strtoul(a, NULL, 0);
615 if (no_rx != 0 && no_rx != 1)
618 *(unsigned int *)extra_args = no_rx;
623 rte_pmd_null_probe(struct rte_vdev_device *dev)
625 const char *name, *params;
626 struct pmd_options args = {
627 .packet_copy = default_packet_copy,
628 .packet_size = default_packet_size,
629 .no_rx = default_no_rx,
631 struct rte_kvargs *kvlist = NULL;
632 struct rte_eth_dev *eth_dev;
638 name = rte_vdev_device_name(dev);
639 params = rte_vdev_device_args(dev);
640 PMD_LOG(INFO, "Initializing pmd_null for %s", name);
642 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
643 struct pmd_internals *internals;
644 eth_dev = rte_eth_dev_attach_secondary(name);
646 PMD_LOG(ERR, "Failed to probe %s", name);
649 /* TODO: request info from primary to set up Rx and Tx */
650 eth_dev->dev_ops = &ops;
651 eth_dev->device = &dev->device;
652 internals = eth_dev->data->dev_private;
653 if (internals->packet_copy) {
654 eth_dev->rx_pkt_burst = eth_null_copy_rx;
655 eth_dev->tx_pkt_burst = eth_null_copy_tx;
656 } else if (internals->no_rx) {
657 eth_dev->rx_pkt_burst = eth_null_no_rx;
658 eth_dev->tx_pkt_burst = eth_null_tx;
660 eth_dev->rx_pkt_burst = eth_null_rx;
661 eth_dev->tx_pkt_burst = eth_null_tx;
663 rte_eth_dev_probing_finish(eth_dev);
667 if (params != NULL) {
668 kvlist = rte_kvargs_parse(params, valid_arguments);
672 ret = rte_kvargs_process(kvlist,
673 ETH_NULL_PACKET_SIZE_ARG,
674 &get_packet_size_arg, &args.packet_size);
679 ret = rte_kvargs_process(kvlist,
680 ETH_NULL_PACKET_COPY_ARG,
681 &get_packet_copy_arg, &args.packet_copy);
685 ret = rte_kvargs_process(kvlist,
686 ETH_NULL_PACKET_NO_RX_ARG,
687 &get_packet_no_rx_arg, &args.no_rx);
691 if (args.no_rx && args.packet_copy) {
693 "Both %s and %s arguments at the same time not supported",
694 ETH_NULL_PACKET_COPY_ARG,
695 ETH_NULL_PACKET_NO_RX_ARG);
700 PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
701 "packet copy is %s", args.packet_size,
702 args.packet_copy ? "enabled" : "disabled");
704 ret = eth_dev_null_create(dev, &args);
708 rte_kvargs_free(kvlist);
713 rte_pmd_null_remove(struct rte_vdev_device *dev)
715 struct rte_eth_dev *eth_dev = NULL;
720 /* find the ethdev entry */
721 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
723 return 0; /* port already released */
725 eth_dev_close(eth_dev);
726 rte_eth_dev_release_port(eth_dev);
731 static struct rte_vdev_driver pmd_null_drv = {
732 .probe = rte_pmd_null_probe,
733 .remove = rte_pmd_null_remove,
736 RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
737 RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
738 RTE_PMD_REGISTER_PARAM_STRING(net_null,
741 ETH_NULL_PACKET_NO_RX_ARG "=0|1");