/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
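/*
 * Both options arrive as vdev device arguments. Illustrative command line
 * (not part of this file):
 *   --vdev=net_null0,size=128,copy=1
 * "size" is the synthetic packet length in bytes; "copy" selects the
 * memcpy-based Rx/Tx paths below.
 */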
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
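/*
 * Note: this no-copy Rx path never touches the mbuf payload; the "received"
 * bytes are whatever the mempool buffer last held. Only the lengths, the
 * port and the counters are maintained.
 */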
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
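/*
 * The copy variant differs only in the rte_memcpy() from the per-queue
 * dummy_packet buffer, so every mbuf carries packet_size bytes of
 * well-defined (zeroed) payload at the cost of one copy per packet.
 */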
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
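/*
 * On Tx the copy path reads each payload back into dummy_packet before the
 * mbuf is freed, so transmitted data is actually touched; eth_null_tx()
 * above frees the mbufs without reading them at all.
 */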
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
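/*
 * Each queue owns a packet_size-byte scratch buffer. rte_zmalloc_socket()
 * returns zeroed memory, which is what gives the copy Rx path its all-zero
 * payload.
 */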
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}
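/*
 * Per-queue counters are only exported for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues (the size of the q_ipackets[] and
 * q_opackets[] arrays); the port totals above are summed over that same
 * subset.
 */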
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
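/*
 * The RETA is purely emulated: entries written here are only stored in
 * dev_private and read back by eth_rss_reta_query(); no packet is ever
 * steered through them, since this driver sources and sinks its own traffic.
 */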
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
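/*
 * Callbacks deliberately left out of this table (promiscuous mode, xstats,
 * etc.) make the corresponding rte_eth_* calls fall back to ethdev-layer
 * defaults, typically -ENOTSUP.
 */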
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	/* The well-known 40-byte Toeplitz key used as a default by
	 * several DPDK PMDs. */
	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
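/*
 * Minimal sketch of driving the finished port from an application
 * (illustrative only; assumes an initialized EAL and a mempool "mp"):
 *
 *   struct rte_eth_conf conf = { 0 };
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mp);
 *   rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
 *   rte_eth_dev_start(port_id);
 *   nb = rte_eth_rx_burst(port_id, 0, pkts, 32);   // always "receives"
 */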
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -EINVAL;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -EINVAL;

	return 0;
}
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
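/*
 * Illustrative invocation (not part of this file): a primary process can
 * create two ports with
 *   testpmd -l 0-1 --vdev=net_null0 --vdev=net_null1,size=256,copy=1 -- -i
 * Secondary processes skip the kvargs parsing entirely and just attach to
 * the ports above.
 */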
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");

RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}