/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 * All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};
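/*
 * The port always advertises a fixed 10G full-duplex link; only the
 * up/down status changes, toggled by eth_dev_start()/eth_dev_stop().
 */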
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
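/*
 * Rx burst, no-copy mode: allocate nb_bufs mbufs from the queue's
 * mempool and present them as received.  Only the lengths and the
 * port id are set; the payload is left untouched.
 */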
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
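/*
 * Rx burst, copy mode: as eth_null_rx(), but additionally copy the
 * queue's zero-filled dummy packet into each mbuf so that the payload
 * is deterministic, at the cost of one rte_memcpy() per packet.
 */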
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
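/*
 * Tx burst, no-copy mode: a packet is "transmitted" by simply freeing
 * the mbuf back to its pool.
 */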
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
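/*
 * Tx burst, copy mode: read each payload back into the queue's dummy
 * packet buffer before freeing the mbuf, so the Tx path touches the
 * packet data like a real device would.
 */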
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
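/*
 * Queue setup: the queue structures live inside dev_private, so setup
 * only wires up pointers and allocates the per-queue dummy packet used
 * by the copy-mode burst functions.
 */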
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}
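/*
 * Stats: only per-queue packet counters are maintained (in
 * rte_atomic64_t); no byte or error counters are reported.
 */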
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
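/*
 * RSS control: the PMD keeps a software copy of the RETA table and the
 * 40-byte hash key so applications exercising the RSS API observe
 * consistent state; no actual hashing or queue steering is performed.
 */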
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the nulls are local per-process
	 */
	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
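/*
 * Devargs handlers: both values are parsed with strtoul(..., 0), so
 * decimal, hexadecimal ("0x...") and octal notations are accepted.
 */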
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
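/*
 * In a secondary process the existing port is attached rather than
 * re-created; Rx/Tx queue state is not yet shared from the primary,
 * hence the TODO below.
 */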
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};
RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
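/*
 * Example invocation (assuming the standard dpdk-testpmd binary; any
 * application accepting EAL --vdev arguments works the same way):
 *
 *   dpdk-testpmd --vdev=net_null0,size=128,copy=1 -- -i
 *
 * creates one null port that produces 128-byte packets with payload
 * copying enabled on both the Rx and Tx paths.
 */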
RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}