/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

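/*
 * Device arguments accepted by the null PMD. The port is typically
 * created from the EAL command line, e.g.:
 *   --vdev=net_null0,size=64,copy=1
 * "size" sets the synthetic packet length, "copy" forces a memcpy of
 * each packet on Rx and Tx, and "no-rx" makes the port a pure Tx sink.
 */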
#define ETH_NULL_PACKET_SIZE_ARG "size"
#define ETH_NULL_PACKET_COPY_ARG "copy"
#define ETH_NULL_PACKET_NO_RX_ARG "no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

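/* Per-queue state: the mbuf pool to draw from on Rx, a scratch packet
 * used by the copy paths, and packet counters for statistics. */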
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			RTE_ETH_RETA_GROUP_SIZE];

	uint8_t rss_key[40]; /**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

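/* Rx burst for copy mode: same as above, but also copy the queue's
 * dummy packet into each mbuf so the payload is actually written. */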
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

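/* Rx burst for no-rx mode: always report zero received packets, so the
 * port behaves as a pure packet sink. */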
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

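/* Tx burst: free every mbuf handed in; nothing goes on a wire. */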
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

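/* Tx burst for copy mode: read each payload back into the queue's dummy
 * packet before freeing, so the transmitted bytes are actually touched. */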
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->rx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->tx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

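/* The RSS ops below only record state under rss_lock; no datapath in
 * this driver consumes it, which is enough for code that exercises the
 * ethdev RSS API against a null port. */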
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
	__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

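/* Build one null ethdev: a single Rx and Tx queue, a random MAC, the
 * full RSS offload mask, and burst handlers picked from the copy/no-rx
 * options parsed at probe time. */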
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */
	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

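/* kvargs handlers: each one parses a single devarg value with strtoul()
 * and is invoked once per matching key by rte_kvargs_process(). */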
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

601 get_packet_copy_arg(const char *key __rte_unused,
602 const char *value, void *extra_args)
604 const char *a = value;
605 unsigned int *packet_copy = extra_args;
607 if ((value == NULL) || (extra_args == NULL))
610 *packet_copy = (unsigned int)strtoul(a, NULL, 0);
611 if (*packet_copy == UINT_MAX)
618 get_packet_no_rx_arg(const char *key __rte_unused,
619 const char *value, void *extra_args)
621 const char *a = value;
624 if (value == NULL || extra_args == NULL)
627 no_rx = (unsigned int)strtoul(a, NULL, 0);
628 if (no_rx != 0 && no_rx != 1)
631 *(unsigned int *)extra_args = no_rx;
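/* vdev probe: parse the devargs, reject copy together with no-rx, and
 * create the port. A secondary process only attaches to the existing
 * port and re-resolves the per-process burst function pointers. */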
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Both %s and %s arguments at the same time not supported",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");