/*-
 *   BSD LICENSE
 *
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of IGEL Co.,Ltd. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
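
/*
 * Devargs accepted by this PMD: "size" sets the synthetic packet length in
 * bytes and "copy" (0/1) selects the memcpy-ing rx/tx paths. A typical
 * (hypothetical) invocation passing both arguments might look like:
 *
 *   --vdev 'net_null0,size=128,copy=1'
 */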
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};
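
/*
 * Note on RETA sizing, derived from the declarations above: with
 * ETH_RSS_RETA_SIZE_128 == 128 entries and RTE_RETA_GROUP_SIZE == 64
 * entries per rte_eth_rss_reta_entry64 group, reta_conf[] holds
 * 128 / 64 = 2 groups, and the device reports
 * reta_size = 2 * 64 = 128 to applications via eth_dev_info().
 */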
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
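
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * once a port backed by this PMD is configured and started, the bursts
 * above are reached through the generic ethdev API, e.g.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *
 * With copy=0, eth_null_rx() only stamps lengths on freshly allocated
 * mbufs and eth_null_tx() frees everything it is handed.
 */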
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
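
/*
 * These callbacks are dispatched through the generic ethdev API. A minimal
 * (hypothetical) setup sequence exercising them, assuming a mempool mp and
 * a port_conf initialized by the application:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */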
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */
	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
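
/*
 * Example (hypothetical command line): exercising two null ports with
 * testpmd, with the copy mode enabled on the first one:
 *
 *   testpmd -l 0-3 -n 4 --vdev 'net_null0,copy=1' --vdev 'net_null1' -- -i
 */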
RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}