/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

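/*
 * Example usage (illustrative command line; any EAL application works the
 * same way):
 *   testpmd -l 0-1 --vdev=net_null0,size=64,copy=1 -- -i
 * "size" is the synthetic packet length in bytes, "copy" selects the
 * memcpy-based rx/tx paths below.
 */
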
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

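/*
 * Per-queue state; the same layout backs both RX and TX queues, and the
 * atomic counters feed eth_stats_get().
 */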
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

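/*
 * No-copy RX burst: allocate mbufs from the queue's mempool and hand them
 * up with their length fields set; the payload bytes are left untouched.
 */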
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

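/*
 * Copying RX burst: same as eth_null_rx() but fills each mbuf from the
 * queue's dummy packet, which adds a per-packet memcpy cost closer to a
 * real driver.
 */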
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

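/* No-copy TX burst: every mbuf is freed immediately and counted as sent. */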
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

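/*
 * Copying TX burst: read each payload into the queue's dummy packet
 * buffer before freeing the mbuf, so the data is actually touched.
 */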
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

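/*
 * Device lifecycle ops: the link state is software-only, so start/stop
 * just flip dev_link.link_status.
 */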
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

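/* TX queue setup mirrors the RX path, except that no mempool is needed. */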
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

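/*
 * Aggregate the per-queue atomic counters into device totals; per-queue
 * stats are reported for at most RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */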
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

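/*
 * RSS emulation: no packet is ever hashed, but the RETA and the 40-byte
 * key are kept under rss_lock so the reta_update/reta_query and
 * rss_hash_* ethdev APIs behave consistently.
 */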
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

/* forward declaration so drv_name below can reference the driver */
static struct rte_vdev_driver pmd_null_drv;

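/*
 * Create the null ethdev: allocate fresh eth_dev_data (replacing the
 * statically allocated entry so the device stays local to this process),
 * fill in the private structure, and wire in the copy or no-copy burst
 * functions selected by the "copy" option.
 */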
static int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = pmd_null_drv.driver.name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

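/* kvargs handlers for the "size" and "copy" device arguments. */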
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

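/*
 * vdev bus entry points: called when the device is created (e.g. via the
 * --vdev EAL option) and when it is detached.
 */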
static int
rte_pmd_null_probe(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");