/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of IGEL Co.,Ltd. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

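/*
 * The null PMD creates an ethdev that discards everything sent to it and
 * synthesizes packets on receive. Two devargs control it: "size" (length of
 * the synthesized/copied packets, default 64) and "copy" (non-zero enables
 * a memcpy on both the RX and TX paths, default off). An illustrative
 * testpmd invocation (the instance name "net_null0" is just an example):
 *
 *	testpmd -l 0-3 -n 4 --vdev=net_null0,size=128,copy=1 -- -i
 */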
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL	/* rte_kvargs_parse() requires a NULL-terminated list */
};

struct pmd_internals;

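/*
 * Per-queue state. The same structure backs RX and TX queues: RX queues
 * allocate mbufs from mb_pool, TX queues only count and free them, and the
 * copy modes use dummy_packet as their source/sink buffer.
 */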
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

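/*
 * Per-device private data, shared by every queue of the port. The RSS
 * fields (offload mask, RETA, hash key) only store configuration so that
 * applications exercising the RSS API get consistent answers back; no
 * actual hashing is performed on this device.
 */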
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads; the bit offset also means flow type. */
	uint64_t flow_type_rss_offloads;
	rte_spinlock_t rss_lock;
	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];
	uint8_t rss_key[40];	/**< 40-byte hash key. */
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

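/*
 * RX burst, no-copy mode: allocate up to nb_bufs mbufs and hand them back
 * as "received" packets of the configured size; the payload is left
 * uninitialized.
 */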
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);
	return i;
}

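/*
 * RX burst, copy mode: as eth_null_rx(), but the queue's zero-filled
 * dummy_packet buffer is copied into each mbuf so the measurement includes
 * an RX-side memcpy per packet.
 */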
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);
	return i;
}

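/* TX burst, no-copy mode: free every packet and count it as transmitted. */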
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;
	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}

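/*
 * TX burst, copy mode: copy the first packet_size bytes of each packet into
 * the dummy buffer before freeing it, modelling a TX-side memcpy.
 */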
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;
	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

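/*
 * Queue setup (RX and TX below): the null_queue structures live inside
 * pmd_internals, so setup only wires up pointers and allocates the
 * per-queue dummy packet buffer on the device's NUMA node.
 */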
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

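/*
 * Report device capabilities: limits come from the fixed-size queue arrays
 * in pmd_internals, and max_rx_pktlen is effectively unlimited.
 */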
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

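/*
 * Aggregate the per-queue atomic counters into rte_eth_stats. Note that
 * per-queue entries (and the totals derived from them) are capped at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */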
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;
	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;
	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

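/*
 * RSS handlers. The RETA and hash key are only stored and echoed back.
 * A minimal round-trip sketch (assuming port 0 is a null device with the
 * default 128-entry table; only group 0 is selected here):
 *
 *	struct rte_eth_rss_reta_entry64 conf[ETH_RSS_RETA_SIZE_128 /
 *			RTE_RETA_GROUP_SIZE] = { { .mask = ~0ULL } };
 *	rte_eth_dev_rss_reta_update(0, conf, ETH_RSS_RETA_SIZE_128);
 *	rte_eth_dev_rss_reta_query(0, conf, ETH_RSS_RETA_SIZE_128);
 */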
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy in the RETA entries selected by each group's mask. */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);
	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy out the RETA entries selected by each group's mask. */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);
	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);
	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);
	return 0;
}

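/* Hook the handlers above into the generic ethdev API. */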
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

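/*
 * Build the ethdev: allocate the private data and a fresh eth_dev_data on
 * the requested NUMA node, fill in the defaults (one RX and one TX queue,
 * fixed 10G link state, all-zero MAC) and pick the copy or no-copy burst
 * handlers according to the "copy" devarg.
 */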
static int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all the data allocation - for the eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		return -ENOMEM;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_free(data);
		return -ENOMEM;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_free(data);
		rte_free(internals);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	/* snprintf rather than strncpy(dst, src, strlen(src)), which relied
	 * on the zmalloc'ed buffer for NUL termination */
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = pmd_null_drv.driver.name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

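/*
 * Devargs handlers, invoked via rte_kvargs_process(); each one parses a
 * single "key=value" pair into the caller-provided unsigned int.
 */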
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -EINVAL;
	return 0;
}

static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -EINVAL;
	return 0;
}

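/*
 * Probe entry point: parse the optional "size" and "copy" devargs and
 * create the device on the invoking core's NUMA socket.
 */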
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

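/* Remove entry point: free the port's private data and release the ethdev. */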
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");