/*-
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];	/**< 40-byte hash key. */
};
static struct ether_addr eth_addr = { .addr_bytes = {0} };

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
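
/*
 * Worked example (illustrative only, not used by the driver): the RETA here
 * has ETH_RSS_RETA_SIZE_128 entries split into RTE_RETA_GROUP_SIZE (64) entry
 * groups, so internal->reta_conf holds two groups. A caller that only wants
 * entries 0-63 back would pass something like:
 *
 *   struct rte_eth_rss_reta_entry64 conf[2];
 *   memset(conf, 0, sizeof(conf));
 *   conf[0].mask = UINT64_MAX;       query every entry of group 0
 *   rte_eth_dev_rss_reta_query(port_id, conf, 128);
 *
 * reta_size must still equal the full table size (128 here), otherwise
 * eth_rss_reta_query() above returns -EINVAL.
 */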
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
static struct rte_vdev_driver pmd_null_drv;
static int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		return -ENOMEM;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = pmd_null_drv.driver.name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -ENOMEM;
}
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
static int
rte_pmd_null_probe(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
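
/*
 * Usage sketch (not part of the driver): the vdev is instantiated from an
 * application's EAL command line. The option names come from the "size" and
 * "copy" kvargs parsed above; the testpmd invocation below is only an
 * illustrative example and its EAL flags may differ per setup.
 *
 *   testpmd -l 0-1 -n 4 --vdev=net_null0,size=128,copy=1 -- -i
 *
 * This creates one null port whose rx path hands out 128-byte mbufs and
 * whose rx/tx paths copy the dummy packet instead of only counting bursts.
 */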