4 * Copyright (C) IGEL Co.,Ltd.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of IGEL Co.,Ltd. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"
/* devargs keys accepted by this PMD: size=<bytes>, copy=<0|1> */
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;	/* used when "size" is absent */
static unsigned default_packet_copy;		/* 0 = no-copy mode by default */

/* NULL-terminated list required by rte_kvargs_parse() */
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
59 struct pmd_internals *internals;
61 struct rte_mempool *mb_pool;
62 struct rte_mbuf *dummy_packet;
64 rte_atomic64_t rx_pkts;
65 rte_atomic64_t tx_pkts;
66 rte_atomic64_t err_pkts;
69 struct pmd_internals {
74 unsigned nb_rx_queues;
75 unsigned nb_tx_queues;
77 struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
78 struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
80 /** Bit mask of RSS offloads, the bit offset also means flow type */
81 uint64_t flow_type_rss_offloads;
83 rte_spinlock_t rss_lock;
86 struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
89 uint8_t rss_key[40]; /**< 40-byte hash key. */
93 static struct ether_addr eth_addr = { .addr_bytes = {0} };
94 static const char *drivername = "Null PMD";
95 static struct rte_eth_link pmd_link = {
97 .link_duplex = ETH_LINK_FULL_DUPLEX,
102 eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
105 struct null_queue *h = q;
106 unsigned packet_size;
108 if ((q == NULL) || (bufs == NULL))
111 packet_size = h->internals->packet_size;
112 for (i = 0; i < nb_bufs; i++) {
113 bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
116 bufs[i]->data_len = (uint16_t)packet_size;
117 bufs[i]->pkt_len = packet_size;
118 bufs[i]->nb_segs = 1;
119 bufs[i]->next = NULL;
122 rte_atomic64_add(&(h->rx_pkts), i);
128 eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
131 struct null_queue *h = q;
132 unsigned packet_size;
134 if ((q == NULL) || (bufs == NULL))
137 packet_size = h->internals->packet_size;
138 for (i = 0; i < nb_bufs; i++) {
139 bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
142 rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
144 bufs[i]->data_len = (uint16_t)packet_size;
145 bufs[i]->pkt_len = packet_size;
146 bufs[i]->nb_segs = 1;
147 bufs[i]->next = NULL;
150 rte_atomic64_add(&(h->rx_pkts), i);
156 eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
159 struct null_queue *h = q;
161 if ((q == NULL) || (bufs == NULL))
164 for (i = 0; i < nb_bufs; i++)
165 rte_pktmbuf_free(bufs[i]);
167 rte_atomic64_add(&(h->tx_pkts), i);
173 eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
176 struct null_queue *h = q;
177 unsigned packet_size;
179 if ((q == NULL) || (bufs == NULL))
182 packet_size = h->internals->packet_size;
183 for (i = 0; i < nb_bufs; i++) {
184 rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
186 rte_pktmbuf_free(bufs[i]);
189 rte_atomic64_add(&(h->tx_pkts), i);
195 eth_dev_configure(struct rte_eth_dev *dev) {
196 struct pmd_internals *internals;
198 internals = dev->data->dev_private;
199 internals->nb_rx_queues = dev->data->nb_rx_queues;
200 internals->nb_tx_queues = dev->data->nb_tx_queues;
206 eth_dev_start(struct rte_eth_dev *dev)
211 dev->data->dev_link.link_status = 1;
216 eth_dev_stop(struct rte_eth_dev *dev)
221 dev->data->dev_link.link_status = 0;
225 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
226 uint16_t nb_rx_desc __rte_unused,
227 unsigned int socket_id __rte_unused,
228 const struct rte_eth_rxconf *rx_conf __rte_unused,
229 struct rte_mempool *mb_pool)
231 struct rte_mbuf *dummy_packet;
232 struct pmd_internals *internals;
233 unsigned packet_size;
235 if ((dev == NULL) || (mb_pool == NULL))
238 internals = dev->data->dev_private;
240 if (rx_queue_id >= internals->nb_rx_queues)
243 packet_size = internals->packet_size;
245 internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
246 dev->data->rx_queues[rx_queue_id] =
247 &internals->rx_null_queues[rx_queue_id];
248 dummy_packet = rte_zmalloc_socket(NULL,
249 packet_size, 0, internals->numa_node);
250 if (dummy_packet == NULL)
253 internals->rx_null_queues[rx_queue_id].internals = internals;
254 internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;
260 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
261 uint16_t nb_tx_desc __rte_unused,
262 unsigned int socket_id __rte_unused,
263 const struct rte_eth_txconf *tx_conf __rte_unused)
265 struct rte_mbuf *dummy_packet;
266 struct pmd_internals *internals;
267 unsigned packet_size;
272 internals = dev->data->dev_private;
274 if (tx_queue_id >= internals->nb_tx_queues)
277 packet_size = internals->packet_size;
279 dev->data->tx_queues[tx_queue_id] =
280 &internals->tx_null_queues[tx_queue_id];
281 dummy_packet = rte_zmalloc_socket(NULL,
282 packet_size, 0, internals->numa_node);
283 if (dummy_packet == NULL)
286 internals->tx_null_queues[tx_queue_id].internals = internals;
287 internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
294 eth_dev_info(struct rte_eth_dev *dev,
295 struct rte_eth_dev_info *dev_info)
297 struct pmd_internals *internals;
299 if ((dev == NULL) || (dev_info == NULL))
302 internals = dev->data->dev_private;
303 dev_info->driver_name = drivername;
304 dev_info->max_mac_addrs = 1;
305 dev_info->max_rx_pktlen = (uint32_t)-1;
306 dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
307 dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
308 dev_info->min_rx_bufsize = 0;
309 dev_info->pci_dev = NULL;
310 dev_info->reta_size = internals->reta_size;
311 dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
315 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
317 unsigned i, num_stats;
318 unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
319 const struct pmd_internals *internal;
321 if ((dev == NULL) || (igb_stats == NULL))
324 internal = dev->data->dev_private;
325 num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
326 RTE_MIN(internal->nb_rx_queues,
327 RTE_DIM(internal->rx_null_queues)));
328 for (i = 0; i < num_stats; i++) {
329 igb_stats->q_ipackets[i] =
330 internal->rx_null_queues[i].rx_pkts.cnt;
331 rx_total += igb_stats->q_ipackets[i];
334 num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
335 RTE_MIN(internal->nb_tx_queues,
336 RTE_DIM(internal->tx_null_queues)));
337 for (i = 0; i < num_stats; i++) {
338 igb_stats->q_opackets[i] =
339 internal->tx_null_queues[i].tx_pkts.cnt;
340 igb_stats->q_errors[i] =
341 internal->tx_null_queues[i].err_pkts.cnt;
342 tx_total += igb_stats->q_opackets[i];
343 tx_err_total += igb_stats->q_errors[i];
346 igb_stats->ipackets = rx_total;
347 igb_stats->opackets = tx_total;
348 igb_stats->oerrors = tx_err_total;
352 eth_stats_reset(struct rte_eth_dev *dev)
355 struct pmd_internals *internal;
360 internal = dev->data->dev_private;
361 for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
362 internal->rx_null_queues[i].rx_pkts.cnt = 0;
363 for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
364 internal->tx_null_queues[i].tx_pkts.cnt = 0;
365 internal->tx_null_queues[i].err_pkts.cnt = 0;
369 static struct eth_driver rte_null_pmd = {
371 .name = "rte_null_pmd",
372 .drv_flags = RTE_PCI_DRV_DETACHABLE,
377 eth_queue_release(void *q)
379 struct null_queue *nq;
385 rte_free(nq->dummy_packet);
389 eth_link_update(struct rte_eth_dev *dev __rte_unused,
390 int wait_to_complete __rte_unused) { return 0; }
393 eth_rss_reta_update(struct rte_eth_dev *dev,
394 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
397 struct pmd_internals *internal = dev->data->dev_private;
399 if (reta_size != internal->reta_size)
402 rte_spinlock_lock(&internal->rss_lock);
404 /* Copy RETA table */
405 for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
406 internal->reta_conf[i].mask = reta_conf[i].mask;
407 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
408 if ((reta_conf[i].mask >> j) & 0x01)
409 internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
412 rte_spinlock_unlock(&internal->rss_lock);
418 eth_rss_reta_query(struct rte_eth_dev *dev,
419 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
422 struct pmd_internals *internal = dev->data->dev_private;
424 if (reta_size != internal->reta_size)
427 rte_spinlock_lock(&internal->rss_lock);
429 /* Copy RETA table */
430 for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
431 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
432 if ((reta_conf[i].mask >> j) & 0x01)
433 reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
436 rte_spinlock_unlock(&internal->rss_lock);
442 eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
444 struct pmd_internals *internal = dev->data->dev_private;
446 rte_spinlock_lock(&internal->rss_lock);
448 if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
449 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
450 rss_conf->rss_hf & internal->flow_type_rss_offloads;
452 if (rss_conf->rss_key)
453 rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);
455 rte_spinlock_unlock(&internal->rss_lock);
461 eth_rss_hash_conf_get(struct rte_eth_dev *dev,
462 struct rte_eth_rss_conf *rss_conf)
464 struct pmd_internals *internal = dev->data->dev_private;
466 rte_spinlock_lock(&internal->rss_lock);
468 rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
469 if (rss_conf->rss_key)
470 rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
472 rte_spinlock_unlock(&internal->rss_lock);
477 static const struct eth_dev_ops ops = {
478 .dev_start = eth_dev_start,
479 .dev_stop = eth_dev_stop,
480 .dev_configure = eth_dev_configure,
481 .dev_infos_get = eth_dev_info,
482 .rx_queue_setup = eth_rx_queue_setup,
483 .tx_queue_setup = eth_tx_queue_setup,
484 .rx_queue_release = eth_queue_release,
485 .tx_queue_release = eth_queue_release,
486 .link_update = eth_link_update,
487 .stats_get = eth_stats_get,
488 .stats_reset = eth_stats_reset,
489 .reta_update = eth_rss_reta_update,
490 .reta_query = eth_rss_reta_query,
491 .rss_hash_update = eth_rss_hash_update,
492 .rss_hash_conf_get = eth_rss_hash_conf_get
496 eth_dev_null_create(const char *name,
497 const unsigned numa_node,
498 unsigned packet_size,
499 unsigned packet_copy)
501 const unsigned nb_rx_queues = 1;
502 const unsigned nb_tx_queues = 1;
503 struct rte_eth_dev_data *data = NULL;
504 struct rte_pci_device *pci_dev = NULL;
505 struct pmd_internals *internals = NULL;
506 struct rte_eth_dev *eth_dev = NULL;
508 static const uint8_t default_rss_key[40] = {
509 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
510 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
511 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
512 0xBE, 0xAC, 0x01, 0xFA
518 RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
521 /* now do all data allocation - for eth_dev structure, dummy pci driver
522 * and internal (private) data
524 data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
528 pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
532 internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
533 if (internals == NULL)
536 /* reserve an ethdev entry */
537 eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
541 /* now put it all together
542 * - store queue data in internals,
543 * - store numa_node info in ethdev data
544 * - point eth_dev_data to internals
545 * - and point eth_dev structure to new eth_dev_data structure
547 /* NOTE: we'll replace the data element, of originally allocated eth_dev
548 * so the nulls are local per-process */
550 internals->nb_rx_queues = nb_rx_queues;
551 internals->nb_tx_queues = nb_tx_queues;
552 internals->packet_size = packet_size;
553 internals->packet_copy = packet_copy;
554 internals->numa_node = numa_node;
556 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
557 internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
559 rte_memcpy(internals->rss_key, default_rss_key, 40);
561 pci_dev->numa_node = numa_node;
562 pci_dev->driver = &rte_null_pmd.pci_drv;
564 data->dev_private = internals;
565 data->port_id = eth_dev->data->port_id;
566 data->nb_rx_queues = (uint16_t)nb_rx_queues;
567 data->nb_tx_queues = (uint16_t)nb_tx_queues;
568 data->dev_link = pmd_link;
569 data->mac_addrs = ð_addr;
570 strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));
572 eth_dev->data = data;
573 eth_dev->dev_ops = &ops;
574 eth_dev->pci_dev = pci_dev;
575 eth_dev->driver = &rte_null_pmd;
576 TAILQ_INIT(ð_dev->link_intr_cbs);
578 eth_dev->driver = NULL;
579 eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
580 eth_dev->data->kdrv = RTE_KDRV_NONE;
581 eth_dev->data->drv_name = drivername;
582 eth_dev->data->numa_node = numa_node;
584 /* finally assign rx and tx ops */
586 eth_dev->rx_pkt_burst = eth_null_copy_rx;
587 eth_dev->tx_pkt_burst = eth_null_copy_tx;
589 eth_dev->rx_pkt_burst = eth_null_rx;
590 eth_dev->tx_pkt_burst = eth_null_tx;
604 get_packet_size_arg(const char *key __rte_unused,
605 const char *value, void *extra_args)
607 const char *a = value;
608 unsigned *packet_size = extra_args;
610 if ((value == NULL) || (extra_args == NULL))
613 *packet_size = (unsigned)strtoul(a, NULL, 0);
614 if (*packet_size == UINT_MAX)
621 get_packet_copy_arg(const char *key __rte_unused,
622 const char *value, void *extra_args)
624 const char *a = value;
625 unsigned *packet_copy = extra_args;
627 if ((value == NULL) || (extra_args == NULL))
630 *packet_copy = (unsigned)strtoul(a, NULL, 0);
631 if (*packet_copy == UINT_MAX)
638 rte_pmd_null_devinit(const char *name, const char *params)
641 unsigned packet_size = default_packet_size;
642 unsigned packet_copy = default_packet_copy;
643 struct rte_kvargs *kvlist = NULL;
649 RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
651 numa_node = rte_socket_id();
653 if (params != NULL) {
654 kvlist = rte_kvargs_parse(params, valid_arguments);
658 if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
660 ret = rte_kvargs_process(kvlist,
661 ETH_NULL_PACKET_SIZE_ARG,
662 &get_packet_size_arg, &packet_size);
667 if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
669 ret = rte_kvargs_process(kvlist,
670 ETH_NULL_PACKET_COPY_ARG,
671 &get_packet_copy_arg, &packet_copy);
677 RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
678 "packet copy is %s\n", packet_size,
679 packet_copy ? "enabled" : "disabled");
681 ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);
685 rte_kvargs_free(kvlist);
690 rte_pmd_null_devuninit(const char *name)
692 struct rte_eth_dev *eth_dev = NULL;
697 RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
700 /* reserve an ethdev entry */
701 eth_dev = rte_eth_dev_allocated(name);
705 rte_free(eth_dev->data->dev_private);
706 rte_free(eth_dev->data);
707 rte_free(eth_dev->pci_dev);
709 rte_eth_dev_release_port(eth_dev);
714 static struct rte_driver pmd_null_drv = {
717 .init = rte_pmd_null_devinit,
718 .uninit = rte_pmd_null_devuninit,
721 PMD_REGISTER_DRIVER(pmd_null_drv);