/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
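/*
 * Device arguments accepted by this PMD:
 *   size=<int>  length in bytes of the fake packets produced on rx (and of
 *               the dummy buffer used on tx), default 64
 *   copy=<int>  when non-zero, rx/tx bursts also memcpy the dummy packet
 *               payload instead of only updating mbuf metadata, default 0
 * Illustrative EAL invocation (application and instance name are arbitrary):
 *   testpmd -l 0-1 --vdev 'net_null0,size=128,copy=1' -- -i
 */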
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
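/*
 * Per-device private data. The RSS RETA, hash key and offload mask below are
 * only stored and reported back through the ethdev API; they have no effect
 * on the fabricated traffic.
 */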
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};
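/*
 * Rx burst for the default (no-copy) mode: allocate mbufs from the queue's
 * mempool, set the configured length and the port id, and return them
 * without touching the payload.
 */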
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);
	return i;
}
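/*
 * Rx burst for copy mode: same as eth_null_rx(), but also copies the queue's
 * dummy packet into each mbuf's data area.
 */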
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);
	return i;
}
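/*
 * Tx burst for the default (no-copy) mode: every mbuf handed in is simply
 * freed and counted as transmitted.
 */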
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}
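/*
 * Tx burst for copy mode: copy each outgoing payload into the queue's dummy
 * packet buffer before freeing the mbuf.
 */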
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
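/*
 * Per-queue counters are reported for at most RTE_ETHDEV_QUEUE_STAT_CNTRS
 * queues; the per-queue values are also accumulated into the aggregate
 * ipackets/opackets/oerrors fields.
 */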
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
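/*
 * Forward declaration: eth_dev_null_create() references the driver name
 * before the full definition of pmd_null_drv near the end of the file.
 */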
static struct rte_vdev_driver pmd_null_drv;
static int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = pmd_null_drv.driver.name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);
	return -1;
}
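/*
 * rte_kvargs_process() callbacks used by the probe routine to parse the
 * "size" and "copy" device arguments.
 */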
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");