/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
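
/*
 * Null PMD: a virtual ethdev used for benchmarking and testing. The RX
 * path hands out freshly allocated mbufs of a fixed size (optionally
 * filled from a per-queue dummy buffer) and the TX path simply frees the
 * packets it is given.
 */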
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
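
/* Per-queue context: owning device, mbuf pool (used on RX), the dummy
 * packet buffer used by the copy variants, and packet counters. */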
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
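
/* Per-device private data: packet size/copy settings, the RX/TX queue
 * arrays and the software RSS state exposed through the ethdev API. */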
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;
	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];
	uint8_t rss_key[40]; /**< 40-byte hash key. */
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};
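
/* RX without copy: allocate mbufs from the queue's pool and set their
 * lengths; the payload is left untouched. */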
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
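
/* RX with copy: like eth_null_rx, but also fills each mbuf from the
 * queue's dummy packet buffer. */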
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
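
/* TX without copy: drop the burst by freeing the mbufs. */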
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;
	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}
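
/* TX with copy: read each payload into the dummy buffer before freeing,
 * so the packet data is actually touched. */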
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
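
/* Queue setup allocates the per-queue dummy packet buffer of
 * internals->packet_size bytes on the device's NUMA node. */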
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq = q;

	if (nq == NULL)
		return;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
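
/* The RSS ops below only maintain software state (RETA table, hash key,
 * rss_hf) under rss_lock; no real hashing is performed by this PMD. */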
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);
	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;
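
/* Allocate the ethdev, fill in the private data and default RSS key, and
 * hook up the RX/TX burst functions according to the "copy" option. */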
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (data == NULL)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (eth_dev == NULL) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}
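
/* kvargs handlers for the "size" and "copy" devargs. */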
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;
	return 0;
}

static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;
	return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
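
/* Tear down: free the private data and data area allocated for the port,
 * then release the ethdev entry. */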
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
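
/*
 * Example usage (illustrative, not part of this file): the device can be
 * instantiated with EAL devargs such as --vdev=net_null0,size=64,copy=1.
 */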