/*-
 *   BSD LICENSE
 *
 * Copyright (C) IGEL Co.,Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of IGEL Co.,Ltd. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
#define ETH_NULL_PACKET_SIZE_ARG "size"
#define ETH_NULL_PACKET_COPY_ARG "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG,
};
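
/*
 * Rx burst used when packet copy is disabled: hand out freshly allocated
 * mbufs of the configured packet size without touching their payload, and
 * count them as received.
 */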
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
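
/*
 * Rx burst used when the "copy" devarg is set: identical to eth_null_rx(),
 * but the queue's dummy packet is copied into each mbuf's data area.
 */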
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
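
/* Tx burst without copy: every mbuf is simply freed and counted as sent. */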
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
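
/*
 * Tx burst with copy: the packet data is copied into the queue's dummy
 * buffer before the mbuf is freed, so the payload is actually read.
 */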
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
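
/*
 * Rx queue setup: remember the mempool to allocate mbufs from and allocate
 * the per-queue dummy packet buffer on the device's NUMA node.
 */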
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
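
/*
 * Tx queue setup: same as the Rx side minus the mempool; only the dummy
 * packet buffer used by the copying Tx path is allocated here.
 */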
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
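
/*
 * Per-queue counters are reported for as many queues as the generic stats
 * structure can hold; the sums feed the port-level ipackets/opackets/oerrors
 * fields.
 */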
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
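
/*
 * RETA update/query: the redirection table lives in pmd_internals and is
 * protected by rss_lock; only entries whose mask bit is set are touched.
 */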
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static void
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct ether_addr *addr)
{
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
static struct rte_vdev_driver pmd_null_drv;
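
/*
 * Allocate the ethdev and its private data, seed the RSS configuration with
 * a default 40-byte key, and hook up either the copy or the no-copy burst
 * functions depending on the "copy" devarg.
 */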
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}
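
/*
 * kvargs handlers: the devarg values are parsed with strtoul() and base 0,
 * so decimal, octal and hex notations are accepted; a value equal to
 * UINT_MAX is rejected.
 */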
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
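
/*
 * Probe entry point: parse the optional "size" and "copy" devargs, then
 * create the null ethdev.
 */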
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
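
/*
 * Remove entry point: look up the ethdev created at probe time, free its
 * private data and shadow data area, then release the port.
 */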
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};
RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
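
/*
 * Typical usage (illustrative EAL command line; the vdev instance name
 * "net_null0" is only an example):
 *
 *   testpmd -l 0-1 --vdev=net_null0,size=128,copy=1 -- -i
 */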