/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
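/*
 * Null ethdev PMD: Rx bursts hand out freshly allocated mbufs (optionally
 * pre-filled from a per-queue dummy packet buffer) and Tx bursts simply
 * free the mbufs they are given, so a port of this driver acts as a pure
 * packet source/sink for testing and benchmarking.
 */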
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
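/*
 * Device arguments accepted on the --vdev command line:
 *   "size" - length in bytes of the dummy packets handled by the port
 *   "copy" - when non-zero, Rx/Tx bursts also memcpy packet data to/from
 *            the dummy packet buffer instead of only allocating/freeing
 *            mbufs
 */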
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
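/*
 * Per-port private data: packet geometry, one null_queue per possible
 * Rx/Tx queue, the port's (random) MAC address, and the software-emulated
 * RSS state (RETA, hash key and supported offload mask).
 */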
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40]; /**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
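/*
 * Queue setup: every Rx/Tx queue gets a zeroed dummy packet buffer of
 * packet_size bytes on the device's NUMA node; Rx queues additionally
 * remember the application's mempool so the Rx bursts can allocate
 * mbufs from it.
 */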
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
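/*
 * Software-emulated RSS: the RETA, the hash key and the enabled hash
 * functions are simply stored in pmd_internals (under rss_lock) and
 * echoed back on query; no packet is ever actually hashed or redirected.
 */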
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct ether_addr *addr)
{
	return 0;
}
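/*
 * ethdev callback table: everything this PMD supports is wired up here;
 * callbacks left unset are rejected as unsupported by the ethdev layer.
 */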
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
static struct rte_vdev_driver pmd_null_drv;
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
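/*
 * Devarg parse callbacks for rte_kvargs_process(). Values go through
 * strtoul() with base 0, so decimal, octal and hex spellings are all
 * accepted.
 */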
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
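/*
 * Probe: a secondary process just attaches to the port already created
 * by the primary; the primary parses the devargs and creates the port.
 */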
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port_secondary(eth_dev);

	rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int>");
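/*
 * Example invocation (illustrative, not part of this file): create two
 * null ports from the EAL command line, the second copying 128-byte
 * packets on every burst:
 *
 *   testpmd ... --vdev=net_null0 --vdev=net_null1,size=128,copy=1
 */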
RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}