/*-
 *   BSD LICENSE
 *
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of IGEL Co.,Ltd. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
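/*
 * Null PMD: a virtual ethdev whose Tx path simply frees every mbuf and
 * whose Rx path synthesizes fixed-size packets, used mainly for testing
 * and benchmarking. Typical usage (devargs as registered at the bottom
 * of this file; the instance name "net_null0" is illustrative):
 *
 *   --vdev=net_null0,size=64,copy=1
 *
 * "size" sets the synthetic packet length; "copy" selects the memcpy
 * variants of the Rx/Tx burst functions.
 */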
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;
#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
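/*
 * Rx burst for copy mode: same as eth_null_rx(), but also memcpy the
 * queue's dummy packet into each mbuf so the payload is actually touched.
 */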
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
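/* Tx burst for the default mode: drop every packet by freeing its mbuf. */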
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
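/*
 * Tx burst for copy mode: read each packet's payload into the dummy buffer
 * before dropping it, so transmitted data is actually accessed.
 */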
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
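/*
 * Queue setup allocates a zeroed "dummy packet" buffer per queue; the copy
 * burst functions use it as the source (Rx) or destination (Tx) of their
 * memcpy.
 */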
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
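/*
 * Aggregate the per-queue atomic counters into rte_eth_stats; per-queue
 * entries are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */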
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
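/*
 * The RSS redirection table and hash key live in pmd_internals under
 * rss_lock; the device never hashes real traffic, so these ops only record
 * and report the configuration.
 */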
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct ether_addr *addr)
{
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
static struct rte_vdev_driver pmd_null_drv;
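/*
 * Allocate and initialize a null ethdev: random MAC, fixed 10G link state,
 * one Rx and one Tx queue, and either the plain or the copy burst functions
 * depending on packet_copy.
 */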
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
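/*
 * rte_kvargs handlers for the "size" and "copy" devargs; both parse an
 * unsigned integer with strtoul().
 */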
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
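/*
 * vdev probe: in a secondary process with no devargs, just attach to the
 * port created by the primary; otherwise parse the "size"/"copy" devargs
 * and create a new null ethdev.
 */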
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
	    strlen(params) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}