/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>	/* rte_vdev_device and vdev registration macros */
#include <rte_kvargs.h>
#include <rte_spinlock.h>
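
/*
 * Devargs accepted by this driver: "size" (length of the mbufs handed out
 * on RX) and "copy" (non-zero enables a per-packet memcpy). Example
 * invocation (testpmd shown purely for illustration; any EAL application
 * that creates the vdev works the same way):
 *
 *   testpmd -l 0-1 --vdev 'net_null0,size=128,copy=1' -- -i
 */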
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL	/* rte_kvargs_parse() expects a NULL-terminated list */
};
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;
	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;
	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;
	rte_spinlock_t rss_lock;
	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];
	uint8_t rss_key[40];	/**< 40-byte hash key. */
};
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};
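
/*
 * RX burst, no-copy mode: allocate up to nb_bufs mbufs from the queue's
 * pool and report them as received packets of the configured size; the
 * payload is never written.
 */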
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);
	return i;
}
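
/*
 * RX burst, copy mode: same as eth_null_rx(), but the queue's zeroed
 * dummy packet is copied into each mbuf so the per-packet memory traffic
 * of a real driver is simulated.
 */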
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}
	rte_atomic64_add(&(h->rx_pkts), i);
	return i;
}
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;
	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}
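
/*
 * TX burst, copy mode: the first packet_size bytes of each outgoing mbuf
 * are copied into the dummy packet before the mbuf is freed.
 */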
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;
	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}
	rte_atomic64_add(&(h->tx_pkts), i);
	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
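
/*
 * Queue setup just records the mempool and allocates the zeroed
 * packet_size-byte dummy buffer on the device's NUMA node; there is no
 * hardware ring behind either direction.
 */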
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;
	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;
	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;
	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;
	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
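
/*
 * Stats: per-queue counters are exported for at most
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues, and the port-level totals are
 * summed over that same window.
 */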
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;
	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
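
/*
 * The RSS ops below keep a software copy of the redirection table and
 * hash key, guarded by rss_lock; nothing is ever hashed or steered,
 * they only let applications exercise the RSS API.
 */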
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);
	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}
	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);
	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}
	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);
	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;
	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);
	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);
	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
static struct rte_vdev_driver pmd_null_drv;
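
/*
 * Creates the ethdev: allocates a fresh, per-process rte_eth_dev_data
 * copy, seeds the RSS state with a default 40-byte key, and selects the
 * copy or no-copy burst handlers according to packet_copy.
 */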
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}
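
/*
 * rte_kvargs_process() callbacks, one per devarg. strtoul() with base 0
 * accepts decimal, octal and hex input; a value that parses to UINT_MAX
 * is rejected as out of range.
 */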
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;
	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;
	return 0;
}
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;
		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}
		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
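
/*
 * remove: find the ethdev created by probe and free its per-process
 * data and private state before releasing the port.
 */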
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}
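
/*
 * Register as a virtual device driver: "net_null" is the canonical name,
 * "eth_null" is kept as a compatibility alias, and the parameter string
 * embeds the accepted devargs for introspection tools such as
 * dpdk-pmdinfo.
 */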
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");