/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

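/*
 * Both defaults can be overridden per device through devargs, e.g.
 * --vdev=net_null0,size=128,copy=1 (illustrative invocation; the accepted
 * keys match the parameter string registered at the bottom of this file).
 */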
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

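/*
 * Per-queue state: each Rx/Tx queue keeps a pointer back to the device's
 * private data, the mempool to allocate mbufs from (Rx only), a dummy
 * packet buffer used by the copy variants, and 64-bit packet counters.
 */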
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

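/*
 * Per-device private data, allocated alongside the ethdev by
 * rte_eth_vdev_allocate() and reachable via dev->data->dev_private.
 */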
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG,
};

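/*
 * Rx burst for copy=0: allocate nb_bufs mbufs from the queue's mempool
 * and present them as received packets of the configured size. The
 * payload is never written, so the data is whatever the pool hands out.
 */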
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

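/*
 * Rx burst for copy=1: same as eth_null_rx(), but additionally copy the
 * queue's zero-filled dummy packet into each mbuf, so packet data is
 * actually touched on every receive.
 */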
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
				packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

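/*
 * Tx burst for copy=0: drop every packet by freeing the mbufs; the packet
 * data is never read.
 */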
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

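/*
 * Tx burst for copy=1: read packet_size bytes out of each mbuf into the
 * dummy buffer before freeing it, simulating a transmit that touches the
 * packet data.
 */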
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
				packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

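/*
 * The redirection table is stored as groups of RTE_RETA_GROUP_SIZE (64)
 * entries; reta_conf[i].mask selects which entries within group i are
 * read or written.
 */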
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static void
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct ether_addr *addr)
{
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

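/*
 * Allocate and wire up one null ethdev: a process-local copy of the
 * rte_eth_dev_data (per the NOTE below, so the null ports stay local to
 * each process), the pmd_internals private area, a random MAC address,
 * RSS defaults, and the Rx/Tx burst functions selected by the copy flag.
 */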
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

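/*
 * vdev probe: parse the optional "size" and "copy" devargs with
 * rte_kvargs and create the ethdev with the resulting settings.
 */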
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
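
/*
 * Example (illustrative only; exact EAL flags depend on the target system):
 * attach two null ports to testpmd, with 128-byte packets and data copying
 * enabled on the second port:
 *
 *   testpmd -l 0-3 -n 4 \
 *       --vdev=net_null0,size=128 \
 *       --vdev=net_null1,size=128,copy=1 \
 *       -- -i
 */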