/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of IGEL Co.,Ltd. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
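
/*
 * Device arguments accepted by this PMD, e.g. via
 * --vdev 'eth_null0,size=64,copy=1' (illustrative values):
 *  - "size": payload length in bytes of the mbufs produced/consumed
 *  - "copy": when non-zero, the RX/TX paths actually copy packet data
 *    to/from a per-queue dummy buffer instead of only allocating and
 *    freeing mbufs
 */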

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};
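
/*
 * RX burst for the default (no-copy) mode: allocate up to nb_bufs mbufs
 * from the queue's mempool, set their lengths to the configured packet
 * size and count them as received. No packet data is written.
 */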
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
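
/*
 * TX bursts simply free the presented mbufs (the copy variant first
 * copies the payload into the queue's dummy packet) and count them
 * as transmitted.
 */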
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
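
/*
 * Queue setup: each RX/TX queue gets a zeroed "dummy packet" buffer of
 * packet_size bytes on the device's NUMA node; the copy RX/TX paths
 * read from / write into this buffer.
 */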
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
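
/*
 * Report device capabilities: queue counts are bounded only by
 * RTE_MAX_QUEUES_PER_PORT and the RSS parameters come from internals.
 */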
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
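
/*
 * Statistics are read straight from the per-queue atomic counters;
 * only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported
 * per direction.
 */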
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq = q;

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
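
/*
 * RSS emulation: the redirection table, hash key and enabled hash
 * functions are only stored in the driver's private data, protected
 * by rss_lock; no hashing is actually performed on traffic.
 */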
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
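
/* Wire the callbacks above into the generic ethdev API. */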
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
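
/*
 * Create a null ethdev: allocate the ethdev entry plus per-process
 * data and private structures, seed the RSS state with a default key,
 * and select the copy or no-copy burst functions.
 */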
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	TAILQ_INIT(&eth_dev->link_intr_cbs);

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
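
/*
 * vdev init entry point: parse the optional "size"/"copy" kvargs,
 * fall back to the defaults, and create the device on the caller's
 * NUMA socket.
 */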
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
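
/*
 * vdev uninit entry point: free the per-process data and private
 * structures allocated in eth_dev_null_create() and release the port.
 */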
static int
rte_pmd_null_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_driver pmd_null_drv = {
	.name = "eth_null",
	.type = PMD_VDEV,
	.init = rte_pmd_null_devinit,
	.uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);