/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
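/*
 * These keys arrive as one raw devargs string per device, e.g.
 * "size=128,copy=1". rte_kvargs_parse() in rte_pmd_null_devinit() below
 * splits the string against this NULL-terminated table, and
 * rte_kvargs_process() then calls the matching handler
 * (get_packet_size_arg()/get_packet_copy_arg()) once per occurrence.
 */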
struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	unsigned numa_node;

	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	struct null_queue rx_null_queues[1];
	struct null_queue tx_null_queues[1];
};
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,	/* nominal 10G; the link is purely virtual */
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = 0
};
/* rx burst, no-copy mode: hand out freshly allocated mbufs with only the
 * length fields set; the payload is whatever the mempool returns. */
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (bufs[i] == NULL)
			break;

		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
/* rx burst, copy mode: additionally fill each mbuf from the queue's
 * zeroed dummy packet, simulating the copy cost of a real rx path. */
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (bufs[i] == NULL)
			break;

		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
/* tx burst, no-copy mode: drop everything and count it as sent. */
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
/* tx burst, copy mode: read each payload into the dummy packet before
 * dropping it, so the memcpy cost of a real tx path is exercised. */
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = 1;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	/* only a single rx queue is supported */
	if (rx_queue_id != 0)
		return -ENODEV;

	internals = dev->data->dev_private;
	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	/* only a single tx queue is supported */
	if (tx_queue_id != 0)
		return -ENODEV;

	internals = dev->data->dev_private;
	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(internal->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(internal->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}
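/*
 * Application-side sketch (generic ethdev API, nothing PMD-specific):
 * the per-queue counters filled in above surface through
 * rte_eth_stats_get(), e.g.
 *
 *	struct rte_eth_stats stats;
 *	rte_eth_stats_get(port_id, &stats);
 *	printf("rx %lu tx %lu tx-errs %lu\n",
 *		(unsigned long)stats.ipackets,
 *		(unsigned long)stats.opackets,
 *		(unsigned long)stats.oerrors);
 */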
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static struct eth_driver rte_null_pmd = {
	.pci_drv = {
		.name = "rte_null_pmd",
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
	},
};
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
static int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct rte_pci_device *pci_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
	if (pci_dev == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in pci_driver
	 * - point eth_dev_data to internals and pci_driver
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */
	internals->nb_rx_queues = nb_rx_queues;
	internals->nb_tx_queues = nb_tx_queues;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->numa_node = numa_node;

	pci_dev->numa_node = numa_node;

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	/* snprintf rather than strncpy: guarantees NUL termination */
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = &rte_null_pmd;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(pci_dev);
	rte_free(internals);

	return -1;
}
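/*
 * The handlers assigned above are what an application reaches through the
 * generic burst API. Minimal loop sketch (assumes the port was configured
 * with one rx/tx queue and started, as with any PMD):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
 *	while (sent < nb)
 *		rte_pktmbuf_free(pkts[sent++]);
 */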
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist != NULL)
		rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_null_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);
	rte_free(eth_dev->pci_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_driver pmd_null_drv = {
	.name = "eth_null",	/* vdev prefix: instances are eth_null0, eth_null1, ... */
	.type = PMD_VDEV,
	.init = rte_pmd_null_devinit,
	.uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);
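/*
 * Example instantiation via the EAL --vdev option (illustrative command
 * line; "eth_null" is the vdev prefix registered above):
 *
 *   ./app -c 0x3 -n 4 --vdev 'eth_null0' --vdev 'eth_null1,size=256,copy=1'
 *
 * Each --vdev instance reaches rte_pmd_null_devinit() with the
 * suffix-qualified name and its raw key=value parameter string.
 */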