/*-
 *   BSD LICENSE
 *
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of IGEL Co.,Ltd. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
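
/*
 * Device arguments, parsed from the --vdev option string:
 *   size - length in bytes of the synthetic packets (default 64)
 *   copy - non-zero selects the rte_memcpy RX/TX paths (default off)
 */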
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	unsigned numa_node;

	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	struct null_queue rx_null_queues[1];
	struct null_queue tx_null_queues[1];
};
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = 0
};
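
/*
 * RX burst, no-copy mode: allocate nb_bufs mbufs from the queue's
 * mempool and hand them to the caller with the configured packet size
 * set; the payload is left as whatever the mempool returns.
 */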
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
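
/*
 * RX burst, copy mode: same as eth_null_rx, but additionally fill each
 * mbuf's data area from the per-queue dummy packet with rte_memcpy.
 */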
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
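
/*
 * TX burst, no-copy mode: "transmit" by simply freeing the mbufs and
 * counting them.
 */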
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
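
/*
 * TX burst, copy mode: read each payload back into the per-queue dummy
 * packet before freeing, so the TX path actually touches packet data.
 */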
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	/* dereference the queue only after the NULL check above */
	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = 1;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = 0;
}
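
/*
 * Only queue 0 is supported. Each queue setup allocates a zeroed
 * packet_size-byte dummy packet on the device's NUMA node; the
 * copy-mode burst functions copy from/to this buffer.
 */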
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	if (rx_queue_id != 0)
		return -ENODEV;

	internals = dev->data->dev_private;
	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	if (tx_queue_id != 0)
		return -ENODEV;

	internals = dev->data->dev_private;
	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	memset(igb_stats, 0, sizeof(*igb_stats));
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < internal->nb_rx_queues; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < internal->nb_tx_queues; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < internal->nb_rx_queues; i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < internal->nb_tx_queues; i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	if (nq->dummy_packet)
		rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
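
/*
 * Create the virtual device: allocate ethdev data, a dummy PCI device
 * and the private internals on the requested NUMA node, wire them into
 * a reserved ethdev entry, then pick the copy or no-copy burst
 * functions according to packet_copy.
 */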
static int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct rte_pci_device *pci_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
	if (pci_dev == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in pci_driver
	 * - point eth_dev_data to internals and pci_driver
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals->nb_rx_queues = nb_rx_queues;
	internals->nb_tx_queues = nb_tx_queues;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->numa_node = numa_node;

	pci_dev->numa_node = numa_node;

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;
	eth_dev->pci_dev = pci_dev;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(pci_dev);
	rte_free(internals);

	return -1;
}
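
/*
 * kvargs callbacks: each parses one "key=value" pair from the device
 * string with strtoul; base 0 accepts decimal, octal and hex input.
 */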
static int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
static int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				return -1;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				return -1;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	return eth_dev_null_create(name, numa_node, packet_size, packet_copy);
}
static struct rte_driver pmd_null_drv = {
	.name = "eth_null",
	.type = PMD_VDEV,
	.init = rte_pmd_null_devinit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);
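
/*
 * Usage sketch (the exact EAL command line is illustrative; vdev names
 * follow the usual <driver><index> convention, e.g. "eth_null0"):
 *
 *   testpmd -c 0x3 -n 4 --vdev 'eth_null0,size=64,copy=1' -- -i
 *
 * "size" sets the synthetic packet length in bytes; "copy" selects the
 * rte_memcpy-based RX/TX paths defined above.
 */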