/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/time.h>
#include <net/if.h>

#include <pcap.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN 4096
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
#define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG    "iface"
static char errbuf[PCAP_ERRBUF_SIZE];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
struct pcap_rx_queue {
	pcap_t *pcap;
	uint8_t in_port;
	struct rte_mempool *mb_pool;
	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
};
struct pcap_tx_queue {
	pcap_dumper_t *dumper;
	pcap_t *pcap;
	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
};
struct rx_pcaps {
	unsigned num_of_rx;
	pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
};
struct tx_pcaps {
	unsigned num_of_tx;
	pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
	pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
};
struct pmd_internals {
	unsigned nb_rx_queues;
	unsigned nb_tx_queues;
	int if_index;

	struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
	struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
};
static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	NULL
};
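
/*
 * Illustrative note, not part of the original driver: the keys above are
 * matched against the devargs string handed to this PMD.  That string is a
 * comma-separated list of key=value pairs, for example:
 *
 *   rx_pcap=/tmp/in.pcap,tx_pcap=/tmp/out.pcap    read from / dump to files
 *   rx_iface=eth0,tx_iface=eth0                   read from / send to a NIC
 *   iface=eth1                                    one NIC for both directions
 *
 * rte_kvargs_parse() splits such a string into key/value pairs, and
 * rte_kvargs_process() invokes one of the open_*() handlers below once per
 * pair matching a given key; see rte_pmd_pcap_devinit() at the end of this
 * file.  The example paths and interface names are hypothetical.
 */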
static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
	.link_duplex = ETH_LINK_FULL_DUPLEX,
};
static uint16_t
eth_pcap_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	struct pcap_pkthdr header;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t num_rx = 0;
	uint16_t buf_size;

	if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap_q->pcap, &header);
		if (unlikely(packet == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* Now get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(pcap_q->mb_pool);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (header.len <= buf_size) {
			/* pcap packet will fit in the mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
					header.len);
			mbuf->data_len = (uint16_t)header.len;
			mbuf->pkt_len = mbuf->data_len;
			mbuf->port = pcap_q->in_port;
			bufs[num_rx] = mbuf;
			num_rx++;
		} else {
			/* pcap packet will not fit in the mbuf, so drop packet */
			RTE_LOG(ERR, PMD,
					"PCAP packet %d bytes will not fit in mbuf (%d bytes)\n",
					header.len, buf_size);
			rte_pktmbuf_free(mbuf);
		}
	}

	pcap_q->rx_pkts += num_rx;
	return num_rx;
}
static inline void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
	timeradd(&start_time, &cur_time, ts);
}
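
/*
 * Worked example (illustrative, not in the original source): with a 2 GHz
 * timer (hz = 2,000,000,000) and cycles = 5,000,000,000 elapsed since probe
 * time, the conversion above gives tv_sec = 5e9 / 2e9 = 2 and
 * tv_usec = (1e9 * 1e6) / 2e9 = 500000, i.e. an offset of 2.5 s that
 * timeradd() adds to start_time to produce the capture timestamp.
 */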
/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	struct pcap_pkthdr header;

	if (dumper_q->dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		calculate_timestamp(&header.ts);
		header.len = mbuf->data_len;
		header.caplen = header.len;
		pcap_dump((u_char *)dumper_q->dumper, &header,
				rte_pktmbuf_mtod(mbuf, void*));
		rte_pktmbuf_free(mbuf);
		num_tx++;
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper_q->dumper);
	dumper_q->tx_pkts += num_tx;
	dumper_q->err_pkts += nb_pkts - num_tx;
	return num_tx;
}
/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;

	if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		ret = pcap_sendpacket(tx_queue->pcap,
				rte_pktmbuf_mtod(mbuf, u_char *),
				mbuf->data_len);
		if (unlikely(ret != 0))
			break;
		num_tx++;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_pkts += num_tx;
	tx_queue->err_pkts += nb_pkts - num_tx;
	return num_tx;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	return 0;
}
/*
 * This function gets called when the current port gets stopped.
 * It is the only place where the tx stream dumpers can be closed;
 * if it is not called, the dumpers are only flushed within each tx burst.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	pcap_dumper_t *dumper;
	pcap_t *pcap;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_tx_queues; i++) {
		dumper = internals->tx_queue[i].dumper;
		if (dumper != NULL)
			pcap_dump_close(dumper);
		pcap = internals->tx_queue[i].pcap;
		if (pcap != NULL)
			pcap_close(pcap);
	}

	dev->data->dev_link.link_status = 0;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->driver_name = drivername;
	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
static void
eth_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *igb_stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	memset(igb_stats, 0, sizeof(*igb_stats));
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
			i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		rx_total += igb_stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
			i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_rx_queues; i++)
		internal->rx_queue[i].rx_pkts = 0;
	for (i = 0; i < internal->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
	}
}
static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}
static void
eth_queue_release(void *q __rte_unused)
{
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] = pcap_q;
	pcap_q->in_port = dev->data->port_id;
	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
	return 0;
}
static struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key __rte_unused, const char *value, void *extra_args)
{
	unsigned i;
	const char *pcap_filename = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *rx_pcap;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if ((rx_pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
			RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
			return -1;
		}
		pcaps->pcaps[i] = rx_pcap;
	}

	return 0;
}
/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key __rte_unused, const char *value, void *extra_args)
{
	unsigned i;
	const char *pcap_filename = value;
	struct tx_pcaps *dumpers = extra_args;
	pcap_t *tx_pcap;
	pcap_dumper_t *dumper;

	for (i = 0; i < dumpers->num_of_tx; i++) {
		/*
		 * We need to create a dummy empty pcap_t to use it
		 * with pcap_dump_open(). We create it large enough
		 * for an Ethernet capture.
		 */
		if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
				== NULL) {
			RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
			return -1;
		}

		/* The dumper is created using the previous pcap_t reference */
		if ((dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
			RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
			return -1;
		}
		dumpers->dumpers[i] = dumper;
	}

	return 0;
}
/* pcap_open_live() wrapper function */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
	if (*pcap == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
		return -1;
	}
	return 0;
}
/*
 * Opens an interface for reading and writing.
 */
static inline int
open_rx_tx_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	const char *iface = value;
	pcap_t **pcap = extra_args;

	if (open_iface_live(iface, pcap) < 0)
		return -1;
	return 0;
}
/*
 * Opens a NIC for reading packets from it.
 */
static inline int
open_rx_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	unsigned i;
	const char *iface = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if (open_iface_live(iface, &pcap) < 0)
			return -1;
		pcaps->pcaps[i] = pcap;
	}

	return 0;
}
/*
 * Opens a NIC for writing packets to it.
 */
static int
open_tx_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
	unsigned i;
	const char *iface = value;
	struct tx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_tx; i++) {
		if (open_iface_live(iface, &pcap) < 0)
			return -1;
		pcaps->pcaps[i] = pcap;
	}

	return 0;
}
static int
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev,
		struct rte_kvargs *kvlist)
{
	struct rte_eth_dev_data *data = NULL;
	struct rte_pci_device *pci_dev = NULL;
	unsigned k_idx;
	struct rte_kvargs_pair *pair = NULL;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
			break;
	}

	RTE_LOG(INFO, PMD,
			"Creating pcap-backed ethdev on numa socket %u\n", numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
	if (pci_dev == NULL)
		goto error;

	*internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
	if (*internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_dev_allocate(name);
	if (*eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in pci_driver
	 * - point eth_dev_data to internals and pci_driver
	 * - and point eth_dev structure to new eth_dev_data structure
	 *
	 * NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process
	 */
	(*internals)->nb_rx_queues = nb_rx_queues;
	(*internals)->nb_tx_queues = nb_tx_queues;

	if (pair == NULL)
		(*internals)->if_index = 0;
	else
		(*internals)->if_index = if_nametoindex(pair->value);

	pci_dev->numa_node = numa_node;

	data->dev_private = *internals;
	data->port_id = (*eth_dev)->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->pci_dev = pci_dev;

	return 0;

error:
	rte_free(data);
	rte_free(pci_dev);
	rte_free(*internals);

	return -1;
}
static int
rte_eth_from_pcaps_n_dumpers(const char *name, pcap_t * const rx_queues[],
		const unsigned nb_rx_queues,
		pcap_dumper_t * const tx_queues[],
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	unsigned i;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
		return -1;
	if (tx_queues == NULL && nb_tx_queues > 0)
		return -1;

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
			&internals, &eth_dev, kvlist) < 0)
		return -1;

	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_queue[i].pcap = rx_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_queue[i].dumper = tx_queues[i];
	}

	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;

	return 0;
}
static int
rte_eth_from_pcaps(const char *name, pcap_t * const rx_queues[],
		const unsigned nb_rx_queues,
		pcap_t * const tx_queues[],
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	unsigned i;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
		return -1;
	if (tx_queues == NULL && nb_tx_queues > 0)
		return -1;

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
			&internals, &eth_dev, kvlist) < 0)
		return -1;

	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_queue[i].pcap = rx_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_queue[i].pcap = tx_queues[i];
	}

	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx;

	return 0;
}
static int
rte_pmd_pcap_devinit(const char *name, const char *params)
{
	unsigned numa_node, using_dumpers = 0;
	int ret;
	struct rte_kvargs *kvlist;
	struct rx_pcaps pcaps;
	struct tx_pcaps dumpers;

	RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);

	numa_node = rte_socket_id();

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	/*
	 * If the iface argument is passed we open the NICs and use them for
	 * both reading and writing.
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps.pcaps[0]);
		if (ret < 0)
			return -1;

		return rte_eth_from_pcaps(name, pcaps.pcaps, 1, pcaps.pcaps, 1,
				numa_node, kvlist);
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC or a
	 * pcap file.
	 */
	if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else {
		pcaps.num_of_rx = rte_kvargs_count(kvlist,
				ETH_PCAP_RX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
				&open_rx_iface, &pcaps);
	}

	if (ret < 0)
		return -1;

	/*
	 * We check whether we want to open a TX stream to a real NIC or a
	 * pcap file.
	 */
	if ((dumpers.num_of_tx = rte_kvargs_count(kvlist,
			ETH_PCAP_TX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
		using_dumpers = 1;
	} else {
		dumpers.num_of_tx = rte_kvargs_count(kvlist,
				ETH_PCAP_TX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);
	}

	if (ret < 0)
		return -1;

	if (using_dumpers)
		return rte_eth_from_pcaps_n_dumpers(name, pcaps.pcaps, pcaps.num_of_rx,
				dumpers.dumpers, dumpers.num_of_tx, numa_node, kvlist);

	return rte_eth_from_pcaps(name, pcaps.pcaps, pcaps.num_of_rx, dumpers.pcaps,
			dumpers.num_of_tx, numa_node, kvlist);
}
static struct rte_driver pmd_pcap_drv = {
	.name = "eth_pcap",
	.type = PMD_VDEV,
	.init = rte_pmd_pcap_devinit,
};

PMD_REGISTER_DRIVER(pmd_pcap_drv);
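
/*
 * Usage sketch (illustrative, not part of the original file): once this
 * driver is registered, an application creates a pcap-backed port through
 * the EAL virtual-device option, e.g.
 *
 *   testpmd -c 0x3 -n 4 \
 *       --vdev 'eth_pcap0,rx_pcap=/tmp/in.pcap,tx_pcap=/tmp/out.pcap' -- -i
 *
 * The EAL matches the "eth_pcap" prefix to this driver and passes the rest
 * of the string to rte_pmd_pcap_devinit() as the params argument.  The exact
 * option spelling and device name depend on the DPDK release this file
 * belongs to, and the paths above are hypothetical.
 */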