 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2014 6WIND S.A.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <net/if.h>
#include <pcap.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN 4096
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
#define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG    "iface"
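
/*
 * Usage note (illustrative, not taken from the original file): these kvargs
 * keys are what a user supplies when creating the virtual device from the EAL
 * command line.  The exact flag and device-name prefix depend on the DPDK
 * version in use, but typically look like:
 *
 *	--vdev 'eth_pcap0,rx_pcap=/path/in.pcap,tx_pcap=/path/out.pcap'
 *	--vdev 'eth_pcap1,iface=eth0'
 */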
static char errbuf[PCAP_ERRBUF_SIZE];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
struct pcap_rx_queue {
	pcap_t *pcap;
	struct rte_mempool *mb_pool;
	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
};
struct pcap_tx_queue {
	pcap_dumper_t *dumper;
	pcap_t *pcap;
	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
};
struct rx_pcaps {
	unsigned num_of_rx;
	pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
};

struct tx_pcaps {
	unsigned num_of_tx;
	pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
	pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
};
struct pmd_internals {
	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	unsigned if_index;

	struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
	struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
};
const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	NULL
};
static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
		.link_duplex = ETH_LINK_FULL_DUPLEX,
};
eth_pcap_rx(void *queue,
		struct rte_mbuf **bufs,
	struct pcap_pkthdr header;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap_q->pcap, &header);
		if (unlikely(packet == NULL))

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))

		/* Now get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(pcap_q->mb_pool);
		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (header.len <= buf_size) {
			/* pcap packet will fit in the mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
			mbuf->data_len = (uint16_t)header.len;
			mbuf->pkt_len = mbuf->data_len;

			/* pcap packet will not fit in the mbuf, so drop packet */
				"PCAP packet %d bytes will not fit in mbuf (%d bytes)\n",
				header.len, buf_size);
			rte_pktmbuf_free(mbuf);

	pcap_q->rx_pkts += num_rx;
calculate_timestamp(struct timeval *ts) {
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
	timeradd(&start_time, &cur_time, ts);
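
/*
 * Worked example (illustrative, not from the original source): with a timer
 * frequency of hz = 2,000,000,000 cycles/s and 5,000,000,000 elapsed cycles,
 * tv_sec = 5e9 / 2e9 = 2 and tv_usec = (5e9 % 2e9) * 1e6 / 2e9 = 500000,
 * so the packet is stamped start_time + 2.5 seconds.
 */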
 * Callback to handle writing packets to a pcap file.
eth_pcap_tx_dumper(void *queue,
		struct rte_mbuf **bufs,
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *dumper_q = queue;
	struct pcap_pkthdr header;

	if (dumper_q->dumper == NULL || nb_pkts == 0)

	/* writes the nb_pkts packets to the previously opened pcap file dumper */
	for (i = 0; i < nb_pkts; i++) {
		calculate_timestamp(&header.ts);
		header.len = mbuf->data_len;
		header.caplen = header.len;
		pcap_dump((u_char *)dumper_q->dumper, &header,
				rte_pktmbuf_mtod(mbuf, void*));
		rte_pktmbuf_free(mbuf);

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper_q->dumper);
	dumper_q->tx_pkts += num_tx;
	dumper_q->err_pkts += nb_pkts - num_tx;
 * Callback to handle sending packets through a real NIC.
eth_pcap_tx(void *queue,
		struct rte_mbuf **bufs,
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *tx_queue = queue;

	if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))

	for (i = 0; i < nb_pkts; i++) {
		ret = pcap_sendpacket(tx_queue->pcap,
				rte_pktmbuf_mtod(mbuf, u_char *),
		if (unlikely(ret != 0))

		rte_pktmbuf_free(mbuf);

	tx_queue->tx_pkts += num_tx;
	tx_queue->err_pkts += nb_pkts - num_tx;
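
/*
 * Note added for clarity (not in the original file): pcap_sendpacket()
 * returns 0 on success and -1 on failure, so any non-zero return above is
 * treated as a failed transmit and is accounted in err_pkts rather than
 * tx_pkts.
 */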
eth_dev_start(struct rte_eth_dev *dev)
	dev->data->dev_link.link_status = 1;
 * This function gets called when the current port gets stopped.
 * It is the only place where we can close all the tx streams' dumpers.
 * Even if it is not called, the dumpers are flushed within each tx burst,
 * so no captured data is lost.
eth_dev_stop(struct rte_eth_dev *dev)
	pcap_dumper_t *dumper;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_tx_queues; i++) {
		dumper = internals->tx_queue[i].dumper;
		pcap_dump_close(dumper);
		pcap = internals->tx_queue[i].pcap;

	dev->data->dev_link.link_status = 0;

eth_dev_configure(struct rte_eth_dev *dev __rte_unused)

eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
	struct pmd_internals *internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
eth_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *igb_stats)
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	memset(igb_stats, 0, sizeof(*igb_stats));
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		rx_total += igb_stats->q_ipackets[i];

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

eth_stats_reset(struct rte_eth_dev *dev)
	struct pmd_internals *internal = dev->data->dev_private;
	for (i = 0; i < internal->nb_rx_queues; i++)
		internal->rx_queue[i].rx_pkts = 0;
	for (i = 0; i < internal->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;

eth_dev_close(struct rte_eth_dev *dev __rte_unused)

eth_queue_release(void *q __rte_unused)

eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
	pcap_q->mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] = pcap_q;

eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
	struct pmd_internals *internals = dev->data->dev_private;
	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
static struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
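
/*
 * For orientation (illustrative note, not part of the original file): these
 * callbacks are invoked through the generic ethdev API, e.g.
 *
 *	rte_eth_dev_start(port_id);             calls .dev_start
 *	rte_eth_stats_get(port_id, &stats);     calls .stats_get
 *	rte_eth_rx_burst(port_id, 0, bufs, n);  calls the rx_pkt_burst handler
 *	                                        assigned further below
 */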
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
open_rx_pcap(const char *key __rte_unused, const char *value, void *extra_args)
	const char *pcap_filename = value;
	struct rx_pcaps *pcaps = extra_args;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if ((rx_pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
			RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
		pcaps->pcaps[i] = rx_pcap;
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
open_tx_pcap(const char *key __rte_unused, const char *value, void *extra_args)
	const char *pcap_filename = value;
	struct tx_pcaps *dumpers = extra_args;
	pcap_dumper_t *dumper;

	for (i = 0; i < dumpers->num_of_tx; i++) {
		 * We need to create a dummy empty pcap_t to use it
		 * with pcap_dump_open(). It is created with a snapshot
		 * length large enough for an Ethernet frame.
		if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
			RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");

		/* The dumper is created using the previous pcap_t reference */
		if ((dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
			RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
		dumpers->dumpers[i] = dumper;

 * pcap_open_live wrapper function
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
 * Opens an interface for reading and writing
open_rx_tx_iface(const char *key __rte_unused, const char *value, void *extra_args)
	const char *iface = value;
	pcap_t **pcap = extra_args;

	if (open_iface_live(iface, pcap) < 0)

 * Opens a NIC for reading packets from it
open_rx_iface(const char *key __rte_unused, const char *value, void *extra_args)
	const char *iface = value;
	struct rx_pcaps *pcaps = extra_args;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if (open_iface_live(iface, &pcap) < 0)
		pcaps->pcaps[i] = pcap;

 * Opens a NIC for writing packets to it
open_tx_iface(const char *key __rte_unused, const char *value, void *extra_args)
	const char *iface = value;
	struct tx_pcaps *pcaps = extra_args;

	for (i = 0; i < pcaps->num_of_tx; i++) {
		if (open_iface_live(iface, &pcap) < 0)
		pcaps->pcaps[i] = pcap;
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev,
		struct rte_kvargs *kvlist)
	struct rte_eth_dev_data *data = NULL;
	struct rte_pci_device *pci_dev = NULL;
	struct rte_kvargs_pair *pair = NULL;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)

			"Creating pcap-backed ethdev on numa socket %u\n", numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);

	*internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
	if (*internals == NULL)

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_dev_allocate(name);
	if (*eth_dev == NULL)

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in pci_driver
	 * - point eth_dev_data to internals and pci_driver
	 * - and point eth_dev structure to new eth_dev_data structure
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process */
	(*internals)->nb_rx_queues = nb_rx_queues;
	(*internals)->nb_tx_queues = nb_tx_queues;

	(*internals)->if_index = 0;
	(*internals)->if_index = if_nametoindex(pair->value);

	pci_dev->numa_node = numa_node;

	data->dev_private = *internals;
	data->port_id = (*eth_dev)->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->pci_dev = pci_dev;

	rte_free(*internals);

rte_eth_from_pcaps_n_dumpers(const char *name, pcap_t * const rx_queues[],
		const unsigned nb_rx_queues,
		pcap_dumper_t * const tx_queues[],
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist)
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
	if (tx_queues == NULL && nb_tx_queues > 0)

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
			&internals, &eth_dev, kvlist) < 0)
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_queue[i].pcap = rx_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_queue[i].dumper = tx_queues[i];
	}
	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;

rte_eth_from_pcaps(const char *name, pcap_t * const rx_queues[],
		const unsigned nb_rx_queues,
		pcap_t * const tx_queues[],
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist)
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
	if (tx_queues == NULL && nb_tx_queues > 0)

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
			&internals, &eth_dev, kvlist) < 0)
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_queue[i].pcap = rx_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_queue[i].pcap = tx_queues[i];
	}
	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx;

rte_pmd_pcap_devinit(const char *name, const char *params)
	unsigned numa_node, using_dumpers = 0;
	struct rte_kvargs *kvlist;
	struct rx_pcaps pcaps;
	struct tx_pcaps dumpers;

	RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);

	numa_node = rte_socket_id();

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	kvlist = rte_kvargs_parse(params, valid_arguments);
	 * If the iface argument is passed, we open the NIC and use it for
	 * both reading and writing.
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps.pcaps[0]);

		return rte_eth_from_pcaps(name, pcaps.pcaps, 1, pcaps.pcaps, 1,
	 * We check whether we want to open a RX stream from a real NIC or a
	 * pcap file.
	if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
		pcaps.num_of_rx = rte_kvargs_count(kvlist,
				ETH_PCAP_RX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
				&open_rx_iface, &pcaps);
	 * We check whether we want to open a TX stream to a real NIC or a
	 * pcap file.
	if ((dumpers.num_of_tx = rte_kvargs_count(kvlist,
			ETH_PCAP_TX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
		dumpers.num_of_tx = rte_kvargs_count(kvlist,
				ETH_PCAP_TX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);

		return rte_eth_from_pcaps_n_dumpers(name, pcaps.pcaps, pcaps.num_of_rx,
				dumpers.dumpers, dumpers.num_of_tx, numa_node, kvlist);

	return rte_eth_from_pcaps(name, pcaps.pcaps, pcaps.num_of_rx, dumpers.pcaps,
			dumpers.num_of_tx, numa_node, kvlist);

static struct rte_driver pmd_pcap_drv = {
	.init = rte_pmd_pcap_devinit,

PMD_REGISTER_DRIVER(pmd_pcap_drv);
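
/*
 * Minimal usage sketch (an assumption for illustration, not part of this
 * driver): once the EAL has created the virtual device, an application drives
 * it through the normal ethdev API.  "port_id" and "mb_pool" are assumed to
 * exist; error checking is omitted.
 *
 *	struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN } };
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb;
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	rte_eth_tx_burst(port_id, 0, pkts, nb);
 */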