/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/time.h>
#include <net/if.h>

#include <pcap.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"

#define ETH_PCAP_ARG_MAXLEN 64

#define RTE_PMD_PCAP_MAX_QUEUES 16
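
/*
 * Illustrative devargs built from the keys above (the vdev name prefix
 * depends on the DPDK version in use):
 *   rx_pcap=in.pcap,tx_pcap=out.pcap   file-backed rx and tx queues
 *   rx_iface=eth0,tx_iface=eth0        NIC-backed rx and tx queues
 *   iface=eth0                         one interface shared by rx and tx
 */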
static char errbuf[PCAP_ERRBUF_SIZE];
static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;

struct queue_stat {
	volatile unsigned long pkts;
	volatile unsigned long bytes;
	volatile unsigned long err_pkts;
};
struct pcap_rx_queue {
	pcap_t *pcap;
	uint8_t in_port;
	struct rte_mempool *mb_pool;
	struct queue_stat rx_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pcap_tx_queue {
	pcap_dumper_t *dumper;
	pcap_t *pcap;
	struct queue_stat tx_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	int if_index;
	int single_iface;
};

struct pmd_devargs {
	unsigned num_of_queue;
	struct devargs_queue {
		pcap_dumper_t *dumper;
		pcap_t *pcap;
		const char *name;
		const char *type;
	} queue[RTE_PMD_PCAP_MAX_QUEUES];
};
static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	NULL
};
static int open_single_tx_pcap(const char *pcap_filename,
		pcap_dumper_t **dumper);
static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
static int open_single_iface(const char *iface, pcap_t **pcap);
static struct ether_addr eth_addr = {
	.addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 }
};
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_FIXED,
};
static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool,
		struct rte_mbuf *mbuf,
		const u_char *data,
		uint16_t data_len)
{
	struct rte_mbuf *m = mbuf;

	/* Copy the first segment. */
	uint16_t len = rte_pktmbuf_tailroom(mbuf);

	rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
	data_len -= len;
	data += len;

	while (data_len > 0) {
		/* Allocate the next mbuf and chain it. */
		m->next = rte_pktmbuf_alloc(mb_pool);
		if (unlikely(!m->next))
			return -1;

		m = m->next;

		/* Headroom is not needed in chained mbufs. */
		rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
		m->pkt_len = 0;
		m->data_len = 0;

		/* Copy the next segment. */
		len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
		rte_memcpy(rte_pktmbuf_append(m, len), data, len);

		mbuf->nb_segs++;
		data_len -= len;
		data += len;
	}

	return mbuf->nb_segs;
}
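
/*
 * A worked example of the chaining above, assuming a mempool with a
 * 2176-byte data room and the default 128-byte RTE_PKTMBUF_HEADROOM:
 * the first mbuf holds up to 2048 bytes (its tailroom), while every
 * chained mbuf reclaims its headroom and holds up to 2176 bytes. A
 * 5000-byte capture therefore lands in three segments of
 * 2048 + 2176 + 776 bytes.
 */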
/* Copy data from mbuf chain to a buffer suitable for writing to a pcap file. */
static void
eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
{
	uint16_t data_len = 0;

	while (mbuf) {
		rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
			mbuf->data_len);

		data_len += mbuf->data_len;
		mbuf = mbuf->next;
	}
}
static uint16_t
eth_pcap_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned int i;
	struct pcap_pkthdr header;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint32_t rx_bytes = 0;

	if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap_q->pcap, &header);
		if (unlikely(packet == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* Now get the space available for data in the mbuf */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
				RTE_PKTMBUF_HEADROOM);

		if (header.caplen <= buf_size) {
			/* pcap packet will fit in the mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
					header.caplen);
			mbuf->data_len = (uint16_t)header.caplen;
		} else {
			/* Try to read the jumbo frame into multiple mbufs. */
			if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
					mbuf, packet, header.caplen) == -1)) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		mbuf->pkt_len = (uint16_t)header.caplen;
		mbuf->port = pcap_q->in_port;
		bufs[num_rx] = mbuf;
		num_rx++;
		rx_bytes += header.caplen;
	}

	pcap_q->rx_stat.pkts += num_rx;
	pcap_q->rx_stat.bytes += rx_bytes;

	return num_rx;
}
static inline void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	/* Convert the sub-second remainder of the cycle count to microseconds. */
	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
	timeradd(&start_time, &cur_time, ts);
}
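
/*
 * Worked example of the conversion above (illustrative numbers): with
 * hz = 2e9 and an elapsed count of 5e9 cycles, tv_sec = 5e9 / 2e9 = 2
 * and tv_usec = (5e9 % 2e9) * 1e6 / 2e9 = 500000, i.e. the packet is
 * stamped 2.5 seconds after start_time.
 */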
/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	struct pcap_pkthdr header;

	if (dumper_q->dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		calculate_timestamp(&header.ts);
		header.len = mbuf->pkt_len;
		header.caplen = header.len;

		if (likely(mbuf->nb_segs == 1)) {
			pcap_dump((u_char *)dumper_q->dumper, &header,
					rte_pktmbuf_mtod(mbuf, void*));
		} else if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
			eth_pcap_gather_data(tx_pcap_data, mbuf);
			pcap_dump((u_char *)dumper_q->dumper, &header,
					tx_pcap_data);
		} else {
			RTE_LOG(ERR, PMD,
				"Dropping PCAP packet. "
				"Size (%d) > max jumbo size (%d).\n",
				mbuf->pkt_len,
				ETHER_MAX_JUMBO_FRAME_LEN);

			rte_pktmbuf_free(mbuf);
			break;
		}

		num_tx++;
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper_q->dumper);
	dumper_q->tx_stat.pkts += num_tx;
	dumper_q->tx_stat.bytes += tx_bytes;
	dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;

	return num_tx;
}
/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned int i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;

	if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (likely(mbuf->nb_segs == 1)) {
			ret = pcap_sendpacket(tx_queue->pcap,
					rte_pktmbuf_mtod(mbuf, u_char *),
					mbuf->pkt_len);
		} else if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
			eth_pcap_gather_data(tx_pcap_data, mbuf);
			ret = pcap_sendpacket(tx_queue->pcap,
					tx_pcap_data, mbuf->pkt_len);
		} else {
			RTE_LOG(ERR, PMD,
				"Dropping PCAP packet. "
				"Size (%d) > max jumbo size (%d).\n",
				mbuf->pkt_len,
				ETHER_MAX_JUMBO_FRAME_LEN);

			rte_pktmbuf_free(mbuf);
			break;
		}

		if (unlikely(ret != 0))
			break;
		num_tx++;
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_stat.pkts += num_tx;
	tx_queue->tx_stat.bytes += tx_bytes;
	tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;

	return num_tx;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &tx->pcap) < 0)
				return -1;
			rx->pcap = tx->pcap;
		}
		goto status_up;
	}

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
				return -1;
		} else if (!tx->pcap &&
				strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &tx->pcap) < 0)
				return -1;
		}
	}

	/* If not open already, open rx pcaps */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (rx->pcap != NULL)
			continue;

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
				return -1;
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &rx->pcap) < 0)
				return -1;
		}
	}

status_up:
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}
/*
 * This function gets called when the current port gets stopped.
 * It is the only place where we can close all the tx dumpers;
 * if it is not called, the dumpers are only flushed within each tx burst.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];
		pcap_close(tx->pcap);
		tx->pcap = NULL;
		rx->pcap = NULL;
		goto status_down;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (tx->dumper != NULL) {
			pcap_dump_close(tx->dumper);
			tx->dumper = NULL;
		}

		if (tx->pcap != NULL) {
			pcap_close(tx->pcap);
			tx->pcap = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (rx->pcap != NULL) {
			pcap_close(rx->pcap);
			rx->pcap = NULL;
		}
	}

status_down:
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->driver_name = drivername;
	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
static void
eth_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	unsigned long tx_packets_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
		stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
		rx_packets_total += stats->q_ipackets[i];
		rx_bytes_total += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
		stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
		stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
		tx_packets_total += stats->q_opackets[i];
		tx_bytes_total += stats->q_obytes[i];
		tx_packets_err_total += stats->q_errors[i];
	}

	stats->ipackets = rx_packets_total;
	stats->ibytes = rx_bytes_total;
	stats->opackets = tx_packets_total;
	stats->obytes = tx_bytes_total;
	stats->oerrors = tx_packets_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_stat.pkts = 0;
		internal->rx_queue[i].rx_stat.bytes = 0;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_stat.pkts = 0;
		internal->tx_queue[i].tx_stat.bytes = 0;
		internal->tx_queue[i].tx_stat.err_pkts = 0;
	}
}
static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] = pcap_q;
	pcap_q->in_port = dev->data->port_id;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];

	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	unsigned int i;
	const char *pcap_filename = value;
	struct pmd_devargs *rx = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < rx->num_of_queue; i++) {
		if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
			return -1;

		rx->queue[i].pcap = pcap;
		rx->queue[i].name = pcap_filename;
		rx->queue[i].type = key;
	}

	return 0;
}
static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
	if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n",
			pcap_filename, errbuf);
		return -1;
	}

	return 0;
}
/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
	unsigned int i;
	const char *pcap_filename = value;
	struct pmd_devargs *dumpers = extra_args;
	pcap_dumper_t *dumper;

	for (i = 0; i < dumpers->num_of_queue; i++) {
		if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
			return -1;

		dumpers->queue[i].dumper = dumper;
		dumpers->queue[i].name = pcap_filename;
		dumpers->queue[i].type = key;
	}

	return 0;
}
static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
	pcap_t *tx_pcap;

	/*
	 * We need to create a dummy empty pcap_t to use it with
	 * pcap_dump_open(); it is created with a large enough Ethernet
	 * snapshot length.
	 */
	if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
			== NULL) {
		RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
		return -1;
	}

	/* The dumper is created using the previous pcap_t reference */
	if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n",
			pcap_filename);
		return -1;
	}

	return 0;
}
/*
 * pcap_open_live() wrapper function.
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

	if (*pcap == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
		return -1;
	}

	return 0;
}
/*
 * Opens an interface for reading and writing.
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *tx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;

	tx->queue[0].pcap = pcap;
	tx->queue[0].name = iface;
	tx->queue[0].type = key;

	return 0;
}
/*
 * Opens a NIC for reading packets from it.
 */
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
	unsigned int i;
	const char *iface = value;
	struct pmd_devargs *rx = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < rx->num_of_queue; i++) {
		if (open_single_iface(iface, &pcap) < 0)
			return -1;

		rx->queue[i].pcap = pcap;
		rx->queue[i].name = iface;
		rx->queue[i].type = key;
	}

	return 0;
}
/*
 * Opens a NIC for writing packets to it.
 */
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
	unsigned int i;
	const char *iface = value;
	struct pmd_devargs *tx = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < tx->num_of_queue; i++) {
		if (open_single_iface(iface, &pcap) < 0)
			return -1;

		tx->queue[i].pcap = pcap;
		tx->queue[i].name = iface;
		tx->queue[i].type = key;
	}

	return 0;
}
static int
open_single_iface(const char *iface, pcap_t **pcap)
{
	if (open_iface_live(iface, pcap) < 0) {
		RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
		return -1;
	}

	return 0;
}
static int
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
		const unsigned nb_tx_queues, struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
{
	struct rte_eth_dev_data *data = NULL;
	unsigned int numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
		numa_node);

	/* now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	*internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
			numa_node);
	if (*internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_dev_allocate(name);
	if (*eth_dev == NULL)
		goto error;

	/* check length of device name */
	if ((strlen((*eth_dev)->data->name) + 1) > sizeof(data->name))
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	data->dev_private = *internals;
	data->port_id = (*eth_dev)->data->port_id;
	snprintf(data->name, sizeof(data->name), "%s",
		(*eth_dev)->data->name);
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	/*
	 * NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process
	 */
	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	return 0;

error:
	rte_free(data);
	rte_free(*internals);

	return -1;
}
static int
rte_eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
		const unsigned nb_rx_queues, struct pmd_devargs *tx_queues,
		const unsigned nb_tx_queues, struct rte_kvargs *kvlist,
		struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
	struct rte_kvargs_pair *pair = NULL;
	unsigned int k_idx;
	unsigned int i;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
		return -1;
	if (tx_queues == NULL && nb_tx_queues > 0)
		return -1;

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, internals,
			eth_dev) < 0)
		return -1;

	for (i = 0; i < nb_rx_queues; i++) {
		(*internals)->rx_queue[i].pcap = rx_queues->queue[i].pcap;
		snprintf((*internals)->rx_queue[i].name,
			sizeof((*internals)->rx_queue[i].name), "%s",
			rx_queues->queue[i].name);
		snprintf((*internals)->rx_queue[i].type,
			sizeof((*internals)->rx_queue[i].type), "%s",
			rx_queues->queue[i].type);
	}

	for (i = 0; i < nb_tx_queues; i++) {
		(*internals)->tx_queue[i].dumper = tx_queues->queue[i].dumper;
		snprintf((*internals)->tx_queue[i].name,
			sizeof((*internals)->tx_queue[i].name), "%s",
			tx_queues->queue[i].name);
		snprintf((*internals)->tx_queue[i].type,
			sizeof((*internals)->tx_queue[i].type), "%s",
			tx_queues->queue[i].type);
	}

	/* find the iface key/value pair, if one was passed */
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
			break;
		pair = NULL;
	}

	if (pair == NULL)
		(*internals)->if_index = 0;
	else
		(*internals)->if_index = if_nametoindex(pair->value);

	return 0;
}
static int
rte_eth_from_pcaps(const char *name, struct pmd_devargs *rx_queues,
		const unsigned nb_rx_queues, struct pmd_devargs *tx_queues,
		const unsigned nb_tx_queues, struct rte_kvargs *kvlist,
		int single_iface, unsigned int using_dumpers)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	int ret;

	ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
		tx_queues, nb_tx_queues, kvlist, &internals, &eth_dev);
	if (ret < 0)
		return ret;

	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

	eth_dev->rx_pkt_burst = eth_pcap_rx;

	if (using_dumpers)
		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
	else
		eth_dev->tx_pkt_burst = eth_pcap_tx;

	return 0;
}
static int
rte_pmd_pcap_devinit(const char *name, const char *params)
{
	unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
	struct rte_kvargs *kvlist;
	struct pmd_devargs pcaps = {0};
	struct pmd_devargs dumpers = {0};
	int single_iface = 0;
	int ret;

	RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);
		if (ret < 0)
			goto free_kvlist;

		dumpers.queue[0].pcap = pcaps.queue[0].pcap;
		dumpers.queue[0].name = pcaps.queue[0].name;
		dumpers.queue[0].type = pcaps.queue[0].type;

		single_iface = 1;

		ret = rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
				kvlist, single_iface, is_tx_pcap);
		goto free_kvlist;
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC or a
	 * pcap file
	 */
	pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
	if (pcaps.num_of_queue)
		is_rx_pcap = 1;
	else
		pcaps.num_of_queue = rte_kvargs_count(kvlist,
				ETH_PCAP_RX_IFACE_ARG);

	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;

	if (is_rx_pcap)
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	else
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
				&open_rx_iface, &pcaps);
	if (ret < 0)
		goto free_kvlist;

	/*
	 * We check whether we want to open a TX stream to a real NIC or a
	 * pcap file
	 */
	dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
	if (dumpers.num_of_queue)
		is_tx_pcap = 1;
	else
		dumpers.num_of_queue = rte_kvargs_count(kvlist,
				ETH_PCAP_TX_IFACE_ARG);

	if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
		dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;

	if (is_tx_pcap)
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
	else
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_queue, &dumpers,
			dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}
static int
rte_pmd_pcap_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
			rte_socket_id());

	if (name == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_pcap_drv = {
	.init = rte_pmd_pcap_devinit,
	.uninit = rte_pmd_pcap_devuninit,
};

DRIVER_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
DRIVER_REGISTER_PARAM_STRING(net_pcap,
	ETH_PCAP_RX_PCAP_ARG "=<string> "
	ETH_PCAP_TX_PCAP_ARG "=<string> "
	ETH_PCAP_RX_IFACE_ARG "=<ifc> "
	ETH_PCAP_TX_IFACE_ARG "=<ifc> "
	ETH_PCAP_IFACE_ARG "=<ifc>");
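
/*
 * Illustrative invocation (a sketch; the exact binary, EAL flags and the
 * vdev name prefix depend on the DPDK build in use):
 *
 *   testpmd -c 0x3 -n 4 \
 *       --vdev 'net_pcap0,rx_pcap=in.pcap,tx_pcap=out.pcap' \
 *       -- --port-topology=chained
 *
 * This feeds packets from in.pcap through eth_pcap_rx() and records every
 * transmitted packet to out.pcap through eth_pcap_tx_dumper().
 */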