/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <time.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <net/if.h>

#include <pcap.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN 4096
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
#define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG    "iface"
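
/*
 * Illustrative usage of the arguments above (paths and device names are
 * examples, not defaults); they are passed on the EAL command line when
 * the virtual device is created:
 *
 *   --vdev 'eth_pcap0,rx_pcap=/path/in.pcap,tx_pcap=/path/out.pcap'
 *   --vdev 'eth_pcap1,rx_iface=eth0,tx_iface=eth0'
 *   --vdev 'eth_pcap2,iface=eth0'
 */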

static char errbuf[PCAP_ERRBUF_SIZE];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;

struct pcap_rx_queue {
	pcap_t *pcap;
	uint8_t in_port;
	struct rte_mempool *mb_pool;
	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
	const char *name;
	const char *type;
};

struct pcap_tx_queue {
	pcap_dumper_t *dumper;
	pcap_t *pcap;
	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
	const char *name;
	const char *type;
};

struct rx_pcaps {
	unsigned num_of_rx;
	pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
	const char *names[RTE_PMD_RING_MAX_RX_RINGS];
	const char *types[RTE_PMD_RING_MAX_RX_RINGS];
};

struct tx_pcaps {
	unsigned num_of_tx;
	pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
	pcap_t *pcaps[RTE_PMD_RING_MAX_TX_RINGS];
	const char *names[RTE_PMD_RING_MAX_TX_RINGS];
	const char *types[RTE_PMD_RING_MAX_TX_RINGS];
};
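
/*
 * rx_pcaps/tx_pcaps are transient holders: the kvargs handlers below
 * (open_rx_pcap(), open_tx_iface(), ...) fill them in, and
 * rte_eth_from_pcaps*() copies their contents into the per-queue
 * structures held in pmd_internals (below).
 */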

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
	struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
	unsigned nb_rx_queues;
	unsigned nb_tx_queues;
	int if_index;
	int single_iface;
};

static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	NULL
};

static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper);
static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
static int open_single_iface(const char *iface, pcap_t **pcap);

static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = 0
};

static uint16_t
eth_pcap_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	struct pcap_pkthdr header;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint16_t num_rx = 0;
	uint16_t buf_size;

	if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap_q->pcap, &header);
		if (unlikely(packet == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* Now get the space available for data in the mbuf */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
				RTE_PKTMBUF_HEADROOM);

		if (header.len <= buf_size) {
			/* pcap packet will fit in the mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
					header.len);
			mbuf->data_len = (uint16_t)header.len;
			mbuf->pkt_len = mbuf->data_len;
			mbuf->port = pcap_q->in_port;
			bufs[num_rx] = mbuf;
			num_rx++;
		} else {
			/* pcap packet will not fit in the mbuf, so drop packet */
			RTE_LOG(ERR, PMD,
					"PCAP packet %d bytes will not fit in mbuf (%d bytes)\n",
					header.len, buf_size);
			rte_pktmbuf_free(mbuf);
		}
	}

	pcap_q->rx_pkts += num_rx;

	return num_rx;
}

static inline void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	/* one second is 1e6 microseconds, not 10e6 */
	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
	timeradd(&start_time, &cur_time, ts);
}
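
/*
 * Worked example with illustrative numbers: for a 2 GHz timer
 * (hz = 2000000000) and cycles = 5000000000, tv_sec = 5e9 / 2e9 = 2 and
 * tv_usec = (1e9 * 1e6) / 2e9 = 500000, i.e. the dumped timestamp is
 * start_time plus 2.5 seconds.
 */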

/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	struct pcap_pkthdr header;

	if (dumper_q->dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		calculate_timestamp(&header.ts);
		header.len = mbuf->data_len;
		header.caplen = header.len;
		pcap_dump((u_char *)dumper_q->dumper, &header,
				rte_pktmbuf_mtod(mbuf, void*));
		rte_pktmbuf_free(mbuf);
		num_tx++;
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper_q->dumper);
	dumper_q->tx_pkts += num_tx;
	dumper_q->err_pkts += nb_pkts - num_tx;

	return num_tx;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;

	if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		ret = pcap_sendpacket(tx_queue->pcap,
				rte_pktmbuf_mtod(mbuf, u_char *),
				mbuf->data_len);
		if (unlikely(ret != 0))
			break;
		num_tx++;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_pkts += num_tx;
	tx_queue->err_pkts += nb_pkts - num_tx;

	return num_tx;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &tx->pcap) < 0)
				return -1;
			rx->pcap = tx->pcap;
		}
		goto status_up;
	}

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < internals->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
				return -1;
		} else if (!tx->pcap && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &tx->pcap) < 0)
				return -1;
		}
	}

	/* If not open already, open rx pcaps */
	for (i = 0; i < internals->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (rx->pcap != NULL)
			continue;

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
				return -1;
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &rx->pcap) < 0)
				return -1;
		}
	}

status_up:
	dev->data->dev_link.link_status = 1;

	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 * It is the only place we can close the tx stream dumpers; if it is
 * not called, the dumpers are still flushed within each tx burst.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];
		pcap_close(tx->pcap);
		tx->pcap = NULL;
		rx->pcap = NULL;
		goto status_down;
	}

	for (i = 0; i < internals->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (tx->dumper != NULL) {
			pcap_dump_close(tx->dumper);
			tx->dumper = NULL;
		}

		if (tx->pcap != NULL) {
			pcap_close(tx->pcap);
			tx->pcap = NULL;
		}
	}

	for (i = 0; i < internals->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (rx->pcap != NULL) {
			pcap_close(rx->pcap);
			rx->pcap = NULL;
		}
	}

status_down:
	dev->data->dev_link.link_status = 0;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->driver_name = drivername;
	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

static void
eth_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *igb_stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
			i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		rx_total += igb_stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
			i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_rx_queues; i++)
		internal->rx_queue[i].rx_pkts = 0;

	for (i = 0; i < internal->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
	}
}

static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] = pcap_q;
	pcap_q->in_port = dev->data->port_id;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
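
/*
 * These callbacks are reached through the generic ethdev API: for a port
 * backed by this PMD, rte_eth_dev_start() invokes eth_dev_start() above,
 * rte_eth_stats_get() invokes eth_stats_get(), and so on.
 */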

static struct eth_driver rte_pcap_pmd = {
	.pci_drv = {
		.name = "rte_pcap_pmd",
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
	},
};

/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *pcap_filename = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
			return -1;

		pcaps->pcaps[i] = pcap;
		pcaps->names[i] = pcap_filename;
		pcaps->types[i] = key;
	}

	return 0;
}

static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
	if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
		return -1;
	}

	return 0;
}

/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *pcap_filename = value;
	struct tx_pcaps *dumpers = extra_args;
	pcap_dumper_t *dumper;

	for (i = 0; i < dumpers->num_of_tx; i++) {
		if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
			return -1;

		dumpers->dumpers[i] = dumper;
		dumpers->names[i] = pcap_filename;
		dumpers->types[i] = key;
	}

	return 0;
}

static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
	pcap_t *tx_pcap;

	/*
	 * We need to create a dummy empty pcap_t to use it with
	 * pcap_dump_open(); DLT_EN10MB and the snapshot length give the
	 * dump file an Ethernet link type and a large enough snaplen.
	 */
	if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
			== NULL) {
		RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
		return -1;
	}

	/* The dumper is created using the previous pcap_t reference */
	if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
		return -1;
	}

	return 0;
}

/*
 * pcap_open_live wrapper function
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

	if (*pcap == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
		return -1;
	}

	return 0;
}

/*
 * Opens an interface for reading and writing
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;

	pcaps->pcaps[0] = pcap;
	pcaps->names[0] = iface;
	pcaps->types[0] = key;

	return 0;
}

/*
 * Opens a NIC for reading packets from it
 */
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *iface = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if (open_single_iface(iface, &pcap) < 0)
			return -1;
		pcaps->pcaps[i] = pcap;
		pcaps->names[i] = iface;
		pcaps->types[i] = key;
	}

	return 0;
}

/*
 * Opens a NIC for writing packets to it
 */
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *iface = value;
	struct tx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_tx; i++) {
		if (open_single_iface(iface, &pcap) < 0)
			return -1;
		pcaps->pcaps[i] = pcap;
		pcaps->names[i] = iface;
		pcaps->types[i] = key;
	}

	return 0;
}

static int
open_single_iface(const char *iface, pcap_t **pcap)
{
	if (open_iface_live(iface, pcap) < 0) {
		RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
		return -1;
	}

	return 0;
}

static int
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev,
		struct rte_kvargs *kvlist)
{
	struct rte_eth_dev_data *data = NULL;
	struct rte_pci_device *pci_dev = NULL;
	unsigned k_idx;
	struct rte_kvargs_pair *pair = NULL;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
			break;
	}

	RTE_LOG(INFO, PMD,
			"Creating pcap-backed ethdev on numa socket %u\n", numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
	if (pci_dev == NULL)
		goto error;

	*internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
	if (*internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (*eth_dev == NULL)
		goto error;

	/* check length of device name */
	if ((strlen((*eth_dev)->data->name) + 1) > sizeof(data->name))
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in pci_driver
	 * - point eth_dev_data to internals and pci_driver
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the rings are local per-process
	 */
	(*internals)->nb_rx_queues = nb_rx_queues;
	(*internals)->nb_tx_queues = nb_tx_queues;

	if (pair == NULL)
		(*internals)->if_index = 0;
	else
		(*internals)->if_index = if_nametoindex(pair->value);

	pci_dev->numa_node = numa_node;

	data->dev_private = *internals;
	data->port_id = (*eth_dev)->data->port_id;
	snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->pci_dev = pci_dev;
	(*eth_dev)->driver = &rte_pcap_pmd;

	return 0;

error:
	rte_free(data);
	rte_free(pci_dev);
	rte_free(*internals);

	return -1;
}

static int
rte_eth_from_pcaps_n_dumpers(const char *name,
		struct rx_pcaps *rx_queues,
		const unsigned nb_rx_queues,
		struct tx_pcaps *tx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	unsigned i;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
		return -1;
	if (tx_queues == NULL && nb_tx_queues > 0)
		return -1;

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
			&internals, &eth_dev, kvlist) < 0)
		return -1;

	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_queue[i].pcap = rx_queues->pcaps[i];
		internals->rx_queue[i].name = rx_queues->names[i];
		internals->rx_queue[i].type = rx_queues->types[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_queue[i].dumper = tx_queues->dumpers[i];
		internals->tx_queue[i].name = tx_queues->names[i];
		internals->tx_queue[i].type = tx_queues->types[i];
	}

	/* using multiple pcaps/interfaces */
	internals->single_iface = 0;

	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;

	return 0;
}

static int
rte_eth_from_pcaps(const char *name,
		struct rx_pcaps *rx_queues,
		const unsigned nb_rx_queues,
		struct tx_pcaps *tx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist,
		int single_iface)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	unsigned i;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
		return -1;
	if (tx_queues == NULL && nb_tx_queues > 0)
		return -1;

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
			&internals, &eth_dev, kvlist) < 0)
		return -1;

	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_queue[i].pcap = rx_queues->pcaps[i];
		internals->rx_queue[i].name = rx_queues->names[i];
		internals->rx_queue[i].type = rx_queues->types[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_queue[i].pcap = tx_queues->pcaps[i];
		internals->tx_queue[i].name = tx_queues->names[i];
		internals->tx_queue[i].type = tx_queues->types[i];
	}

	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx;

	return 0;
}

static int
rte_pmd_pcap_devinit(const char *name, const char *params)
{
	unsigned numa_node, using_dumpers = 0;
	int ret;
	struct rte_kvargs *kvlist;
	struct rx_pcaps pcaps;
	struct tx_pcaps dumpers;

	RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);

	numa_node = rte_socket_id();

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);
		if (ret < 0)
			goto free_kvlist;

		dumpers.pcaps[0] = pcaps.pcaps[0];
		dumpers.names[0] = pcaps.names[0];
		dumpers.types[0] = pcaps.types[0];
		ret = rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
				numa_node, kvlist, 1);
		goto free_kvlist;
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC or a
	 * pcap file
	 */
	if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else {
		pcaps.num_of_rx = rte_kvargs_count(kvlist,
				ETH_PCAP_RX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
				&open_rx_iface, &pcaps);
	}

	if (ret < 0)
		goto free_kvlist;

	/*
	 * We check whether we want to open a TX stream to a real NIC or a
	 * pcap file
	 */
	if ((dumpers.num_of_tx = rte_kvargs_count(kvlist,
			ETH_PCAP_TX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
		using_dumpers = 1;
	} else {
		dumpers.num_of_tx = rte_kvargs_count(kvlist,
				ETH_PCAP_TX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);
	}

	if (ret < 0)
		goto free_kvlist;

	if (using_dumpers)
		ret = rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_rx,
				&dumpers, dumpers.num_of_tx, numa_node, kvlist);
	else
		ret = rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_rx, &dumpers,
				dumpers.num_of_tx, numa_node, kvlist, 0);

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

static int
rte_pmd_pcap_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
			rte_socket_id());

	if (name == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);
	rte_free(eth_dev->pci_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_driver pmd_pcap_drv = {
	.name = "eth_pcap",
	.type = PMD_VDEV,
	.init = rte_pmd_pcap_devinit,
	.uninit = rte_pmd_pcap_devuninit,
};

PMD_REGISTER_DRIVER(pmd_pcap_drv);
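
/*
 * A minimal application-side sketch of driving this PMD (hypothetical names:
 * port_id is the port created from a --vdev 'eth_pcap0,...' argument, and
 * mbuf_pool/port_conf are set up by the application):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb;
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);	(reaches eth_pcap_rx)
 *	rte_eth_tx_burst(port_id, 0, pkts, nb);	(reaches eth_pcap_tx or
 *						 eth_pcap_tx_dumper)
 */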