/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <sys/time.h>
#include <limits.h>

#include <net/if.h>

#include <pcap.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1

#define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG    "iface"

#define ETH_PCAP_ARG_MAXLEN 64

#define RTE_PMD_PCAP_MAX_QUEUES 16
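
/*
 * Illustrative usage of the argument names above (not part of the driver):
 * they are consumed from a --vdev string on the EAL command line. Device
 * names and file paths below are placeholders.
 *
 *   --vdev 'eth_pcap0,rx_pcap=in.pcap,tx_pcap=out.pcap'
 *   --vdev 'eth_pcap1,rx_iface=eth0,tx_iface=eth0'
 *   --vdev 'eth_pcap2,iface=eth1'
 */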

static char errbuf[PCAP_ERRBUF_SIZE];
static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;

struct pcap_rx_queue {
	pcap_t *pcap;
	uint8_t in_port;
	struct rte_mempool *mb_pool;
	volatile unsigned long rx_pkts;
	volatile unsigned long rx_bytes;
	volatile unsigned long err_pkts;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pcap_tx_queue {
	pcap_dumper_t *dumper;
	pcap_t *pcap;
	volatile unsigned long tx_pkts;
	volatile unsigned long tx_bytes;
	volatile unsigned long err_pkts;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct rx_pcaps {
	unsigned num_of_rx;
	pcap_t *pcaps[RTE_PMD_PCAP_MAX_QUEUES];
	const char *names[RTE_PMD_PCAP_MAX_QUEUES];
	const char *types[RTE_PMD_PCAP_MAX_QUEUES];
};

struct tx_pcaps {
	unsigned num_of_tx;
	pcap_dumper_t *dumpers[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_t *pcaps[RTE_PMD_PCAP_MAX_QUEUES];
	const char *names[RTE_PMD_PCAP_MAX_QUEUES];
	const char *types[RTE_PMD_PCAP_MAX_QUEUES];
};

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	int if_index;
	int single_iface;
};

static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	NULL
};

static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper);
static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
static int open_single_iface(const char *iface, pcap_t **pcap);

static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_FIXED,
};

static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool,
		struct rte_mbuf *mbuf,
		const u_char *data,
		uint16_t data_len)
{
	struct rte_mbuf *m = mbuf;

	/* Copy the first segment. */
	uint16_t len = rte_pktmbuf_tailroom(mbuf);

	rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
	data_len -= len;
	data += len;

	while (data_len > 0) {
		/* Allocate next mbuf and point to that. */
		m->next = rte_pktmbuf_alloc(mb_pool);
		if (unlikely(!m->next))
			return -1;
		m = m->next;

		/* Headroom is not needed in chained mbufs. */
		rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
		m->pkt_len = 0;
		m->data_len = 0;

		/* Copy next segment. */
		len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
		rte_memcpy(rte_pktmbuf_append(m, len), data, len);

		mbuf->nb_segs++;
		data_len -= len;
		data += len;
	}

	return mbuf->nb_segs;
}
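
/*
 * Illustrative example (not taken from the source): assuming roughly 2 KB of
 * usable space per segment, a 5000-byte capture that does not fit in the
 * first mbuf ends up as a chain of three segments and the function returns
 * a segment count of 3. The sizes here are assumptions for the example only.
 */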

/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
static void
eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
{
	uint16_t data_len = 0;

	while (mbuf) {
		rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
				mbuf->data_len);
		data_len += mbuf->data_len;
		mbuf = mbuf->next;
	}
}

static uint16_t
eth_pcap_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	struct pcap_pkthdr header;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint32_t rx_bytes = 0;

	if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap_q->pcap, &header);
		if (unlikely(packet == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* Now get the space available for data in the mbuf */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
				RTE_PKTMBUF_HEADROOM);

		if (header.caplen <= buf_size) {
			/* pcap packet will fit in the mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
					header.caplen);
			mbuf->data_len = (uint16_t)header.caplen;
		} else {
			/* Try read jumbo frame into multi mbufs. */
			if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
					mbuf, packet, header.caplen) == -1)) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		mbuf->pkt_len = (uint16_t)header.caplen;
		mbuf->port = pcap_q->in_port;
		bufs[num_rx] = mbuf;
		num_rx++;
		rx_bytes += header.caplen;
	}
	pcap_q->rx_pkts += num_rx;
	pcap_q->rx_bytes += rx_bytes;

	return num_rx;
}

static inline void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	/* Scale the remainder to microseconds (1e6 microseconds per second). */
	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
	timeradd(&start_time, &cur_time, ts);
}
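
/*
 * Worked example with illustrative values (not from the code above): with
 * hz = 2,000,000,000 and cycles = 3,000,000,000 elapsed since start_cycles,
 * cur_time becomes { .tv_sec = 1, .tv_usec = 500000 }, which timeradd()
 * then adds to start_time to produce the packet timestamp.
 */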

/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	struct pcap_pkthdr header;

	if (dumper_q->dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		calculate_timestamp(&header.ts);
		header.len = mbuf->pkt_len;
		header.caplen = header.len;

		if (likely(mbuf->nb_segs == 1)) {
			pcap_dump((u_char *)dumper_q->dumper, &header,
					rte_pktmbuf_mtod(mbuf, void*));
		} else {
			if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
				eth_pcap_gather_data(tx_pcap_data, mbuf);
				pcap_dump((u_char *)dumper_q->dumper, &header,
						tx_pcap_data);
			} else {
				RTE_LOG(ERR, PMD,
					"Dropping PCAP packet. "
					"Size (%d) > max jumbo size (%d).\n",
					mbuf->pkt_len,
					ETHER_MAX_JUMBO_FRAME_LEN);

				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		num_tx++;
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper_q->dumper);
	dumper_q->tx_pkts += num_tx;
	dumper_q->tx_bytes += tx_bytes;
	dumper_q->err_pkts += nb_pkts - num_tx;

	return num_tx;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;

	if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (likely(mbuf->nb_segs == 1)) {
			ret = pcap_sendpacket(tx_queue->pcap,
					rte_pktmbuf_mtod(mbuf, u_char *),
					mbuf->pkt_len);
		} else {
			if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
				eth_pcap_gather_data(tx_pcap_data, mbuf);
				ret = pcap_sendpacket(tx_queue->pcap,
						tx_pcap_data, mbuf->pkt_len);
			} else {
				RTE_LOG(ERR, PMD,
					"Dropping PCAP packet. "
					"Size (%d) > max jumbo size (%d).\n",
					mbuf->pkt_len,
					ETHER_MAX_JUMBO_FRAME_LEN);

				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		if (unlikely(ret != 0))
			break;
		num_tx++;
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_pkts += num_tx;
	tx_queue->tx_bytes += tx_bytes;
	tx_queue->err_pkts += nb_pkts - num_tx;

	return num_tx;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &tx->pcap) < 0)
				return -1;
			rx->pcap = tx->pcap;
		}
		goto status_up;
	}

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
				return -1;
		} else if (!tx->pcap && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &tx->pcap) < 0)
				return -1;
		}
	}

	/* If not open already, open rx pcaps */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (rx->pcap != NULL)
			continue;

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
				return -1;
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &rx->pcap) < 0)
				return -1;
		}
	}

status_up:
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/*
 * This function gets called when the current port gets stopped. It is the
 * only place where all the tx stream dumpers can be closed; if it is not
 * called, the dumpers are still flushed within each tx burst.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];
		pcap_close(tx->pcap);
		tx->pcap = NULL;
		rx->pcap = NULL;
		goto status_down;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (tx->dumper != NULL) {
			pcap_dump_close(tx->dumper);
			tx->dumper = NULL;
		}

		if (tx->pcap != NULL) {
			pcap_close(tx->pcap);
			tx->pcap = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (rx->pcap != NULL) {
			pcap_close(rx->pcap);
			rx->pcap = NULL;
		}
	}

status_down:
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->driver_name = drivername;
	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

static void
eth_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *igb_stats)
{
	unsigned i;
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	unsigned long tx_packets_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
		rx_packets_total += igb_stats->q_ipackets[i];
		rx_bytes_total += igb_stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		tx_packets_total += igb_stats->q_opackets[i];
		tx_bytes_total += igb_stats->q_obytes[i];
		tx_packets_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_packets_total;
	igb_stats->ibytes = rx_bytes_total;
	igb_stats->opackets = tx_packets_total;
	igb_stats->obytes = tx_bytes_total;
	igb_stats->oerrors = tx_packets_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_pkts = 0;
		internal->rx_queue[i].rx_bytes = 0;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].tx_bytes = 0;
		internal->tx_queue[i].err_pkts = 0;
	}
}

static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] = pcap_q;
	pcap_q->in_port = dev->data->port_id;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
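
/*
 * Illustrative application-side usage (not part of this driver): the ops
 * above are reached indirectly through the generic ethdev API, e.g.
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(), NULL, mb_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 0, rte_socket_id(), NULL);
 *   rte_eth_dev_start(port_id);
 *
 * port_id, port_conf and mb_pool are placeholders supplied by the caller;
 * the descriptor counts are ignored by this PMD.
 */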

/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *pcap_filename = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
			return -1;

		pcaps->pcaps[i] = pcap;
		pcaps->names[i] = pcap_filename;
		pcaps->types[i] = key;
	}

	return 0;
}

static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
	if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
		return -1;
	}

	return 0;
}

/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *pcap_filename = value;
	struct tx_pcaps *dumpers = extra_args;
	pcap_dumper_t *dumper;

	for (i = 0; i < dumpers->num_of_tx; i++) {
		if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
			return -1;

		dumpers->dumpers[i] = dumper;
		dumpers->names[i] = pcap_filename;
		dumpers->types[i] = key;
	}

	return 0;
}

static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
	pcap_t *tx_pcap;

	/*
	 * We need to create a dummy empty pcap_t to use it
	 * with pcap_dump_open(). We create a sufficiently large
	 * Ethernet pcap holder.
	 */
	if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
			== NULL) {
		RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
		return -1;
	}

	/* The dumper is created using the previous pcap_t reference */
	if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
		return -1;
	}

	return 0;
}

/*
 * pcap_open_live wrapper function
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

	if (*pcap == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
		return -1;
	}

	return 0;
}

/*
 * Opens an interface for reading and writing
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;

	pcaps->pcaps[0] = pcap;
	pcaps->names[0] = iface;
	pcaps->types[0] = key;

	return 0;
}

/*
 * Opens a NIC for reading packets from it
 */
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *iface = value;
	struct rx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_rx; i++) {
		if (open_single_iface(iface, &pcap) < 0)
			return -1;
		pcaps->pcaps[i] = pcap;
		pcaps->names[i] = iface;
		pcaps->types[i] = key;
	}

	return 0;
}

/*
 * Opens a NIC for writing packets to it
 */
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
	unsigned i;
	const char *iface = value;
	struct tx_pcaps *pcaps = extra_args;
	pcap_t *pcap = NULL;

	for (i = 0; i < pcaps->num_of_tx; i++) {
		if (open_single_iface(iface, &pcap) < 0)
			return -1;
		pcaps->pcaps[i] = pcap;
		pcaps->names[i] = iface;
		pcaps->types[i] = key;
	}

	return 0;
}

static int
open_single_iface(const char *iface, pcap_t **pcap)
{
	if (open_iface_live(iface, pcap) < 0) {
		RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
		return -1;
	}

	return 0;
}

static int
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev,
		struct rte_kvargs *kvlist)
{
	struct rte_eth_dev_data *data = NULL;
	unsigned k_idx;
	struct rte_kvargs_pair *pair = NULL;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
			break;
	}

	RTE_LOG(INFO, PMD,
		"Creating pcap-backed ethdev on numa socket %u\n", numa_node);

	/* now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	*internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
	if (*internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_dev_allocate(name);
	if (*eth_dev == NULL)
		goto error;

	/* check length of device name */
	if ((strlen((*eth_dev)->data->name) + 1) > sizeof(data->name))
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the rings are local per-process
	 */
	if (pair == NULL)
		(*internals)->if_index = 0;
	else
		(*internals)->if_index = if_nametoindex(pair->value);

	data->dev_private = *internals;
	data->port_id = (*eth_dev)->data->port_id;
	snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	strncpy(data->name,
		(*eth_dev)->data->name, strlen((*eth_dev)->data->name));

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	return 0;

error:
	rte_free(data);
	rte_free(*internals);

	return -1;
}

static int
rte_eth_from_pcaps_common(const char *name, struct rx_pcaps *rx_queues,
		const unsigned nb_rx_queues, struct tx_pcaps *tx_queues,
		const unsigned nb_tx_queues, const unsigned numa_node,
		struct rte_kvargs *kvlist, struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
{
	unsigned i;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
		return -1;
	if (tx_queues == NULL && nb_tx_queues > 0)
		return -1;

	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
			internals, eth_dev, kvlist) < 0)
		return -1;

	for (i = 0; i < nb_rx_queues; i++) {
		(*internals)->rx_queue[i].pcap = rx_queues->pcaps[i];
		snprintf((*internals)->rx_queue[i].name,
			sizeof((*internals)->rx_queue[i].name), "%s",
			rx_queues->names[i]);
		snprintf((*internals)->rx_queue[i].type,
			sizeof((*internals)->rx_queue[i].type), "%s",
			rx_queues->types[i]);
	}

	for (i = 0; i < nb_tx_queues; i++) {
		(*internals)->tx_queue[i].dumper = tx_queues->dumpers[i];
		snprintf((*internals)->tx_queue[i].name,
			sizeof((*internals)->tx_queue[i].name), "%s",
			tx_queues->names[i]);
		snprintf((*internals)->tx_queue[i].type,
			sizeof((*internals)->tx_queue[i].type), "%s",
			tx_queues->types[i]);
	}

	return 0;
}

static int
rte_eth_from_pcaps_n_dumpers(const char *name,
		struct rx_pcaps *rx_queues,
		const unsigned nb_rx_queues,
		struct tx_pcaps *tx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	int ret;

	ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
			tx_queues, nb_tx_queues, numa_node, kvlist,
			&internals, &eth_dev);
	if (ret < 0)
		return ret;

	/* using multiple pcaps/interfaces */
	internals->single_iface = 0;

	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;

	return 0;
}

static int
rte_eth_from_pcaps(const char *name,
		struct rx_pcaps *rx_queues,
		const unsigned nb_rx_queues,
		struct tx_pcaps *tx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct rte_kvargs *kvlist,
		unsigned single_iface)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	int ret;

	ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
			tx_queues, nb_tx_queues, numa_node, kvlist,
			&internals, &eth_dev);
	if (ret < 0)
		return ret;

	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

	eth_dev->rx_pkt_burst = eth_pcap_rx;
	eth_dev->tx_pkt_burst = eth_pcap_tx;

	return 0;
}

static int
rte_pmd_pcap_devinit(const char *name, const char *params)
{
	unsigned numa_node, using_dumpers = 0;
	int ret;
	struct rte_kvargs *kvlist;
	struct rx_pcaps pcaps = {0};
	struct tx_pcaps dumpers = {0};

	RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);

	numa_node = rte_socket_id();

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);
		if (ret < 0)
			goto free_kvlist;

		dumpers.pcaps[0] = pcaps.pcaps[0];
		dumpers.names[0] = pcaps.names[0];
		dumpers.types[0] = pcaps.types[0];
		ret = rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
				numa_node, kvlist, 1);
		goto free_kvlist;
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC or a
	 * pcap file
	 */
	if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else {
		pcaps.num_of_rx = rte_kvargs_count(kvlist,
				ETH_PCAP_RX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
				&open_rx_iface, &pcaps);
	}

	if (ret < 0)
		goto free_kvlist;

	/*
	 * We check whether we want to open a TX stream to a real NIC or a
	 * pcap file
	 */
	if ((dumpers.num_of_tx = rte_kvargs_count(kvlist,
			ETH_PCAP_TX_PCAP_ARG))) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
		using_dumpers = 1;
	} else {
		dumpers.num_of_tx = rte_kvargs_count(kvlist,
				ETH_PCAP_TX_IFACE_ARG);
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);
	}

	if (ret < 0)
		goto free_kvlist;

	if (using_dumpers)
		ret = rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_rx,
				&dumpers, dumpers.num_of_tx, numa_node, kvlist);
	else
		ret = rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_rx, &dumpers,
				dumpers.num_of_tx, numa_node, kvlist, 0);

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

static int
rte_pmd_pcap_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
			rte_socket_id());

	if (name == NULL)
		return -1;

	/* retrieve the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_pcap_drv = {
	.init = rte_pmd_pcap_devinit,
	.uninit = rte_pmd_pcap_devuninit,
};

DRIVER_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
DRIVER_REGISTER_PARAM_STRING(net_pcap,