/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 */

#include <time.h>
#include <limits.h>
#include <unistd.h>

#include <pcap.h>

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>

#if defined(RTE_EXEC_ENV_FREEBSD)
#include <sys/sysctl.h>
#include <net/if_dl.h>
#endif

#include <rte_cycles.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>

#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1

#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx"

#define ETH_PCAP_ARG_MAXLEN 64

#define RTE_PMD_PCAP_MAX_QUEUES 16

static char errbuf[PCAP_ERRBUF_SIZE];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
static uint8_t iface_idx;

static uint64_t timestamp_rx_dynflag;
static int timestamp_dynfield_offset = -1;

struct queue_stat {
	volatile unsigned long pkts;
	volatile unsigned long bytes;
	volatile unsigned long err_pkts;
};

struct queue_missed_stat {
	/* last value retrieved from pcap */
	unsigned int pcap;
	/* stores values lost by pcap stop or rollover */
	unsigned long mnemonic;
	/* value on last reset */
	unsigned long reset;
};

struct pcap_rx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mempool *mb_pool;
	struct queue_stat rx_stat;
	struct queue_missed_stat missed_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];

	/* Contains pre-generated packets to be looped through */
	struct rte_ring *pkts;
};

struct pcap_tx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct queue_stat tx_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	char devargs[ETH_PCAP_ARG_MAXLEN];
	struct rte_ether_addr eth_addr;
	int if_index;
	int single_iface;
	int phy_mac;
	unsigned int infinite_rx;
};

struct pmd_process_private {
	pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
};

struct pmd_devargs {
	unsigned int num_of_queue;
	struct devargs_queue {
		pcap_dumper_t *dumper;
		pcap_t *pcap;
		const char *name;
		const char *type;
	} queue[RTE_PMD_PCAP_MAX_QUEUES];
	int phy_mac;
};

struct pmd_devargs_all {
	struct pmd_devargs rx_queues;
	struct pmd_devargs tx_queues;
	int single_iface;
	unsigned int is_tx_pcap;
	unsigned int is_tx_iface;
	unsigned int is_rx_pcap;
	unsigned int is_rx_iface;
	unsigned int infinite_rx;
};

static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_RX_IFACE_IN_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	ETH_PCAP_PHY_MAC_ARG,
	ETH_PCAP_INFINITE_RX_ARG,
	NULL
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_pcap_logtype, pmd.net.pcap, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static struct queue_missed_stat*
queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct queue_missed_stat *missed_stat =
			&internals->rx_queue[qid].missed_stat;
	const struct pmd_process_private *pp = dev->process_private;
	pcap_t *pcap = pp->rx_pcap[qid];
	struct pcap_stat stat;

	if (!pcap || (pcap_stats(pcap, &stat) != 0))
		return missed_stat;

	/* rollover check - best effort fixup assuming single rollover */
	if (stat.ps_drop < missed_stat->pcap)
		missed_stat->mnemonic += UINT_MAX;
	missed_stat->pcap = stat.ps_drop;

	return missed_stat;
}

static void
queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
{
	struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	missed_stat->mnemonic += missed_stat->pcap;
	missed_stat->pcap = 0;
}

static void
queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
{
	struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	missed_stat->reset = missed_stat->pcap;
	missed_stat->mnemonic = 0;
}

static unsigned long
queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
{
	const struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	return missed_stat->pcap + missed_stat->mnemonic - missed_stat->reset;
}

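/*
 * Added summary of the bookkeeping above: pcap_stats() reports ps_drop as
 * a cumulative 32-bit counter per handle. 'mnemonic' accumulates what that
 * counter can no longer express - the wrapped-around portion on rollover
 * and the final reading when a handle is closed on stop - so the value
 * reported to the application is pcap + mnemonic - reset.
 */
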
static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
		const u_char *data, uint16_t data_len)
{
	/* Copy the first segment. */
	uint16_t len = rte_pktmbuf_tailroom(mbuf);
	struct rte_mbuf *m = mbuf;

	rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
	data_len -= len;
	data += len;

	while (data_len > 0) {
		/* Allocate next mbuf and point to that. */
		m->next = rte_pktmbuf_alloc(mb_pool);

		if (unlikely(!m->next))
			return -1;

		m = m->next;

		/* Headroom is not needed in chained mbufs. */
		rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
		m->pkt_len = 0;
		m->data_len = 0;

		/* Copy next segment. */
		len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
		rte_memcpy(rte_pktmbuf_append(m, len), data, len);

		mbuf->nb_segs++;
		data_len -= len;
		data += len;
	}

	return mbuf->nb_segs;
}

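/*
 * Illustrative sizing (added commentary, not upstream text): with a common
 * 2048-byte data room and 128-byte headroom, a 9000-byte capture fills the
 * first mbuf's 1920 bytes of tailroom and then chains four more mbufs of
 * 2048 bytes each (their headroom is reclaimed above), five segments total.
 */
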
static uint16_t
eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	int i;
	struct pcap_rx_queue *pcap_q = queue;
	uint32_t rx_bytes = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *pcap_buf;
		int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
		if (err)
			return i;

		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
				rte_pktmbuf_mtod(pcap_buf, void *),
				pcap_buf->data_len);
		bufs[i]->data_len = pcap_buf->data_len;
		bufs[i]->pkt_len = pcap_buf->pkt_len;
		bufs[i]->port = pcap_q->port_id;
		rx_bytes += pcap_buf->data_len;

		/* Enqueue packet back on ring to allow infinite rx. */
		rte_ring_enqueue(pcap_q->pkts, pcap_buf);
	}

	pcap_q->rx_stat.pkts += i;
	pcap_q->rx_stat.bytes += rx_bytes;

	return i;
}

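/*
 * Design note (added commentary): the ring mbufs act as a read-only
 * template and are copied into freshly allocated mbufs above, so the
 * application can hold or free its burst without ever draining the
 * template ring.
 */
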
static uint16_t
eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct pcap_pkthdr header;
	struct pmd_process_private *pp;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint16_t num_rx = 0;
	uint32_t rx_bytes = 0;
	pcap_t *pcap;

	pp = rte_eth_devices[pcap_q->port_id].process_private;
	pcap = pp->rx_pcap[pcap_q->queue_id];

	if (unlikely(pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap, &header);
		if (unlikely(packet == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
			/* pcap packet will fit in the mbuf, can copy it */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
					header.caplen);
			mbuf->data_len = (uint16_t)header.caplen;
		} else {
			/* Try to read the jumbo frame into multiple mbufs. */
			if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
						       mbuf, packet,
						       header.caplen) == -1)) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		mbuf->pkt_len = (uint16_t)header.caplen;
		*RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset,
			rte_mbuf_timestamp_t *) =
				(uint64_t)header.ts.tv_sec * 1000000 +
				header.ts.tv_usec;
		mbuf->ol_flags |= timestamp_rx_dynflag;
		mbuf->port = pcap_q->port_id;
		bufs[num_rx] = mbuf;
		num_rx++;
		rx_bytes += header.caplen;
	}
	pcap_q->rx_stat.pkts += num_rx;
	pcap_q->rx_stat.bytes += rx_bytes;

	return num_rx;
}

static uint16_t
eth_null_rx(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

#define NSEC_PER_SEC	1000000000L

static void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	cur_time.tv_usec = (cycles % hz) * NSEC_PER_SEC / hz;

	ts->tv_sec = start_time.tv_sec + cur_time.tv_sec;
	ts->tv_usec = start_time.tv_usec + cur_time.tv_usec;
	if (ts->tv_usec >= NSEC_PER_SEC) {
		ts->tv_usec -= NSEC_PER_SEC;
		ts->tv_sec += 1;
	}
}

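/*
 * Added note: despite the tv_usec name, the fraction computed above is
 * scaled by NSEC_PER_SEC and thus carries nanoseconds. This matches the
 * tx dumper below, which is opened with PCAP_TSTAMP_PRECISION_NANO.
 */
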
/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	struct pcap_pkthdr header;
	pcap_dumper_t *dumper;
	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
	size_t len, caplen;

	pp = rte_eth_devices[dumper_q->port_id].process_private;
	dumper = pp->tx_dumper[dumper_q->queue_id];

	if (dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file
	 * dumper
	 */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		len = caplen = rte_pktmbuf_pkt_len(mbuf);
		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
				len > sizeof(temp_data))) {
			caplen = sizeof(temp_data);
		}

		calculate_timestamp(&header.ts);
		header.len = len;
		header.caplen = caplen;
		/* rte_pktmbuf_read() returns a pointer to the data directly
		 * in the mbuf (when the mbuf is contiguous) or, otherwise,
		 * a pointer to temp_data after copying into it.
		 */
		pcap_dump((u_char *)dumper, &header,
				rte_pktmbuf_read(mbuf, 0, caplen, temp_data));

		num_tx++;
		tx_bytes += caplen;
		rte_pktmbuf_free(mbuf);
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper);
	dumper_q->tx_stat.pkts += num_tx;
	dumper_q->tx_stat.bytes += tx_bytes;
	dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;

	return num_tx;
}

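/*
 * Added note: when a multi-segment packet exceeds temp_data, caplen is
 * clamped above, so the dumped record is truncated while header.len still
 * records the full packet length - the usual pcap snaplen semantics.
 */
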
/*
 * Callback to handle dropping packets in the infinite rx case.
 */
static uint16_t
eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	uint32_t tx_bytes = 0;
	struct pcap_tx_queue *tx_queue = queue;

	if (unlikely(nb_pkts == 0))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		tx_bytes += bufs[i]->pkt_len;
		rte_pktmbuf_free(bufs[i]);
	}

	tx_queue->tx_stat.pkts += nb_pkts;
	tx_queue->tx_stat.bytes += tx_bytes;

	return i;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	pcap_t *pcap;
	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
	size_t len;

	pp = rte_eth_devices[tx_queue->port_id].process_private;
	pcap = pp->tx_pcap[tx_queue->queue_id];

	if (unlikely(nb_pkts == 0 || pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		len = rte_pktmbuf_pkt_len(mbuf);
		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
				len > sizeof(temp_data))) {
			PMD_LOG(ERR,
				"Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
				len, sizeof(temp_data));
			rte_pktmbuf_free(mbuf);
			continue;
		}

		/* rte_pktmbuf_read() returns a pointer to the data directly
		 * in the mbuf (when the mbuf is contiguous) or, otherwise,
		 * a pointer to temp_data after copying into it.
		 */
		ret = pcap_sendpacket(pcap,
				rte_pktmbuf_read(mbuf, 0, len, temp_data), len);
		if (unlikely(ret != 0))
			break;
		num_tx++;
		tx_bytes += len;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_stat.pkts += num_tx;
	tx_queue->tx_stat.bytes += tx_bytes;
	tx_queue->tx_stat.err_pkts += i - num_tx;

	return i;
}

/*
 * pcap_open_live wrapper function
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
		return -1;
	}

	return 0;
}

static int
open_single_iface(const char *iface, pcap_t **pcap)
{
	if (open_iface_live(iface, pcap) < 0) {
		PMD_LOG(ERR, "Couldn't open interface %s", iface);
		return -1;
	}

	return 0;
}

static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
	pcap_t *tx_pcap;

	/*
	 * We need to create a dummy empty pcap_t to use with
	 * pcap_dump_open(). We create a big enough Ethernet pcap holder.
	 */
	tx_pcap = pcap_open_dead_with_tstamp_precision(DLT_EN10MB,
			RTE_ETH_PCAP_SNAPSHOT_LEN, PCAP_TSTAMP_PRECISION_NANO);
	if (tx_pcap == NULL) {
		PMD_LOG(ERR, "Couldn't create dead pcap");
		return -1;
	}

	/* The dumper is created using the previous pcap_t reference */
	*dumper = pcap_dump_open(tx_pcap, pcap_filename);
	if (*dumper == NULL) {
		pcap_close(tx_pcap);
		PMD_LOG(ERR, "Couldn't open %s for writing.",
			pcap_filename);
		return -1;
	}

	pcap_close(tx_pcap);
	return 0;
}

static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
	*pcap = pcap_open_offline(pcap_filename, errbuf);
	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
			errbuf);
		return -1;
	}

	return 0;
}

static uint64_t
count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
{
	const u_char *packet;
	struct pcap_pkthdr header;
	uint64_t pcap_pkt_count = 0;

	while ((packet = pcap_next(*pcap, &header)))
		pcap_pkt_count++;

	/* The pcap is reopened so it can be used as normal later. */
	pcap_close(*pcap);
	*pcap = NULL;
	open_single_rx_pcap(pcap_q->name, pcap);

	return pcap_pkt_count;
}

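/*
 * Added note: an offline pcap handle cannot be rewound, so counting the
 * packets consumes it; the close-and-reopen above restores the handle for
 * the ring fill performed later in eth_rx_queue_setup().
 */
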
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. A single pcap is opened and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!pp->tx_pcap[0] &&
			strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
				return -1;
			pp->rx_pcap[0] = pp->tx_pcap[0];
		}

		goto status_up;
	}

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!pp->tx_dumper[i] &&
				strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name,
				&pp->tx_dumper[i]) < 0)
				return -1;
		} else if (!pp->tx_pcap[i] &&
				strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
				return -1;
		}
	}

	/* If not open already, open rx pcaps */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (pp->rx_pcap[i] != NULL)
			continue;

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		}
	}

status_up:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 * It is the only place we can close all the tx stream dumpers.
 * If it is not called, the dumpers are still flushed within each tx burst.
 */
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;

	/* Special iface case. A single pcap is opened and shared between tx/rx. */
	if (internals->single_iface) {
		queue_missed_stat_on_stop_update(dev, 0);
		if (pp->tx_pcap[0] != NULL) {
			pcap_close(pp->tx_pcap[0]);
			pp->tx_pcap[0] = NULL;
			pp->rx_pcap[0] = NULL;
		}
		goto status_down;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (pp->tx_dumper[i] != NULL) {
			pcap_dump_close(pp->tx_dumper[i]);
			pp->tx_dumper[i] = NULL;
		}

		if (pp->tx_pcap[i] != NULL) {
			pcap_close(pp->tx_pcap[i]);
			pp->tx_pcap[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (pp->rx_pcap[i] != NULL) {
			queue_missed_stat_on_stop_update(dev, i);
			pcap_close(pp->rx_pcap[i]);
			pp->rx_pcap[i] = NULL;
		}
	}

status_down:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long rx_missed_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	unsigned long tx_packets_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
		stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
		rx_packets_total += stats->q_ipackets[i];
		rx_bytes_total += stats->q_ibytes[i];
		rx_missed_total += queue_missed_stat_get(dev, i);
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
		stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
		tx_packets_total += stats->q_opackets[i];
		tx_bytes_total += stats->q_obytes[i];
		tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;
	}

	stats->ipackets = rx_packets_total;
	stats->ibytes = rx_bytes_total;
	stats->imissed = rx_missed_total;
	stats->opackets = tx_packets_total;
	stats->obytes = tx_bytes_total;
	stats->oerrors = tx_packets_err_total;

	return 0;
}

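/*
 * Added note: both the per-queue arrays and the totals above cover only
 * the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues; traffic on any queues
 * beyond that limit is not reflected in the sums.
 */
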
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_stat.pkts = 0;
		internal->rx_queue[i].rx_stat.bytes = 0;
		queue_missed_stat_reset(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_stat.pkts = 0;
		internal->tx_queue[i].tx_stat.bytes = 0;
		internal->tx_queue[i].tx_stat.err_pkts = 0;
	}

	return 0;
}

static void
infinite_rx_ring_free(struct rte_ring *pkts)
{
	struct rte_mbuf *bufs;

	while (!rte_ring_dequeue(pkts, (void **)&bufs))
		rte_pktmbuf_free(bufs);

	rte_ring_free(pkts);
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;

	PMD_LOG(INFO, "Closing pcap ethdev on NUMA socket %d",
			rte_socket_id());

	rte_free(dev->process_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Device wide flag, but cleanup must be performed per queue. */
	if (internals->infinite_rx) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];

			/*
			 * 'pcap_q->pkts' can be NULL if 'eth_dev_close()' is
			 * called before 'eth_rx_queue_setup()' has been called.
			 */
			if (pcap_q->pkts == NULL)
				continue;

			infinite_rx_ring_free(pcap_q->pkts);
		}
	}

	if (internals->phy_mac == 0)
		/* not dynamically allocated, must not be freed */
		dev->data->mac_addrs = NULL;

	return 0;
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = rx_queue_id;
	dev->data->rx_queues[rx_queue_id] = pcap_q;

	if (internals->infinite_rx) {
		struct pmd_process_private *pp;
		char ring_name[NAME_MAX];
		static uint32_t ring_number;
		uint64_t pcap_pkt_count = 0;
		struct rte_mbuf *bufs[1];
		pcap_t **pcap;

		pp = rte_eth_devices[pcap_q->port_id].process_private;
		pcap = &pp->rx_pcap[pcap_q->queue_id];

		if (unlikely(*pcap == NULL))
			return -ENOENT;

		pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);

		snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu32,
				ring_number);

		pcap_q->pkts = rte_ring_create(ring_name,
				rte_align64pow2(pcap_pkt_count + 1), 0,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		ring_number++;
		if (!pcap_q->pkts)
			return -ENOENT;

		/* Fill ring with packets from PCAP file one by one. */
		while (eth_pcap_rx(pcap_q, bufs, 1)) {
			/* Check for multiseg mbufs. */
			if (bufs[0]->nb_segs != 1) {
				infinite_rx_ring_free(pcap_q->pkts);
				PMD_LOG(ERR,
					"Multiseg mbufs are not supported in infinite_rx mode.");
				return -EINVAL;
			}

			rte_ring_enqueue_bulk(pcap_q->pkts,
					(void * const *)bufs, 1, NULL);
		}

		if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
			infinite_rx_ring_free(pcap_q->pkts);
			PMD_LOG(ERR,
				"Not enough mbufs to accommodate packets in pcap file. "
				"At least %" PRIu64 " mbufs per queue is required.",
				pcap_pkt_count);
			return -EINVAL;
		}

		/*
		 * Reset the stats for this queue since eth_pcap_rx calls above
		 * didn't result in the application receiving packets.
		 */
		pcap_q->rx_stat.pkts = 0;
		pcap_q->rx_stat.bytes = 0;
	}

	return 0;
}

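/*
 * Added note on the ring sizing above: rte_ring sizes must be powers of
 * two and a default ring stores size - 1 entries, so
 * rte_align64pow2(pcap_pkt_count + 1) is the smallest legal size that is
 * guaranteed to hold every packet in the file.
 */
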
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];

	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = tx_queue_id;
	dev->data->tx_queues[tx_queue_id] = pcap_q;

	return 0;
}

static int
eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.tx_queue_start = eth_tx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.tx_queue_stop = eth_tx_queue_stop,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

static int
add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
		pcap_t *pcap, pcap_dumper_t *dumper)
{
	if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
		return -1;

	pmd->queue[pmd->num_of_queue].pcap = pcap;
	pmd->queue[pmd->num_of_queue].dumper = dumper;
	pmd->queue[pmd->num_of_queue].name = name;
	pmd->queue[pmd->num_of_queue].type = type;
	pmd->num_of_queue++;

	return 0;
}

/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *rx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
		return -1;

	if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
		pcap_close(pcap);
		return -1;
	}

	return 0;
}

/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *dumpers = extra_args;
	pcap_dumper_t *dumper;

	if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
		return -1;

	if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
		pcap_dump_close(dumper);
		return -1;
	}

	return 0;
}

/*
 * Opens an interface for reading and writing
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *tx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;

	tx->queue[0].pcap = pcap;
	tx->queue[0].name = iface;
	tx->queue[0].type = key;

	return 0;
}

static inline int
set_iface_direction(const char *iface, pcap_t *pcap,
		pcap_direction_t direction)
{
	const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
	if (pcap_setdirection(pcap, direction) < 0) {
		PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s",
				iface, direction_str, pcap_geterr(pcap));
		return -1;
	}
	PMD_LOG(INFO, "Setting %s pcap direction %s",
			iface, direction_str);

	return 0;
}

static inline int
open_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *pmd = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;
	if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
		pcap_close(pcap);
		return -1;
	}

	return 0;
}

/*
 * Opens a NIC for reading packets from it
 */
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
	int ret = open_iface(key, value, extra_args);

	if (ret < 0)
		return ret;
	if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
		struct pmd_devargs *pmd = extra_args;
		unsigned int qid = pmd->num_of_queue - 1;

		set_iface_direction(pmd->queue[qid].name,
				pmd->queue[qid].pcap,
				PCAP_D_IN);
	}

	return 0;
}

static inline int
rx_iface_args_process(const char *key, const char *value, void *extra_args)
{
	if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
			strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
		return open_rx_iface(key, value, extra_args);

	return 0;
}

/*
 * Opens a NIC for writing packets to it
 */
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
	return open_iface(key, value, extra_args);
}

static int
select_phy_mac(const char *key __rte_unused, const char *value,
		void *extra_args)
{
	if (extra_args) {
		const int phy_mac = atoi(value);
		int *enable_phy_mac = extra_args;

		if (phy_mac)
			*enable_phy_mac = 1;
	}
	return 0;
}

static int
get_infinite_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	if (extra_args) {
		const int infinite_rx = atoi(value);
		int *enable_infinite_rx = extra_args;

		if (infinite_rx > 0)
			*enable_infinite_rx = 1;
	}
	return 0;
}

static int
pmd_init_internals(struct rte_vdev_device *vdev,
		const unsigned int nb_rx_queues,
		const unsigned int nb_tx_queues,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
{
	struct rte_eth_dev_data *data;
	struct pmd_process_private *pp;
	unsigned int numa_node = vdev->device.numa_node;

	PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
		numa_node);

	pp = (struct pmd_process_private *)
		rte_zmalloc(NULL, sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

	if (pp == NULL) {
		PMD_LOG(ERR,
			"Failed to allocate memory for process private");
		return -1;
	}

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
	if (!(*eth_dev)) {
		rte_free(pp);
		return -1;
	}
	(*eth_dev)->process_private = pp;
	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	*internals = (*eth_dev)->data->dev_private;
	/*
	 * Interface MAC = 02:70:63:61:70:<iface_idx>
	 * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
	 * where the middle 4 characters are converted to hex.
	 */
	(*internals)->eth_addr = (struct rte_ether_addr) {
		.addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
	};
	(*internals)->phy_mac = 0;
	data = (*eth_dev)->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/*
	 * NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process.
	 */
	(*eth_dev)->dev_ops = &ops;

	strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
			ETH_PCAP_ARG_MAXLEN);

	return 0;
}

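/*
 * Added example: since iface_idx is a process-wide counter, the first
 * device created gets MAC 02:70:63:61:70:00, the second 02:70:63:61:70:01,
 * and so on.
 */
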
static int
eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
		const unsigned int numa_node)
{
#if defined(RTE_EXEC_ENV_LINUX)
	void *mac_addrs;
	struct ifreq ifr;
	int if_fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (if_fd == -1)
		return -1;

	rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name));
	if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) {
		close(if_fd);
		return -1;
	}

	mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
	if (!mac_addrs) {
		close(if_fd);
		return -1;
	}

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	eth_dev->data->mac_addrs = mac_addrs;
	rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
			ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(if_fd);

	return 0;

#elif defined(RTE_EXEC_ENV_FREEBSD)
	void *mac_addrs;
	struct if_msghdr *ifm;
	struct sockaddr_dl *sdl;
	int mib[6];
	size_t len = 0;
	char *buf;

	mib[0] = CTL_NET;
	mib[1] = AF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_LINK;
	mib[4] = NET_RT_IFLIST;
	mib[5] = if_nametoindex(if_name);

	if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)
		return -1;

	if (len == 0)
		return -1;

	buf = rte_malloc(NULL, len, 0);
	if (!buf)
		return -1;

	if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
		rte_free(buf);
		return -1;
	}
	ifm = (struct if_msghdr *)buf;
	sdl = (struct sockaddr_dl *)(ifm + 1);

	mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
	if (!mac_addrs) {
		rte_free(buf);
		return -1;
	}

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	eth_dev->data->mac_addrs = mac_addrs;
	rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
			LLADDR(sdl), RTE_ETHER_ADDR_LEN);

	rte_free(buf);

	return 0;
#else
	return -1;
#endif
}

static int
eth_from_pcaps_common(struct rte_vdev_device *vdev,
		struct pmd_devargs_all *devargs_all,
		struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
	struct pmd_process_private *pp;
	struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
	struct pmd_devargs *tx_queues = &devargs_all->tx_queues;
	const unsigned int nb_rx_queues = rx_queues->num_of_queue;
	const unsigned int nb_tx_queues = tx_queues->num_of_queue;
	unsigned int i;

	if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
			eth_dev) < 0)
		return -1;

	pp = (*eth_dev)->process_private;
	for (i = 0; i < nb_rx_queues; i++) {
		struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
		struct devargs_queue *queue = &rx_queues->queue[i];

		pp->rx_pcap[i] = queue->pcap;
		strlcpy(rx->name, queue->name, sizeof(rx->name));
		strlcpy(rx->type, queue->type, sizeof(rx->type));
	}

	for (i = 0; i < nb_tx_queues; i++) {
		struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
		struct devargs_queue *queue = &tx_queues->queue[i];

		pp->tx_dumper[i] = queue->dumper;
		pp->tx_pcap[i] = queue->pcap;
		strlcpy(tx->name, queue->name, sizeof(tx->name));
		strlcpy(tx->type, queue->type, sizeof(tx->type));
	}

	return 0;
}

static int
eth_from_pcaps(struct rte_vdev_device *vdev,
		struct pmd_devargs_all *devargs_all)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
	int single_iface = devargs_all->single_iface;
	unsigned int infinite_rx = devargs_all->infinite_rx;
	int ret;

	ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev);
	if (ret < 0)
		return ret;

	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

	if (single_iface) {
		internals->if_index = if_nametoindex(rx_queues->queue[0].name);

		/* phy_mac arg is applied only if "iface" devarg is provided */
		if (rx_queues->phy_mac) {
			if (eth_pcap_update_mac(rx_queues->queue[0].name,
					eth_dev, vdev->device.numa_node) == 0)
				internals->phy_mac = 1;
		}
	}

	internals->infinite_rx = infinite_rx;
	/* Assign rx ops. */
	if (infinite_rx)
		eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
	else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface ||
			single_iface)
		eth_dev->rx_pkt_burst = eth_pcap_rx;
	else
		eth_dev->rx_pkt_burst = eth_null_rx;

	/* Assign tx ops. */
	if (devargs_all->is_tx_pcap)
		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
	else if (devargs_all->is_tx_iface || single_iface)
		eth_dev->tx_pkt_burst = eth_pcap_tx;
	else
		eth_dev->tx_pkt_burst = eth_tx_drop;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static int
pmd_pcap_probe(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_kvargs *kvlist;
	struct pmd_devargs pcaps = {0};
	struct pmd_devargs dumpers = {0};
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internal;
	int ret = 0;

	struct pmd_devargs_all devargs_all = {
		.single_iface = 0,
		.is_tx_pcap = 0,
		.is_tx_iface = 0,
		.infinite_rx = 0,
	};

	name = rte_vdev_device_name(dev);
	PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
			&timestamp_rx_dynflag);
	if (ret != 0) {
		PMD_LOG(ERR, "Failed to register Rx timestamp field/flag");
		return -1;
	}

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		internal = eth_dev->data->dev_private;

		kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
		if (kvlist == NULL)
			return -1;
	} else {
		kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
				valid_arguments);
		if (kvlist == NULL)
			return -1;
	}

	/*
	 * If the iface argument is passed, we open the NICs and use them for
	 * reading / writing.
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);
		if (ret < 0)
			goto free_kvlist;

		dumpers.queue[0] = pcaps.queue[0];

		ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
				&select_phy_mac, &pcaps.phy_mac);
		if (ret < 0)
			goto free_kvlist;

		dumpers.phy_mac = pcaps.phy_mac;

		devargs_all.single_iface = 1;
		pcaps.num_of_queue = 1;
		dumpers.num_of_queue = 1;

		goto create_eth;
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC, a
	 * pcap file, or a dummy RX stream.
	 */
	devargs_all.is_rx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_rx_iface =
		(rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) +
		 rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_IN_ARG)) ? 1 : 0;
	pcaps.num_of_queue = 0;

	devargs_all.is_tx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_tx_iface =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
	dumpers.num_of_queue = 0;

	if (devargs_all.is_rx_pcap) {
		/*
		 * We check whether we want to infinitely rx the pcap file.
		 */
		unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist,
				ETH_PCAP_INFINITE_RX_ARG);

		if (infinite_rx_arg_cnt == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_PCAP_INFINITE_RX_ARG,
					&get_infinite_rx_arg,
					&devargs_all.infinite_rx);
			if (ret < 0)
				goto free_kvlist;
			PMD_LOG(INFO, "infinite_rx has been %s for %s",
					devargs_all.infinite_rx ? "enabled" : "disabled",
					name);
		} else if (infinite_rx_arg_cnt > 1) {
			PMD_LOG(WARNING, "infinite_rx has not been enabled since the "
					"argument has been provided more than once "
					"for %s", name);
		}

		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else if (devargs_all.is_rx_iface) {
		ret = rte_kvargs_process(kvlist, NULL,
				&rx_iface_args_process, &pcaps);
	} else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) {
		unsigned int i;

		/* Count the number of tx queue args passed before dummy rx
		 * queue creation so a dummy rx queue can be created for each
		 * tx queue.
		 */
		unsigned int num_tx_queues =
			(rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) +
			rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG));

		PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided.");

		/* Creating a dummy rx queue for each tx queue passed */
		for (i = 0; i < num_tx_queues; i++)
			ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL,
					NULL);
	} else {
		PMD_LOG(ERR, "Error - No rx or tx queues provided");
		ret = -ENOENT;
	}
	if (ret < 0)
		goto free_kvlist;

	/*
	 * We check whether we want to open a TX stream to a real NIC,
	 * a pcap file, or drop packets on tx.
	 */
	if (devargs_all.is_tx_pcap) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
	} else if (devargs_all.is_tx_iface) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);
	} else {
		unsigned int i;

		PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided.");

		/* Add 1 dummy queue per rxq which counts and drops packets. */
		for (i = 0; i < pcaps.num_of_queue; i++)
			ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL,
					NULL);
	}

	if (ret < 0)
		goto free_kvlist;

create_eth:
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_process_private *pp;
		unsigned int i;

		internal = eth_dev->data->dev_private;
		pp = (struct pmd_process_private *)
			rte_malloc(NULL,
				sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

		if (pp == NULL) {
			PMD_LOG(ERR,
				"Failed to allocate memory for process private");
			ret = -1;
			goto free_kvlist;
		}

		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		/* setup process private */
		for (i = 0; i < pcaps.num_of_queue; i++)
			pp->rx_pcap[i] = pcaps.queue[i].pcap;

		for (i = 0; i < dumpers.num_of_queue; i++) {
			pp->tx_dumper[i] = dumpers.queue[i].dumper;
			pp->tx_pcap[i] = dumpers.queue[i].pcap;
		}

		eth_dev->process_private = pp;
		eth_dev->rx_pkt_burst = eth_pcap_rx;
		if (devargs_all.is_tx_pcap)
			eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
		else
			eth_dev->tx_pkt_burst = eth_pcap_tx;

		rte_eth_dev_probing_finish(eth_dev);
		goto free_kvlist;
	}

	devargs_all.rx_queues = pcaps;
	devargs_all.tx_queues = dumpers;

	ret = eth_from_pcaps(dev, &devargs_all);

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

static int
pmd_pcap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -1;

	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_pcap_drv = {
	.probe = pmd_pcap_probe,
	.remove = pmd_pcap_remove,
};

RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
	ETH_PCAP_RX_PCAP_ARG "=<string> "
	ETH_PCAP_TX_PCAP_ARG "=<string> "
	ETH_PCAP_RX_IFACE_ARG "=<ifc> "
	ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
	ETH_PCAP_TX_IFACE_ARG "=<ifc> "
	ETH_PCAP_IFACE_ARG "=<ifc> "
	ETH_PCAP_PHY_MAC_ARG "=<int> "
	ETH_PCAP_INFINITE_RX_ARG "=<0|1>");
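
/*
 * Illustrative usage (added commentary; device, interface, and file names
 * are examples only):
 *
 *   Read packets from one capture file and record transmissions to another:
 *     --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap'
 *
 *   Replay a capture endlessly from memory, dropping whatever is sent:
 *     --vdev 'net_pcap0,rx_pcap=input.pcap,infinite_rx=1'
 *
 *   Bind both directions to a real interface and reuse its MAC address:
 *     --vdev 'net_pcap0,iface=eth0,phy_mac=1'
 */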