/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 */

#include <sys/socket.h>
#include <sys/ioctl.h>

#if defined(RTE_EXEC_ENV_FREEBSD)
#include <sys/sysctl.h>
#include <net/if_dl.h>
#endif

#include <pcap.h>

#include <rte_cycles.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>

#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
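
/*
 * The values above feed pcap_open_live(): capture up to a jumbo frame per
 * packet, enable promiscuous mode, and use a -1 read timeout (behaviour is
 * platform-dependent; in practice reads return as soon as packets are
 * available).
 */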
#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx"

#define ETH_PCAP_ARG_MAXLEN 64

#define RTE_PMD_PCAP_MAX_QUEUES 16

static char errbuf[PCAP_ERRBUF_SIZE];
static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
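/*
 * Note: tx_pcap_data is a single process-wide scratch buffer used to flatten
 * multi-segment mbufs before they are written out, so concurrent
 * multi-segment tx on several queues would contend on it.
 */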
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;

static uint8_t iface_idx;
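
/*
 * Per-queue counters; volatile because they are updated from the rx/tx
 * fast path and read from the stats callbacks.
 */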
struct queue_stat {
	volatile unsigned long pkts;
	volatile unsigned long bytes;
	volatile unsigned long err_pkts;
};

struct pcap_rx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mempool *mb_pool;
	struct queue_stat rx_stat;
	char name[PATH_MAX];	/* array size assumed; usage only needs a char array */
	char type[ETH_PCAP_ARG_MAXLEN];

	/* Contains pre-generated packets to be looped through */
	struct rte_ring *pkts;
};

struct pcap_tx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct queue_stat tx_stat;
	char name[PATH_MAX];	/* array size assumed, as above */
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	char devargs[ETH_PCAP_ARG_MAXLEN];
	struct rte_ether_addr eth_addr;
	int if_index;
	int single_iface;
	int phy_mac;
	unsigned int infinite_rx;
};

struct pmd_process_private {
	pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
};

struct pmd_devargs {
	unsigned int num_of_queue;
	struct devargs_queue {
		pcap_dumper_t *dumper;
		pcap_t *pcap;
		const char *name;
		const char *type;
	} queue[RTE_PMD_PCAP_MAX_QUEUES];
	int phy_mac;
};

struct pmd_devargs_all {
	struct pmd_devargs rx_queues;
	struct pmd_devargs tx_queues;
	int single_iface;
	unsigned int is_tx_pcap;
	unsigned int is_tx_iface;
	unsigned int is_rx_pcap;
	unsigned int is_rx_iface;
	unsigned int infinite_rx;
};

static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_RX_IFACE_IN_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	ETH_PCAP_PHY_MAC_ARG,
	ETH_PCAP_INFINITE_RX_ARG,
	NULL
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_pcap_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
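
/*
 * Spill a captured frame that does not fit into a single mbuf across a chain
 * of mbufs allocated from the same pool; returns the resulting number of
 * segments, or -1 when an allocation fails.
 */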
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
		const u_char *data, uint16_t data_len)
	/* Copy the first segment. */
	uint16_t len = rte_pktmbuf_tailroom(mbuf);
	struct rte_mbuf *m = mbuf;

	rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);

	while (data_len > 0) {
		/* Allocate next mbuf and point to that. */
		m->next = rte_pktmbuf_alloc(mb_pool);

		if (unlikely(!m->next))

		/* Headroom is not needed in chained mbufs. */
		rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));

		/* Copy next segment. */
		len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
		rte_memcpy(rte_pktmbuf_append(m, len), data, len);

	return mbuf->nb_segs;

/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
	uint16_t data_len = 0;

		rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),

		data_len += mbuf->data_len;

eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct pcap_rx_queue *pcap_q = queue;
	uint32_t rx_bytes = 0;

	if (unlikely(nb_pkts == 0))

	if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *pcap_buf;
		int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);

		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
				rte_pktmbuf_mtod(pcap_buf, void *),
		bufs[i]->data_len = pcap_buf->data_len;
		bufs[i]->pkt_len = pcap_buf->pkt_len;
		bufs[i]->port = pcap_q->port_id;
		rx_bytes += pcap_buf->data_len;

		/* Enqueue packet back on ring to allow infinite rx. */
		rte_ring_enqueue(pcap_q->pkts, pcap_buf);

	pcap_q->rx_stat.pkts += i;
	pcap_q->rx_stat.bytes += rx_bytes;

eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct pcap_pkthdr header;
	struct pmd_process_private *pp;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint32_t rx_bytes = 0;

	pp = rte_eth_devices[pcap_q->port_id].process_private;
	pcap = pp->rx_pcap[pcap_q->queue_id];

	if (unlikely(pcap == NULL || nb_pkts == 0))

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap, &header);
		if (unlikely(packet == NULL))

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))

		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
			/* pcap packet will fit in the mbuf, can copy it */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
			mbuf->data_len = (uint16_t)header.caplen;

			/* Try to read the jumbo frame into multiple mbufs. */
			if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
					header.caplen) == -1)) {
				rte_pktmbuf_free(mbuf);

		mbuf->pkt_len = (uint16_t)header.caplen;
		mbuf->port = pcap_q->port_id;
		rx_bytes += header.caplen;

	pcap_q->rx_stat.pkts += num_rx;
	pcap_q->rx_stat.bytes += rx_bytes;

eth_null_rx(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
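
/*
 * Derive a wall-clock timestamp for a dumped packet by adding the TSC time
 * elapsed since probe to the start time recorded in pmd_pcap_probe().
 */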
calculate_timestamp(struct timeval *ts) {
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
	timeradd(&start_time, &cur_time, ts);

/*
 * Callback to handle writing packets to a pcap file.
 */
eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *dumper_q = queue;
	uint32_t tx_bytes = 0;
	struct pcap_pkthdr header;
	pcap_dumper_t *dumper;

	pp = rte_eth_devices[dumper_q->port_id].process_private;
	dumper = pp->tx_dumper[dumper_q->queue_id];

	if (dumper == NULL || nb_pkts == 0)

	/* writes the nb_pkts packets to the previously opened pcap file
	 * dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		calculate_timestamp(&header.ts);
		header.len = mbuf->pkt_len;
		header.caplen = header.len;

		if (likely(mbuf->nb_segs == 1)) {
			pcap_dump((u_char *)dumper, &header,
					rte_pktmbuf_mtod(mbuf, void *));
			if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
				eth_pcap_gather_data(tx_pcap_data, mbuf);
				pcap_dump((u_char *)dumper, &header,
				"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
					RTE_ETHER_MAX_JUMBO_FRAME_LEN);
				rte_pktmbuf_free(mbuf);

		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper);
	dumper_q->tx_stat.pkts += num_tx;
	dumper_q->tx_stat.bytes += tx_bytes;
	dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
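
/* Packets too large to flatten into tx_pcap_data are dropped above and show
 * up in err_pkts as the difference between nb_pkts and num_tx.
 */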
/*
 * Callback to handle dropping packets in the infinite rx case.
 */
eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	uint32_t tx_bytes = 0;
	struct pcap_tx_queue *tx_queue = queue;

	if (unlikely(nb_pkts == 0))

	for (i = 0; i < nb_pkts; i++) {
		tx_bytes += bufs[i]->data_len;
		rte_pktmbuf_free(bufs[i]);

	tx_queue->tx_stat.pkts += nb_pkts;
	tx_queue->tx_stat.bytes += tx_bytes;

/*
 * Callback to handle sending packets through a real NIC.
 */
eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *tx_queue = queue;
	uint32_t tx_bytes = 0;

	pp = rte_eth_devices[tx_queue->port_id].process_private;
	pcap = pp->tx_pcap[tx_queue->queue_id];

	if (unlikely(nb_pkts == 0 || pcap == NULL))

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (likely(mbuf->nb_segs == 1)) {
			ret = pcap_sendpacket(pcap,
					rte_pktmbuf_mtod(mbuf, u_char *),
			if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
				eth_pcap_gather_data(tx_pcap_data, mbuf);
				ret = pcap_sendpacket(pcap,
						tx_pcap_data, mbuf->pkt_len);
				"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
					RTE_ETHER_MAX_JUMBO_FRAME_LEN);
				rte_pktmbuf_free(mbuf);

		if (unlikely(ret != 0))

		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);

	tx_queue->tx_stat.pkts += num_tx;
	tx_queue->tx_stat.bytes += tx_bytes;
	tx_queue->tx_stat.err_pkts += i - num_tx;

/*
 * pcap_open_live wrapper function.
 */
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

		PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);

open_single_iface(const char *iface, pcap_t **pcap)
	if (open_iface_live(iface, pcap) < 0) {
		PMD_LOG(ERR, "Couldn't open interface %s", iface);

open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
	/*
	 * We need a dummy empty pcap_t to pass to pcap_dump_open().
	 * Create it big enough to hold a full Ethernet snapshot.
	 */
	tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
	if (tx_pcap == NULL) {
		PMD_LOG(ERR, "Couldn't create dead pcap");

	/* The dumper is created using the previous pcap_t reference */
	*dumper = pcap_dump_open(tx_pcap, pcap_filename);
	if (*dumper == NULL) {
		PMD_LOG(ERR, "Couldn't open %s for writing.",

open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
	*pcap = pcap_open_offline(pcap_filename, errbuf);

		PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
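
/*
 * Walk the whole file once to learn how many packets it holds; the caller
 * uses the count to size the infinite-rx ring. The handle is reopened
 * afterwards so the normal rx path starts from the first packet again.
 */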
count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
	const u_char *packet;
	struct pcap_pkthdr header;
	uint64_t pcap_pkt_count = 0;

	while ((packet = pcap_next(*pcap, &header)))

	/* The pcap is reopened so it can be used as normal later. */
	open_single_rx_pcap(pcap_q->name, pcap);

	return pcap_pkt_count;

eth_dev_start(struct rte_eth_dev *dev)
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!pp->tx_pcap[0] &&
				strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
			pp->rx_pcap[0] = pp->tx_pcap[0];

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!pp->tx_dumper[i] &&
				strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name,
					&pp->tx_dumper[i]) < 0)
		} else if (!pp->tx_pcap[i] &&
				strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)

	/* If not open already, open rx pcaps */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (pp->rx_pcap[i] != NULL)

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	dev->data->dev_link.link_status = ETH_LINK_UP;
/*
 * This function gets called when the current port gets stopped. It is the
 * only place where the tx stream dumpers can be closed; if it is not called,
 * the dumpers are merely flushed within each tx burst.
 */
eth_dev_stop(struct rte_eth_dev *dev)
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		pcap_close(pp->tx_pcap[0]);
		pp->tx_pcap[0] = NULL;
		pp->rx_pcap[0] = NULL;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (pp->tx_dumper[i] != NULL) {
			pcap_dump_close(pp->tx_dumper[i]);
			pp->tx_dumper[i] = NULL;

		if (pp->tx_pcap[i] != NULL) {
			pcap_close(pp->tx_pcap[i]);
			pp->tx_pcap[i] = NULL;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (pp->rx_pcap[i] != NULL) {
			pcap_close(pp->rx_pcap[i]);
			pp->rx_pcap[i] = NULL;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

eth_dev_configure(struct rte_eth_dev *dev __rte_unused)

eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;

eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	unsigned long tx_packets_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
		stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
		rx_packets_total += stats->q_ipackets[i];
		rx_bytes_total += stats->q_ibytes[i];

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
		stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
		tx_packets_total += stats->q_opackets[i];
		tx_bytes_total += stats->q_obytes[i];
		tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;

	stats->ipackets = rx_packets_total;
	stats->ibytes = rx_bytes_total;
	stats->opackets = tx_packets_total;
	stats->obytes = tx_bytes_total;
	stats->oerrors = tx_packets_err_total;

eth_stats_reset(struct rte_eth_dev *dev)
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_stat.pkts = 0;
		internal->rx_queue[i].rx_stat.bytes = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_stat.pkts = 0;
		internal->tx_queue[i].tx_stat.bytes = 0;
		internal->tx_queue[i].tx_stat.err_pkts = 0;

eth_dev_close(struct rte_eth_dev *dev)
	struct pmd_internals *internals = dev->data->dev_private;

	/* Device wide flag, but cleanup must be performed per queue. */
	if (internals->infinite_rx) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];
			struct rte_mbuf *pcap_buf;

			while (!rte_ring_dequeue(pcap_q->pkts,
				rte_pktmbuf_free(pcap_buf);

			rte_ring_free(pcap_q->pkts);

eth_queue_release(void *q __rte_unused)

eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)

eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = rx_queue_id;
	dev->data->rx_queues[rx_queue_id] = pcap_q;

	if (internals->infinite_rx) {
		struct pmd_process_private *pp;
		char ring_name[NAME_MAX];
		static uint32_t ring_number;
		uint64_t pcap_pkt_count = 0;
		struct rte_mbuf *bufs[1];

		pp = rte_eth_devices[pcap_q->port_id].process_private;
		pcap = &pp->rx_pcap[pcap_q->queue_id];

		if (unlikely(*pcap == NULL))

		pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);

		snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16,

		pcap_q->pkts = rte_ring_create(ring_name,
				rte_align64pow2(pcap_pkt_count + 1), 0,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
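		/* With the default flags a ring of size N holds at most N - 1
		 * entries, hence the +1 before rounding up to a power of two.
		 */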
		/* Fill ring with packets from PCAP file one by one. */
		while (eth_pcap_rx(pcap_q, bufs, 1)) {
			/* Check for multiseg mbufs. */
			if (bufs[0]->nb_segs != 1) {
				rte_pktmbuf_free(*bufs);

				while (!rte_ring_dequeue(pcap_q->pkts,
					rte_pktmbuf_free(*bufs);

				rte_ring_free(pcap_q->pkts);
				PMD_LOG(ERR, "Multiseg mbufs are not supported in infinite_rx "

			rte_ring_enqueue_bulk(pcap_q->pkts,
					(void * const *)bufs, 1, NULL);

		/*
		 * Reset the stats for this queue since eth_pcap_rx calls above
		 * didn't result in the application receiving packets.
		 */
		pcap_q->rx_stat.pkts = 0;
		pcap_q->rx_stat.bytes = 0;

eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];

	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = tx_queue_id;
	dev->data->tx_queues[tx_queue_id] = pcap_q;

eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.tx_queue_start = eth_tx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.tx_queue_stop = eth_tx_queue_stop,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
		pcap_t *pcap, pcap_dumper_t *dumper)
	if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)

	pmd->queue[pmd->num_of_queue].pcap = pcap;

	pmd->queue[pmd->num_of_queue].dumper = dumper;
	pmd->queue[pmd->num_of_queue].name = name;
	pmd->queue[pmd->num_of_queue].type = type;
/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
open_rx_pcap(const char *key, const char *value, void *extra_args)
	const char *pcap_filename = value;
	struct pmd_devargs *rx = extra_args;

	if (open_single_rx_pcap(pcap_filename, &pcap) < 0)

	if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
open_tx_pcap(const char *key, const char *value, void *extra_args)
	const char *pcap_filename = value;
	struct pmd_devargs *dumpers = extra_args;
	pcap_dumper_t *dumper;

	if (open_single_tx_pcap(pcap_filename, &dumper) < 0)

	if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
		pcap_dump_close(dumper);

/*
 * Opens an interface for reading and writing.
 */
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
	const char *iface = value;
	struct pmd_devargs *tx = extra_args;

	if (open_single_iface(iface, &pcap) < 0)

	tx->queue[0].pcap = pcap;
	tx->queue[0].name = iface;
	tx->queue[0].type = key;

set_iface_direction(const char *iface, pcap_t *pcap,
		pcap_direction_t direction)
	const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";

	if (pcap_setdirection(pcap, direction) < 0) {
		PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s",
				iface, direction_str, pcap_geterr(pcap));

	PMD_LOG(INFO, "Setting %s pcap direction %s",
			iface, direction_str);
open_iface(const char *key, const char *value, void *extra_args)
	const char *iface = value;
	struct pmd_devargs *pmd = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)

	if (add_queue(pmd, iface, key, pcap, NULL) < 0) {

/*
 * Opens a NIC for reading packets from it.
 */
open_rx_iface(const char *key, const char *value, void *extra_args)
	int ret = open_iface(key, value, extra_args);

	if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
		struct pmd_devargs *pmd = extra_args;
		unsigned int qid = pmd->num_of_queue - 1;

		set_iface_direction(pmd->queue[qid].name,
				pmd->queue[qid].pcap,

rx_iface_args_process(const char *key, const char *value, void *extra_args)
	if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
			strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
		return open_rx_iface(key, value, extra_args);

/*
 * Opens a NIC for writing packets to it.
 */
open_tx_iface(const char *key, const char *value, void *extra_args)
	return open_iface(key, value, extra_args);

select_phy_mac(const char *key __rte_unused, const char *value,
	const int phy_mac = atoi(value);
	int *enable_phy_mac = extra_args;

		*enable_phy_mac = 1;

get_infinite_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
	const int infinite_rx = atoi(value);
	int *enable_infinite_rx = extra_args;

	if (infinite_rx > 0)
		*enable_infinite_rx = 1;

pmd_init_internals(struct rte_vdev_device *vdev,
		const unsigned int nb_rx_queues,
		const unsigned int nb_tx_queues,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
	struct rte_eth_dev_data *data;
	struct pmd_process_private *pp;
	unsigned int numa_node = vdev->device.numa_node;

	PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",

	pp = (struct pmd_process_private *)
			rte_zmalloc(NULL, sizeof(struct pmd_process_private),
					RTE_CACHE_LINE_SIZE);

		"Failed to allocate memory for process private");

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));

	(*eth_dev)->process_private = pp;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	*internals = (*eth_dev)->data->dev_private;

	/*
	 * Interface MAC = 02:70:63:61:70:<iface_idx>
	 * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
	 * where the middle 4 characters are converted to hex.
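	 * For example, the first pcap port created gets 02:70:63:61:70:00.
	 */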
	(*internals)->eth_addr = (struct rte_ether_addr) {
		.addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
	};
	(*internals)->phy_mac = 0;
	data = (*eth_dev)->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;
	/*
	 * NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process.
	 */
	(*eth_dev)->dev_ops = &ops;

	strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
			ETH_PCAP_ARG_MAXLEN);

eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
		const unsigned int numa_node)
#if defined(RTE_EXEC_ENV_LINUX)
	int if_fd = socket(AF_INET, SOCK_DGRAM, 0);

	rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name));
	if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) {

	mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	eth_dev->data->mac_addrs = mac_addrs;
	rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
			ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

#elif defined(RTE_EXEC_ENV_FREEBSD)
	struct if_msghdr *ifm;
	struct sockaddr_dl *sdl;

	mib[4] = NET_RT_IFLIST;
	mib[5] = if_nametoindex(if_name);

	if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)

	buf = rte_malloc(NULL, len, 0);

	if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {

	ifm = (struct if_msghdr *)buf;
	sdl = (struct sockaddr_dl *)(ifm + 1);

	mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	eth_dev->data->mac_addrs = mac_addrs;
	rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
			LLADDR(sdl), RTE_ETHER_ADDR_LEN);

eth_from_pcaps_common(struct rte_vdev_device *vdev,
		struct pmd_devargs_all *devargs_all,
		struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
	struct pmd_process_private *pp;
	struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
	struct pmd_devargs *tx_queues = &devargs_all->tx_queues;
	const unsigned int nb_rx_queues = rx_queues->num_of_queue;
	const unsigned int nb_tx_queues = tx_queues->num_of_queue;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
	if (tx_queues == NULL && nb_tx_queues > 0)

	if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,

	pp = (*eth_dev)->process_private;
	for (i = 0; i < nb_rx_queues; i++) {
		struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
		struct devargs_queue *queue = &rx_queues->queue[i];

		pp->rx_pcap[i] = queue->pcap;
		strlcpy(rx->name, queue->name, sizeof(rx->name));
		strlcpy(rx->type, queue->type, sizeof(rx->type));

	for (i = 0; i < nb_tx_queues; i++) {
		struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
		struct devargs_queue *queue = &tx_queues->queue[i];

		pp->tx_dumper[i] = queue->dumper;
		pp->tx_pcap[i] = queue->pcap;
		strlcpy(tx->name, queue->name, sizeof(tx->name));
		strlcpy(tx->type, queue->type, sizeof(tx->type));

eth_from_pcaps(struct rte_vdev_device *vdev,
		struct pmd_devargs_all *devargs_all)
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
	int single_iface = devargs_all->single_iface;
	unsigned int infinite_rx = devargs_all->infinite_rx;

	ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev);
	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

		internals->if_index = if_nametoindex(rx_queues->queue[0].name);
	/* phy_mac arg is applied only if "iface" devarg is provided */
	if (rx_queues->phy_mac) {
		int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
				eth_dev, vdev->device.numa_node);
			internals->phy_mac = 1;

	internals->infinite_rx = infinite_rx;

	/* Assign rx ops. */
		eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
	else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface ||
		eth_dev->rx_pkt_burst = eth_pcap_rx;
		eth_dev->rx_pkt_burst = eth_null_rx;

	/* Assign tx ops. */
	if (devargs_all->is_tx_pcap)
		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
	else if (devargs_all->is_tx_iface || single_iface)
		eth_dev->tx_pkt_burst = eth_pcap_tx;
		eth_dev->tx_pkt_burst = eth_tx_drop;
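	/*
	 * In short: infinite_rx uses eth_pcap_rx_infinite; rx_pcap, rx_iface
	 * and iface use eth_pcap_rx; anything else gets eth_null_rx. On tx,
	 * tx_pcap uses eth_pcap_tx_dumper; tx_iface and iface use eth_pcap_tx;
	 * anything else gets eth_tx_drop.
	 */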
	rte_eth_dev_probing_finish(eth_dev);

pmd_pcap_probe(struct rte_vdev_device *dev)
	struct rte_kvargs *kvlist;
	struct pmd_devargs pcaps = {0};
	struct pmd_devargs dumpers = {0};
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internal;

	struct pmd_devargs_all devargs_all = {

	name = rte_vdev_device_name(dev);
	PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
			PMD_LOG(ERR, "Failed to probe %s", name);

		internal = eth_dev->data->dev_private;

		kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading/writing.
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);

		dumpers.queue[0] = pcaps.queue[0];

		ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
				&select_phy_mac, &pcaps.phy_mac);

		dumpers.phy_mac = pcaps.phy_mac;

		devargs_all.single_iface = 1;
		pcaps.num_of_queue = 1;
		dumpers.num_of_queue = 1;
	/*
	 * We check whether we want to open an RX stream from a real NIC,
	 * a pcap file, or a dummy RX stream.
	 */
	devargs_all.is_rx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_rx_iface =
		rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) ? 1 : 0;
	pcaps.num_of_queue = 0;

	devargs_all.is_tx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_tx_iface =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
	dumpers.num_of_queue = 0;

	if (devargs_all.is_rx_pcap) {
		/*
		 * We check whether we want to rx the pcap file infinitely.
		 */
		unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist,
				ETH_PCAP_INFINITE_RX_ARG);

		if (infinite_rx_arg_cnt == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_PCAP_INFINITE_RX_ARG,
					&get_infinite_rx_arg,
					&devargs_all.infinite_rx);

			PMD_LOG(INFO, "infinite_rx has been %s for %s",
					devargs_all.infinite_rx ? "enabled" : "disabled",

		} else if (infinite_rx_arg_cnt > 1) {
			PMD_LOG(WARNING, "infinite_rx has not been enabled since the "
					"argument has been provided more than once "

		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else if (devargs_all.is_rx_iface) {
		ret = rte_kvargs_process(kvlist, NULL,
				&rx_iface_args_process, &pcaps);
	} else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) {
		/* Count number of tx queue args passed before dummy rx queue
		 * creation so a dummy rx queue can be created for each tx queue.
		 */
		unsigned int num_tx_queues =
			(rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) +
			rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG));

		PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided.");

		/* Creating a dummy rx queue for each tx queue passed */
		for (i = 0; i < num_tx_queues; i++)
			ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL,

		PMD_LOG(ERR, "Error - No rx or tx queues provided");

	/*
	 * We check whether we want to open a TX stream to a real NIC,
	 * a pcap file, or drop packets on tx.
	 */
	if (devargs_all.is_tx_pcap) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
	} else if (devargs_all.is_tx_iface) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);

		PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided.");

		/* Add 1 dummy queue per rxq which counts and drops packets. */
		for (i = 0; i < pcaps.num_of_queue; i++)
			ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL,

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_process_private *pp;

		internal = eth_dev->data->dev_private;
		pp = (struct pmd_process_private *)
			rte_zmalloc(NULL,
				sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

			"Failed to allocate memory for process private");

		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		/* setup process private */
		for (i = 0; i < pcaps.num_of_queue; i++)
			pp->rx_pcap[i] = pcaps.queue[i].pcap;

		for (i = 0; i < dumpers.num_of_queue; i++) {
			pp->tx_dumper[i] = dumpers.queue[i].dumper;
			pp->tx_pcap[i] = dumpers.queue[i].pcap;

		eth_dev->process_private = pp;
		eth_dev->rx_pkt_burst = eth_pcap_rx;
		if (devargs_all.is_tx_pcap)
			eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
		else
			eth_dev->tx_pkt_burst = eth_pcap_tx;

		rte_eth_dev_probing_finish(eth_dev);

	devargs_all.rx_queues = pcaps;
	devargs_all.tx_queues = dumpers;

	ret = eth_from_pcaps(dev, &devargs_all);

	rte_kvargs_free(kvlist);

pmd_pcap_remove(struct rte_vdev_device *dev)
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		internals = eth_dev->data->dev_private;
		if (internals != NULL && internals->phy_mac == 0)
			/* not dynamically allocated, must not be freed */
			eth_dev->data->mac_addrs = NULL;

	eth_dev_close(eth_dev);

	rte_free(eth_dev->process_private);
	rte_eth_dev_release_port(eth_dev);

static struct rte_vdev_driver pmd_pcap_drv = {
	.probe = pmd_pcap_probe,
	.remove = pmd_pcap_remove,
};

RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
	ETH_PCAP_RX_PCAP_ARG "=<string> "
	ETH_PCAP_TX_PCAP_ARG "=<string> "
	ETH_PCAP_RX_IFACE_ARG "=<ifc> "
	ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
	ETH_PCAP_TX_IFACE_ARG "=<ifc> "
	ETH_PCAP_IFACE_ARG "=<ifc> "
	ETH_PCAP_PHY_MAC_ARG "=<int> "
	ETH_PCAP_INFINITE_RX_ARG "=<0|1>");
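
/*
 * Example vdev strings passed on the EAL command line, e.g.:
 *   --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap'
 * or, to bind both directions to a kernel interface with its real MAC:
 *   --vdev 'net_pcap0,iface=eth0,phy_mac=1'
 */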
RTE_INIT(eth_pcap_init_log)
{
	eth_pcap_logtype = rte_log_register("pmd.net.pcap");
	if (eth_pcap_logtype >= 0)
		rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
}