1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright(c) 2014 6WIND S.A.
10 #include <sys/socket.h>
11 #include <sys/ioctl.h>
14 #if defined(RTE_EXEC_ENV_BSDAPP)
15 #include <sys/sysctl.h>
16 #include <net/if_dl.h>
21 #include <rte_cycles.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_ethdev_vdev.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
27 #include <rte_bus_vdev.h>
28 #include <rte_string_fns.h>
30 #define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
31 #define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
32 #define RTE_ETH_PCAP_PROMISC 1
33 #define RTE_ETH_PCAP_TIMEOUT -1
35 #define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
36 #define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
37 #define ETH_PCAP_RX_IFACE_ARG "rx_iface"
38 #define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
39 #define ETH_PCAP_TX_IFACE_ARG "tx_iface"
40 #define ETH_PCAP_IFACE_ARG "iface"
41 #define ETH_PCAP_PHY_MAC_ARG "phy_mac"
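/*
 * Devargs summary (as handled below): "rx_pcap"/"tx_pcap" take pcap file
 * paths, while "rx_iface", "rx_iface_in" (incoming traffic only),
 * "tx_iface" and "iface" take network interface names; "phy_mac" (used
 * together with "iface") requests the interface's physical MAC address.
 */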
43 #define ETH_PCAP_ARG_MAXLEN 64
45 #define RTE_PMD_PCAP_MAX_QUEUES 16
47 static char errbuf[PCAP_ERRBUF_SIZE];
48 static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
49 static struct timeval start_time;
50 static uint64_t start_cycles;
52 static uint8_t iface_idx;
55 volatile unsigned long pkts;
56 volatile unsigned long bytes;
57 volatile unsigned long err_pkts;
60 struct pcap_rx_queue {
63 struct rte_mempool *mb_pool;
64 struct queue_stat rx_stat;
66 char type[ETH_PCAP_ARG_MAXLEN];
69 struct pcap_tx_queue {
72 struct queue_stat tx_stat;
74 char type[ETH_PCAP_ARG_MAXLEN];
77 struct pmd_internals {
78 struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
79 struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
80 char devargs[ETH_PCAP_ARG_MAXLEN];
81 struct ether_addr eth_addr;
87 struct pmd_process_private {
88 pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
89 pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
90 pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
94 unsigned int num_of_queue;
95 struct devargs_queue {
96 pcap_dumper_t *dumper;
100 } queue[RTE_PMD_PCAP_MAX_QUEUES];
104 static const char *valid_arguments[] = {
105 ETH_PCAP_RX_PCAP_ARG,
106 ETH_PCAP_TX_PCAP_ARG,
107 ETH_PCAP_RX_IFACE_ARG,
108 ETH_PCAP_RX_IFACE_IN_ARG,
109 ETH_PCAP_TX_IFACE_ARG,
111 ETH_PCAP_PHY_MAC_ARG,
115 static struct rte_eth_link pmd_link = {
116 .link_speed = ETH_SPEED_NUM_10G,
117 .link_duplex = ETH_LINK_FULL_DUPLEX,
118 .link_status = ETH_LINK_DOWN,
119 .link_autoneg = ETH_LINK_FIXED,
122 static int eth_pcap_logtype;
124 #define PMD_LOG(level, fmt, args...) \
125 rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
126 "%s(): " fmt "\n", __func__, ##args)
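/*
 * Copy a received packet that does not fit into a single mbuf into a chain
 * of mbufs allocated from the same mempool; returns the number of segments
 * used, or -1 if an mbuf allocation fails.
 */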
129 eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
130 const u_char *data, uint16_t data_len)
132 /* Copy the first segment. */
133 uint16_t len = rte_pktmbuf_tailroom(mbuf);
134 struct rte_mbuf *m = mbuf;
136 rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
140 while (data_len > 0) {
141 /* Allocate next mbuf and point to that. */
142 m->next = rte_pktmbuf_alloc(mb_pool);
144 if (unlikely(!m->next))
149 /* Headroom is not needed in chained mbufs. */
150 rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
154 /* Copy next segment. */
155 len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
156 rte_memcpy(rte_pktmbuf_append(m, len), data, len);
163 return mbuf->nb_segs;
166 /* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
168 eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
170 uint16_t data_len = 0;
173 rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
176 data_len += mbuf->data_len;
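/*
 * RX burst callback: each packet returned by pcap_next() is copied into a
 * newly allocated mbuf (chained via eth_pcap_rx_jumbo() when it exceeds the
 * mbuf data room).
 */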
182 eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
185 struct pcap_pkthdr header;
186 struct pmd_process_private *pp;
187 const u_char *packet;
188 struct rte_mbuf *mbuf;
189 struct pcap_rx_queue *pcap_q = queue;
192 uint32_t rx_bytes = 0;
195 pp = rte_eth_devices[pcap_q->port_id].process_private;
196 pcap = pp->rx_pcap[pcap_q->queue_id];
198 if (unlikely(pcap == NULL || nb_pkts == 0))
201 /* Reads the given number of packets from the pcap file one by one
202 * and copies the packet data into a newly allocated mbuf to return.
204 for (i = 0; i < nb_pkts; i++) {
205 /* Get the next PCAP packet */
206 packet = pcap_next(pcap, &header);
207 if (unlikely(packet == NULL))
210 mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
211 if (unlikely(mbuf == NULL))
214 /* Now get the space available for data in the mbuf */
215 buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
216 RTE_PKTMBUF_HEADROOM;
218 if (header.caplen <= buf_size) {
219 /* pcap packet will fit in the mbuf, can copy it */
220 rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
222 mbuf->data_len = (uint16_t)header.caplen;
224 /* Try to read the jumbo frame into multiple mbufs. */
225 if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
228 header.caplen) == -1)) {
229 rte_pktmbuf_free(mbuf);
234 mbuf->pkt_len = (uint16_t)header.caplen;
235 mbuf->port = pcap_q->port_id;
238 rx_bytes += header.caplen;
240 pcap_q->rx_stat.pkts += num_rx;
241 pcap_q->rx_stat.bytes += rx_bytes;
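/*
 * Derive a wall-clock timestamp for a dumped packet from the timer cycles
 * elapsed since the driver was probed (start_time/start_cycles).
 */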
247 calculate_timestamp(struct timeval *ts) {
249 struct timeval cur_time;
251 cycles = rte_get_timer_cycles() - start_cycles;
252 cur_time.tv_sec = cycles / hz;
253 cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
254 timeradd(&start_time, &cur_time, ts);
258 * Callback to handle writing packets to a pcap file.
261 eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
264 struct rte_mbuf *mbuf;
265 struct pmd_process_private *pp;
266 struct pcap_tx_queue *dumper_q = queue;
268 uint32_t tx_bytes = 0;
269 struct pcap_pkthdr header;
270 pcap_dumper_t *dumper;
272 pp = rte_eth_devices[dumper_q->port_id].process_private;
273 dumper = pp->tx_dumper[dumper_q->queue_id];
275 if (dumper == NULL || nb_pkts == 0)
278 /* writes the nb_pkts packets to the previously opened pcap file
280 for (i = 0; i < nb_pkts; i++) {
282 calculate_timestamp(&header.ts);
283 header.len = mbuf->pkt_len;
284 header.caplen = header.len;
286 if (likely(mbuf->nb_segs == 1)) {
287 pcap_dump((u_char *)dumper, &header,
288 rte_pktmbuf_mtod(mbuf, void*));
290 if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
291 eth_pcap_gather_data(tx_pcap_data, mbuf);
292 pcap_dump((u_char *)dumper, &header,
296 "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
298 ETHER_MAX_JUMBO_FRAME_LEN);
300 rte_pktmbuf_free(mbuf);
306 tx_bytes += mbuf->pkt_len;
307 rte_pktmbuf_free(mbuf);
311 * Since there is no place to hook a callback when the forwarding
312 * process stops, we flush the pcap dumper within each burst to make
313 * sure the pcap file is actually written.
315 pcap_dump_flush(dumper);
316 dumper_q->tx_stat.pkts += num_tx;
317 dumper_q->tx_stat.bytes += tx_bytes;
318 dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
324 * Callback to handle sending packets through a real NIC.
327 eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
331 struct rte_mbuf *mbuf;
332 struct pmd_process_private *pp;
333 struct pcap_tx_queue *tx_queue = queue;
335 uint32_t tx_bytes = 0;
338 pp = rte_eth_devices[tx_queue->port_id].process_private;
339 pcap = pp->tx_pcap[tx_queue->queue_id];
341 if (unlikely(nb_pkts == 0 || pcap == NULL))
344 for (i = 0; i < nb_pkts; i++) {
347 if (likely(mbuf->nb_segs == 1)) {
348 ret = pcap_sendpacket(pcap,
349 rte_pktmbuf_mtod(mbuf, u_char *),
352 if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
353 eth_pcap_gather_data(tx_pcap_data, mbuf);
354 ret = pcap_sendpacket(pcap,
355 tx_pcap_data, mbuf->pkt_len);
358 "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
360 ETHER_MAX_JUMBO_FRAME_LEN);
362 rte_pktmbuf_free(mbuf);
367 if (unlikely(ret != 0))
370 tx_bytes += mbuf->pkt_len;
371 rte_pktmbuf_free(mbuf);
374 tx_queue->tx_stat.pkts += num_tx;
375 tx_queue->tx_stat.bytes += tx_bytes;
376 tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;
382 * pcap_open_live wrapper function
385 open_iface_live(const char *iface, pcap_t **pcap) {
386 *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
387 RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
390 PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
398 open_single_iface(const char *iface, pcap_t **pcap)
400 if (open_iface_live(iface, pcap) < 0) {
401 PMD_LOG(ERR, "Couldn't open interface %s", iface);
409 open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
414 * We need to create a dummy empty pcap_t to use it
415 * with pcap_dump_open(). We create a big enough Ethernet
418 tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
419 if (tx_pcap == NULL) {
420 PMD_LOG(ERR, "Couldn't create dead pcap");
424 /* The dumper is created using the previous pcap_t reference */
425 *dumper = pcap_dump_open(tx_pcap, pcap_filename);
426 if (*dumper == NULL) {
428 PMD_LOG(ERR, "Couldn't open %s for writing.",
438 open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
440 *pcap = pcap_open_offline(pcap_filename, errbuf);
442 PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
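/*
 * Open any pcap files/interfaces that are not open yet for every configured
 * queue, then mark all queues as started and the link as up.
 */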
451 eth_dev_start(struct rte_eth_dev *dev)
454 struct pmd_internals *internals = dev->data->dev_private;
455 struct pmd_process_private *pp = dev->process_private;
456 struct pcap_tx_queue *tx;
457 struct pcap_rx_queue *rx;
459 /* Special iface case. A single pcap handle is opened and shared between tx/rx. */
460 if (internals->single_iface) {
461 tx = &internals->tx_queue[0];
462 rx = &internals->rx_queue[0];
464 if (!pp->tx_pcap[0] &&
465 strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
466 if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
468 pp->rx_pcap[0] = pp->tx_pcap[0];
474 /* If not open already, open tx pcaps/dumpers */
475 for (i = 0; i < dev->data->nb_tx_queues; i++) {
476 tx = &internals->tx_queue[i];
478 if (!pp->tx_dumper[i] &&
479 strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
480 if (open_single_tx_pcap(tx->name,
481 &pp->tx_dumper[i]) < 0)
483 } else if (!pp->tx_pcap[i] &&
484 strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
485 if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
490 /* If not open already, open rx pcaps */
491 for (i = 0; i < dev->data->nb_rx_queues; i++) {
492 rx = &internals->rx_queue[i];
494 if (pp->rx_pcap[i] != NULL)
497 if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
498 if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
500 } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
501 if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
507 for (i = 0; i < dev->data->nb_rx_queues; i++)
508 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
510 for (i = 0; i < dev->data->nb_tx_queues; i++)
511 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
513 dev->data->dev_link.link_status = ETH_LINK_UP;
519 * This function gets called when the current port gets stopped.
520 * It is the only place where we can close all the tx stream dumpers.
521 * If it is not called, the dumpers are still flushed within each tx burst.
524 eth_dev_stop(struct rte_eth_dev *dev)
527 struct pmd_internals *internals = dev->data->dev_private;
528 struct pmd_process_private *pp = dev->process_private;
530 /* Special iface case. A single pcap handle is opened and shared between tx/rx. */
531 if (internals->single_iface) {
532 pcap_close(pp->tx_pcap[0]);
533 pp->tx_pcap[0] = NULL;
534 pp->rx_pcap[0] = NULL;
538 for (i = 0; i < dev->data->nb_tx_queues; i++) {
539 if (pp->tx_dumper[i] != NULL) {
540 pcap_dump_close(pp->tx_dumper[i]);
541 pp->tx_dumper[i] = NULL;
544 if (pp->tx_pcap[i] != NULL) {
545 pcap_close(pp->tx_pcap[i]);
546 pp->tx_pcap[i] = NULL;
550 for (i = 0; i < dev->data->nb_rx_queues; i++) {
551 if (pp->rx_pcap[i] != NULL) {
552 pcap_close(pp->rx_pcap[i]);
553 pp->rx_pcap[i] = NULL;
558 for (i = 0; i < dev->data->nb_rx_queues; i++)
559 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
561 for (i = 0; i < dev->data->nb_tx_queues; i++)
562 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
564 dev->data->dev_link.link_status = ETH_LINK_DOWN;
568 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
574 eth_dev_info(struct rte_eth_dev *dev,
575 struct rte_eth_dev_info *dev_info)
577 struct pmd_internals *internals = dev->data->dev_private;
579 dev_info->if_index = internals->if_index;
580 dev_info->max_mac_addrs = 1;
581 dev_info->max_rx_pktlen = (uint32_t) -1;
582 dev_info->max_rx_queues = dev->data->nb_rx_queues;
583 dev_info->max_tx_queues = dev->data->nb_tx_queues;
584 dev_info->min_rx_bufsize = 0;
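/* Aggregate the software per-queue counters into the device-level stats. */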
588 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
591 unsigned long rx_packets_total = 0, rx_bytes_total = 0;
592 unsigned long tx_packets_total = 0, tx_bytes_total = 0;
593 unsigned long tx_packets_err_total = 0;
594 const struct pmd_internals *internal = dev->data->dev_private;
596 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
597 i < dev->data->nb_rx_queues; i++) {
598 stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
599 stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
600 rx_packets_total += stats->q_ipackets[i];
601 rx_bytes_total += stats->q_ibytes[i];
604 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
605 i < dev->data->nb_tx_queues; i++) {
606 stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
607 stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
608 stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
609 tx_packets_total += stats->q_opackets[i];
610 tx_bytes_total += stats->q_obytes[i];
611 tx_packets_err_total += stats->q_errors[i];
614 stats->ipackets = rx_packets_total;
615 stats->ibytes = rx_bytes_total;
616 stats->opackets = tx_packets_total;
617 stats->obytes = tx_bytes_total;
618 stats->oerrors = tx_packets_err_total;
624 eth_stats_reset(struct rte_eth_dev *dev)
627 struct pmd_internals *internal = dev->data->dev_private;
629 for (i = 0; i < dev->data->nb_rx_queues; i++) {
630 internal->rx_queue[i].rx_stat.pkts = 0;
631 internal->rx_queue[i].rx_stat.bytes = 0;
634 for (i = 0; i < dev->data->nb_tx_queues; i++) {
635 internal->tx_queue[i].tx_stat.pkts = 0;
636 internal->tx_queue[i].tx_stat.bytes = 0;
637 internal->tx_queue[i].tx_stat.err_pkts = 0;
642 eth_dev_close(struct rte_eth_dev *dev __rte_unused)
647 eth_queue_release(void *q __rte_unused)
652 eth_link_update(struct rte_eth_dev *dev __rte_unused,
653 int wait_to_complete __rte_unused)
659 eth_rx_queue_setup(struct rte_eth_dev *dev,
660 uint16_t rx_queue_id,
661 uint16_t nb_rx_desc __rte_unused,
662 unsigned int socket_id __rte_unused,
663 const struct rte_eth_rxconf *rx_conf __rte_unused,
664 struct rte_mempool *mb_pool)
666 struct pmd_internals *internals = dev->data->dev_private;
667 struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
669 pcap_q->mb_pool = mb_pool;
670 pcap_q->port_id = dev->data->port_id;
671 pcap_q->queue_id = rx_queue_id;
672 dev->data->rx_queues[rx_queue_id] = pcap_q;
678 eth_tx_queue_setup(struct rte_eth_dev *dev,
679 uint16_t tx_queue_id,
680 uint16_t nb_tx_desc __rte_unused,
681 unsigned int socket_id __rte_unused,
682 const struct rte_eth_txconf *tx_conf __rte_unused)
684 struct pmd_internals *internals = dev->data->dev_private;
685 struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
687 pcap_q->port_id = dev->data->port_id;
688 pcap_q->queue_id = tx_queue_id;
689 dev->data->tx_queues[tx_queue_id] = pcap_q;
695 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
697 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
703 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
705 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
711 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
713 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
719 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
721 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
726 static const struct eth_dev_ops ops = {
727 .dev_start = eth_dev_start,
728 .dev_stop = eth_dev_stop,
729 .dev_close = eth_dev_close,
730 .dev_configure = eth_dev_configure,
731 .dev_infos_get = eth_dev_info,
732 .rx_queue_setup = eth_rx_queue_setup,
733 .tx_queue_setup = eth_tx_queue_setup,
734 .rx_queue_start = eth_rx_queue_start,
735 .tx_queue_start = eth_tx_queue_start,
736 .rx_queue_stop = eth_rx_queue_stop,
737 .tx_queue_stop = eth_tx_queue_stop,
738 .rx_queue_release = eth_queue_release,
739 .tx_queue_release = eth_queue_release,
740 .link_update = eth_link_update,
741 .stats_get = eth_stats_get,
742 .stats_reset = eth_stats_reset,
746 add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
747 pcap_t *pcap, pcap_dumper_t *dumper)
749 if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
752 pmd->queue[pmd->num_of_queue].pcap = pcap;
754 pmd->queue[pmd->num_of_queue].dumper = dumper;
755 pmd->queue[pmd->num_of_queue].name = name;
756 pmd->queue[pmd->num_of_queue].type = type;
762 * Function handler that opens the pcap file for reading and stores a
763 * reference to it for later use.
766 open_rx_pcap(const char *key, const char *value, void *extra_args)
768 const char *pcap_filename = value;
769 struct pmd_devargs *rx = extra_args;
772 if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
775 if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
784 * Opens a pcap file for writing and stores a reference to it
785 * for later use.
788 open_tx_pcap(const char *key, const char *value, void *extra_args)
790 const char *pcap_filename = value;
791 struct pmd_devargs *dumpers = extra_args;
792 pcap_dumper_t *dumper;
794 if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
797 if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
798 pcap_dump_close(dumper);
806 * Opens an interface for reading and writing
809 open_rx_tx_iface(const char *key, const char *value, void *extra_args)
811 const char *iface = value;
812 struct pmd_devargs *tx = extra_args;
815 if (open_single_iface(iface, &pcap) < 0)
818 tx->queue[0].pcap = pcap;
819 tx->queue[0].name = iface;
820 tx->queue[0].type = key;
826 set_iface_direction(const char *iface, pcap_t *pcap,
827 pcap_direction_t direction)
829 const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
830 if (pcap_setdirection(pcap, direction) < 0) {
831 PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s",
832 iface, direction_str, pcap_geterr(pcap));
835 PMD_LOG(INFO, "Setting %s pcap direction %s",
836 iface, direction_str);
841 open_iface(const char *key, const char *value, void *extra_args)
843 const char *iface = value;
844 struct pmd_devargs *pmd = extra_args;
847 if (open_single_iface(iface, &pcap) < 0)
849 if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
858 * Opens a NIC for reading packets from it
861 open_rx_iface(const char *key, const char *value, void *extra_args)
863 int ret = open_iface(key, value, extra_args);
866 if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
867 struct pmd_devargs *pmd = extra_args;
868 unsigned int qid = pmd->num_of_queue - 1;
870 set_iface_direction(pmd->queue[qid].name,
871 pmd->queue[qid].pcap,
879 rx_iface_args_process(const char *key, const char *value, void *extra_args)
881 if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
882 strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
883 return open_rx_iface(key, value, extra_args);
889 * Opens a NIC for writing packets to it
892 open_tx_iface(const char *key, const char *value, void *extra_args)
894 return open_iface(key, value, extra_args);
898 select_phy_mac(const char *key __rte_unused, const char *value,
902 const int phy_mac = atoi(value);
903 int *enable_phy_mac = extra_args;
911 static struct rte_vdev_driver pmd_pcap_drv;
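/*
 * Allocate the ethdev, its shared private data and the per-process data,
 * and set the defaults: locally administered MAC, link parameters, and a
 * copy of the devargs string for use by secondary processes.
 */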
914 pmd_init_internals(struct rte_vdev_device *vdev,
915 const unsigned int nb_rx_queues,
916 const unsigned int nb_tx_queues,
917 struct pmd_internals **internals,
918 struct rte_eth_dev **eth_dev)
920 struct rte_eth_dev_data *data;
921 struct pmd_process_private *pp;
922 unsigned int numa_node = vdev->device.numa_node;
924 PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
927 pp = (struct pmd_process_private *)
928 rte_zmalloc(NULL, sizeof(struct pmd_process_private),
929 RTE_CACHE_LINE_SIZE);
933 "Failed to allocate memory for process private");
937 /* reserve an ethdev entry */
938 *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
943 (*eth_dev)->process_private = pp;
944 /* now put it all together
945 * - store queue data in internals,
946 * - store numa_node info in eth_dev
947 * - point eth_dev_data to internals
948 * - and point eth_dev structure to new eth_dev_data structure
950 *internals = (*eth_dev)->data->dev_private;
952 * Interface MAC = 02:70:63:61:70:<iface_idx>
953 * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
954 * where the middle 4 characters are converted to hex.
956 (*internals)->eth_addr = (struct ether_addr) {
957 .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
959 (*internals)->phy_mac = 0;
960 data = (*eth_dev)->data;
961 data->nb_rx_queues = (uint16_t)nb_rx_queues;
962 data->nb_tx_queues = (uint16_t)nb_tx_queues;
963 data->dev_link = pmd_link;
964 data->mac_addrs = &(*internals)->eth_addr;
967 * NOTE: we'll replace the data element of the originally allocated
968 * eth_dev so the rings are local per-process
970 (*eth_dev)->dev_ops = &ops;
972 strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
973 ETH_PCAP_ARG_MAXLEN);
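/*
 * Replace the default MAC address with the physical MAC of the underlying
 * interface (SIOCGIFHWADDR ioctl on Linux, NET_RT_IFLIST sysctl on BSD).
 */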
979 eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
980 const unsigned int numa_node)
982 #if defined(RTE_EXEC_ENV_LINUXAPP)
985 int if_fd = socket(AF_INET, SOCK_DGRAM, 0);
990 rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name));
991 if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) {
996 mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
1002 PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
1003 eth_dev->data->mac_addrs = mac_addrs;
1004 rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
1005 ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
1011 #elif defined(RTE_EXEC_ENV_BSDAPP)
1013 struct if_msghdr *ifm;
1014 struct sockaddr_dl *sdl;
1023 mib[4] = NET_RT_IFLIST;
1024 mib[5] = if_nametoindex(if_name);
1026 if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)
1032 buf = rte_malloc(NULL, len, 0);
1036 if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
1040 ifm = (struct if_msghdr *)buf;
1041 sdl = (struct sockaddr_dl *)(ifm + 1);
1043 mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
1049 PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
1050 eth_dev->data->mac_addrs = mac_addrs;
1051 rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
1052 LLADDR(sdl), ETHER_ADDR_LEN);
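/*
 * Create the ethdev and copy the already opened pcap handles/dumpers and
 * queue names from the parsed devargs into its queue structures.
 */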
1063 eth_from_pcaps_common(struct rte_vdev_device *vdev,
1064 struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
1065 struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
1066 struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
1068 struct pmd_process_private *pp;
1071 /* do some parameter checking */
1072 if (rx_queues == NULL && nb_rx_queues > 0)
1074 if (tx_queues == NULL && nb_tx_queues > 0)
1077 if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
1081 pp = (*eth_dev)->process_private;
1082 for (i = 0; i < nb_rx_queues; i++) {
1083 struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
1084 struct devargs_queue *queue = &rx_queues->queue[i];
1086 pp->rx_pcap[i] = queue->pcap;
1087 snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
1088 snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
1091 for (i = 0; i < nb_tx_queues; i++) {
1092 struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
1093 struct devargs_queue *queue = &tx_queues->queue[i];
1095 pp->tx_dumper[i] = queue->dumper;
1096 pp->tx_pcap[i] = queue->pcap;
1097 snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
1098 snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
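/*
 * Finish device creation: record the single-iface/phy_mac settings and
 * select the RX/TX burst callbacks (dumper-based TX when writing pcap files).
 */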
1105 eth_from_pcaps(struct rte_vdev_device *vdev,
1106 struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
1107 struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
1108 int single_iface, unsigned int using_dumpers)
1110 struct pmd_internals *internals = NULL;
1111 struct rte_eth_dev *eth_dev = NULL;
1114 ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
1115 tx_queues, nb_tx_queues, &internals, ð_dev);
1120 /* store whether we are using a single interface for rx/tx or not */
1121 internals->single_iface = single_iface;
1124 internals->if_index = if_nametoindex(rx_queues->queue[0].name);
1126 /* phy_mac arg is applied only if "iface" devarg is provided */
1127 if (rx_queues->phy_mac) {
1128 int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
1129 eth_dev, vdev->device.numa_node);
1131 internals->phy_mac = 1;
1135 eth_dev->rx_pkt_burst = eth_pcap_rx;
1138 eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
1140 eth_dev->tx_pkt_burst = eth_pcap_tx;
1142 rte_eth_dev_probing_finish(eth_dev);
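/*
 * Probe entry point: parse the devargs, open the requested pcaps or
 * interfaces, and create (or, for secondary processes, attach to) the
 * pcap-backed ethdev.
 */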
1147 pmd_pcap_probe(struct rte_vdev_device *dev)
1150 unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
1151 struct rte_kvargs *kvlist;
1152 struct pmd_devargs pcaps = {0};
1153 struct pmd_devargs dumpers = {0};
1154 struct rte_eth_dev *eth_dev = NULL;
1155 struct pmd_internals *internal;
1156 int single_iface = 0;
1159 name = rte_vdev_device_name(dev);
1160 PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
1162 gettimeofday(&start_time, NULL);
1163 start_cycles = rte_get_timer_cycles();
1164 hz = rte_get_timer_hz();
1166 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1167 eth_dev = rte_eth_dev_attach_secondary(name);
1169 PMD_LOG(ERR, "Failed to probe %s", name);
1173 internal = eth_dev->data->dev_private;
1175 kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
1179 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
1186 * If iface argument is passed we open the NICs and use them for
1189 if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
1191 ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
1192 &open_rx_tx_iface, &pcaps);
1196 dumpers.queue[0] = pcaps.queue[0];
1198 ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
1199 &select_phy_mac, &pcaps.phy_mac);
1203 dumpers.phy_mac = pcaps.phy_mac;
1206 pcaps.num_of_queue = 1;
1207 dumpers.num_of_queue = 1;
1213 * We check whether we want to open an RX stream from a real NIC or a
1216 is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
1217 pcaps.num_of_queue = 0;
1220 ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
1221 &open_rx_pcap, &pcaps);
1223 ret = rte_kvargs_process(kvlist, NULL,
1224 &rx_iface_args_process, &pcaps);
1231 * We check whether we want to open a TX stream to a real NIC or a
1234 is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
1235 dumpers.num_of_queue = 0;
1238 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
1239 &open_tx_pcap, &dumpers);
1241 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
1242 &open_tx_iface, &dumpers);
1248 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1249 struct pmd_process_private *pp;
1252 internal = eth_dev->data->dev_private;
1253 pp = (struct pmd_process_private *)
1255 sizeof(struct pmd_process_private),
1256 RTE_CACHE_LINE_SIZE);
1260 "Failed to allocate memory for process private");
1264 eth_dev->dev_ops = &ops;
1265 eth_dev->device = &dev->device;
1267 /* setup process private */
1268 for (i = 0; i < pcaps.num_of_queue; i++)
1269 pp->rx_pcap[i] = pcaps.queue[i].pcap;
1271 for (i = 0; i < dumpers.num_of_queue; i++) {
1272 pp->tx_dumper[i] = dumpers.queue[i].dumper;
1273 pp->tx_pcap[i] = dumpers.queue[i].pcap;
1276 eth_dev->process_private = pp;
1277 eth_dev->rx_pkt_burst = eth_pcap_rx;
1279 eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
1281 eth_dev->tx_pkt_burst = eth_pcap_tx;
1283 rte_eth_dev_probing_finish(eth_dev);
1287 ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
1288 dumpers.num_of_queue, single_iface, is_tx_pcap);
1291 rte_kvargs_free(kvlist);
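/* Remove entry point: release the process-private data and the ethdev. */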
1297 pmd_pcap_remove(struct rte_vdev_device *dev)
1299 struct pmd_internals *internals = NULL;
1300 struct rte_eth_dev *eth_dev = NULL;
1302 PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
1308 /* find the existing ethdev entry */
1309 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1310 if (eth_dev == NULL)
1313 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1314 internals = eth_dev->data->dev_private;
1315 if (internals != NULL && internals->phy_mac == 0)
1316 /* not dynamically allocated, must not be freed */
1317 eth_dev->data->mac_addrs = NULL;
1320 rte_free(eth_dev->process_private);
1321 rte_eth_dev_release_port(eth_dev);
1326 static struct rte_vdev_driver pmd_pcap_drv = {
1327 .probe = pmd_pcap_probe,
1328 .remove = pmd_pcap_remove,
1331 RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
1332 RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
1333 RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
1334 ETH_PCAP_RX_PCAP_ARG "=<string> "
1335 ETH_PCAP_TX_PCAP_ARG "=<string> "
1336 ETH_PCAP_RX_IFACE_ARG "=<ifc> "
1337 ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
1338 ETH_PCAP_TX_IFACE_ARG "=<ifc> "
1339 ETH_PCAP_IFACE_ARG "=<ifc> "
1340 ETH_PCAP_PHY_MAC_ARG "=<int>");
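/*
 * Illustrative usage (hypothetical file and interface names), e.g. as EAL
 * vdev arguments to testpmd:
 *   --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap'
 * or, to bind both directions to one real interface using its physical MAC:
 *   --vdev 'net_pcap0,iface=eth0,phy_mac=1'
 */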
1342 RTE_INIT(eth_pcap_init_log)
1344 eth_pcap_logtype = rte_log_register("pmd.net.pcap");
1345 if (eth_pcap_logtype >= 0)
1346 rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);