1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright(c) 2014 6WIND S.A.
13 #include <rte_cycles.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_ethdev_vdev.h>
16 #include <rte_kvargs.h>
17 #include <rte_malloc.h>
19 #include <rte_bus_vdev.h>
/* pcap capture parameters: snapshot length for the dead pcap handle,
 * snap length bounded by the maximum jumbo frame size, promiscuous
 * mode enabled, and a blocking read timeout for pcap_open_live().
 */
21 #define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
22 #define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
23 #define RTE_ETH_PCAP_PROMISC 1
24 #define RTE_ETH_PCAP_TIMEOUT -1
/* devargs keys accepted by this PMD (see RTE_PMD_REGISTER_PARAM_STRING
 * at the bottom of the file).
 */
26 #define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
27 #define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
28 #define ETH_PCAP_RX_IFACE_ARG "rx_iface"
29 #define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
30 #define ETH_PCAP_TX_IFACE_ARG "tx_iface"
31 #define ETH_PCAP_IFACE_ARG "iface"
/* Maximum length of the per-queue name/type strings. */
33 #define ETH_PCAP_ARG_MAXLEN 64
/* Upper bound on rx/tx queues per pcap ethdev. */
35 #define RTE_PMD_PCAP_MAX_QUEUES 16
/* Shared error buffer for libpcap open/error reporting. */
37 static char errbuf[PCAP_ERRBUF_SIZE];
/* Scratch buffer used to linearize multi-segment mbufs on tx paths. */
38 static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
/* Wall-clock time and TSC value sampled at probe time; used by
 * calculate_timestamp() to synthesize pcap packet timestamps.
 */
39 static struct timeval start_time;
40 static uint64_t start_cycles;
/* Counter appended to the fixed MAC prefix so each pcap ethdev gets a
 * distinct address (see pmd_init_internals()).
 */
42 static uint8_t iface_idx;
/* NOTE(review): the counters below appear to be members of a per-queue
 * statistics struct (queue_stat) whose opening line is elided from this
 * excerpt -- confirm against the full file.
 */
45 volatile unsigned long pkts;
46 volatile unsigned long bytes;
47 volatile unsigned long err_pkts;
/* Per-queue rx state: mempool to allocate mbufs from, rx statistics,
 * and the devargs type string.  NOTE(review): fields such as the pcap
 * handle, name and in_port are elided from this excerpt but referenced
 * by the rx path below.
 */
50 struct pcap_rx_queue {
53 struct rte_mempool *mb_pool;
54 struct queue_stat rx_stat;
56 char type[ETH_PCAP_ARG_MAXLEN];
/* Per-queue tx state: a pcap dumper (file output), tx statistics, and
 * the devargs type string.  NOTE(review): the pcap handle and name
 * fields are elided from this excerpt but used by the tx paths.
 */
59 struct pcap_tx_queue {
60 pcap_dumper_t *dumper;
62 struct queue_stat tx_stat;
64 char type[ETH_PCAP_ARG_MAXLEN];
/* Per-device private data: fixed-size arrays of rx/tx queues and the
 * synthesized MAC address.  NOTE(review): fields such as if_index and
 * single_iface are elided here but referenced elsewhere in the file.
 */
67 struct pmd_internals {
68 struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
69 struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
70 struct ether_addr eth_addr;
/* NOTE(review): the members below belong to the devargs-parsing struct
 * (pmd_devargs) whose opening line is elided: a queue count plus one
 * pcap/dumper/name/type record per queue.
 */
76 unsigned int num_of_queue;
77 struct devargs_queue {
78 pcap_dumper_t *dumper;
82 } queue[RTE_PMD_PCAP_MAX_QUEUES];
/* Whitelist of devargs keys accepted by rte_kvargs_parse() in probe.
 * NOTE(review): entries for rx_pcap/tx_pcap/iface are elided here.
 */
85 static const char *valid_arguments[] = {
88 ETH_PCAP_RX_IFACE_ARG,
89 ETH_PCAP_RX_IFACE_IN_ARG,
90 ETH_PCAP_TX_IFACE_ARG,
/* Default link state advertised by the PMD: fixed 10G full duplex,
 * initially down; eth_dev_start()/eth_dev_stop() toggle link_status.
 */
95 static struct rte_eth_link pmd_link = {
96 .link_speed = ETH_SPEED_NUM_10G,
97 .link_duplex = ETH_LINK_FULL_DUPLEX,
98 .link_status = ETH_LINK_DOWN,
99 .link_autoneg = ETH_LINK_FIXED,
/* Dynamic log type registered in eth_pcap_init_log(). */
102 static int eth_pcap_logtype;
/* Driver-local logging helper; prefixes messages with the caller's
 * function name and appends a newline.
 */
104 #define PMD_LOG(level, fmt, args...) \
105 rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
106 "%s(): " fmt "\n", __func__, ##args)
/*
 * Copy a captured frame that does not fit in a single mbuf into a chain
 * of mbufs allocated from mb_pool.  Returns the number of segments in
 * the chain; the caller (eth_pcap_rx) treats -1 as allocation failure.
 * NOTE(review): several original lines are elided in this excerpt
 * (e.g. the data/data_len advance between segments) -- verify against
 * the full file before editing.
 */
109 eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
110 const u_char *data, uint16_t data_len)
112 /* Copy the first segment. */
113 uint16_t len = rte_pktmbuf_tailroom(mbuf);
114 struct rte_mbuf *m = mbuf;
116 rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
120 while (data_len > 0) {
121 /* Allocate next mbuf and point to that. */
122 m->next = rte_pktmbuf_alloc(mb_pool);
124 if (unlikely(!m->next))
129 /* Headroom is not needed in chained mbufs. */
130 rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
134 /* Copy next segment. */
135 len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
136 rte_memcpy(rte_pktmbuf_append(m, len), data, len);
143 return mbuf->nb_segs;
146 /* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
/* NOTE(review): the loop over mbuf->next segments is partially elided
 * in this excerpt; data_len accumulates the bytes copied so far and is
 * used as the write offset into the destination buffer.
 */
148 eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
150 uint16_t data_len = 0;
153 rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
156 data_len += mbuf->data_len;
/*
 * rx burst callback: read up to nb_pkts packets from the queue's pcap
 * handle (live interface or offline file), copy each into freshly
 * allocated mbufs, and update per-queue rx statistics.
 * NOTE(review): interior lines (loop breaks, num_rx accounting, the
 * return statement) are elided in this excerpt.
 */
162 eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
165 struct pcap_pkthdr header;
166 const u_char *packet;
167 struct rte_mbuf *mbuf;
168 struct pcap_rx_queue *pcap_q = queue;
171 uint32_t rx_bytes = 0;
/* Nothing to do without an open capture handle or a zero-sized burst. */
173 if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
176 /* Reads the given number of packets from the pcap file one by one
177 * and copies the packet data into a newly allocated mbuf to return.
179 for (i = 0; i < nb_pkts; i++) {
180 /* Get the next PCAP packet */
181 packet = pcap_next(pcap_q->pcap, &header);
182 if (unlikely(packet == NULL))
185 mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
186 if (unlikely(mbuf == NULL))
189 /* Now get the space available for data in the mbuf */
190 buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
191 RTE_PKTMBUF_HEADROOM;
193 if (header.caplen <= buf_size) {
194 /* pcap packet will fit in the mbuf, can copy it */
195 rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
197 mbuf->data_len = (uint16_t)header.caplen;
199 /* Try read jumbo frame into multi mbufs. */
200 if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
203 header.caplen) == -1)) {
204 rte_pktmbuf_free(mbuf);
209 mbuf->pkt_len = (uint16_t)header.caplen;
210 mbuf->port = pcap_q->in_port;
213 rx_bytes += header.caplen;
/* Fold the burst's counts into the persistent queue statistics. */
215 pcap_q->rx_stat.pkts += num_rx;
216 pcap_q->rx_stat.bytes += rx_bytes;
/*
 * Synthesize a pcap timestamp: convert the TSC delta since probe into
 * seconds/microseconds and add it to the wall-clock start_time.
 * NOTE(review): 'hz' is a variable not visible in this excerpt --
 * presumably the timer frequency captured at probe; confirm.
 */
222 calculate_timestamp(struct timeval *ts) {
224 struct timeval cur_time;
226 cycles = rte_get_timer_cycles() - start_cycles;
227 cur_time.tv_sec = cycles / hz;
228 cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
229 timeradd(&start_time, &cur_time, ts);
233 * Callback to handle writing packets to a pcap file.
/* tx burst callback for the "tx_pcap" mode: dump each mbuf (linearizing
 * multi-segment chains through tx_pcap_data first) into the pcap dumper,
 * flush every burst, free the mbufs, and update tx statistics.
 * NOTE(review): interior lines (mbuf assignment from bufs[], num_tx
 * accounting, the return) are elided in this excerpt.
 */
236 eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
239 struct rte_mbuf *mbuf;
240 struct pcap_tx_queue *dumper_q = queue;
242 uint32_t tx_bytes = 0;
243 struct pcap_pkthdr header;
245 if (dumper_q->dumper == NULL || nb_pkts == 0)
248 /* writes the nb_pkts packets to the previously opened pcap file
250 for (i = 0; i < nb_pkts; i++) {
/* Each record carries a synthesized timestamp and the full packet length. */
252 calculate_timestamp(&header.ts);
253 header.len = mbuf->pkt_len;
254 header.caplen = header.len;
256 if (likely(mbuf->nb_segs == 1)) {
257 pcap_dump((u_char *)dumper_q->dumper, &header,
258 rte_pktmbuf_mtod(mbuf, void*));
/* Multi-segment: linearize into the shared scratch buffer first. */
260 if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
261 eth_pcap_gather_data(tx_pcap_data, mbuf);
262 pcap_dump((u_char *)dumper_q->dumper, &header,
266 "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
268 ETHER_MAX_JUMBO_FRAME_LEN);
270 rte_pktmbuf_free(mbuf);
276 tx_bytes += mbuf->pkt_len;
277 rte_pktmbuf_free(mbuf);
281 * Since there's no place to hook a callback when the forwarding
282 * process stops and to make sure the pcap file is actually written,
283 * we flush the pcap dumper within each burst.
285 pcap_dump_flush(dumper_q->dumper);
286 dumper_q->tx_stat.pkts += num_tx;
287 dumper_q->tx_stat.bytes += tx_bytes;
288 dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
294 * Callback to handle sending packets through a real NIC.
/* tx burst callback for the interface modes: inject each mbuf onto the
 * live interface with pcap_sendpacket(), linearizing multi-segment
 * chains through tx_pcap_data, then free the mbufs and update stats.
 * NOTE(review): interior lines (mbuf assignment, num_tx accounting,
 * the return) are elided in this excerpt.
 */
297 eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
301 struct rte_mbuf *mbuf;
302 struct pcap_tx_queue *tx_queue = queue;
304 uint32_t tx_bytes = 0;
306 if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
309 for (i = 0; i < nb_pkts; i++) {
312 if (likely(mbuf->nb_segs == 1)) {
313 ret = pcap_sendpacket(tx_queue->pcap,
314 rte_pktmbuf_mtod(mbuf, u_char *),
/* Multi-segment: linearize into the shared scratch buffer first. */
317 if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
318 eth_pcap_gather_data(tx_pcap_data, mbuf);
319 ret = pcap_sendpacket(tx_queue->pcap,
320 tx_pcap_data, mbuf->pkt_len);
323 "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
325 ETHER_MAX_JUMBO_FRAME_LEN);
327 rte_pktmbuf_free(mbuf);
/* pcap_sendpacket() returns non-zero on failure. */
332 if (unlikely(ret != 0))
335 tx_bytes += mbuf->pkt_len;
336 rte_pktmbuf_free(mbuf);
339 tx_queue->tx_stat.pkts += num_tx;
340 tx_queue->tx_stat.bytes += tx_bytes;
341 tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;
347 * pcap_open_live wrapper function
/* Opens the given interface in promiscuous mode with the PMD's snap
 * length and timeout; logs and reports failure if libpcap declines.
 */
350 open_iface_live(const char *iface, pcap_t **pcap) {
351 *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
352 RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
355 PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
/* Thin wrapper over open_iface_live() adding a second error log. */
363 open_single_iface(const char *iface, pcap_t **pcap)
365 if (open_iface_live(iface, pcap) < 0) {
366 PMD_LOG(ERR, "Couldn't open interface %s", iface);
/* Create a pcap dumper that writes Ethernet frames to pcap_filename.
 * A "dead" pcap handle supplies the link type and snapshot length that
 * pcap_dump_open() requires.
 */
374 open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
379 * We need to create a dummy empty pcap_t to use it
380 * with pcap_dump_open(). We create big enough an Ethernet
383 tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
384 if (tx_pcap == NULL) {
385 PMD_LOG(ERR, "Couldn't create dead pcap");
389 /* The dumper is created using the previous pcap_t reference */
390 *dumper = pcap_dump_open(tx_pcap, pcap_filename);
391 if (*dumper == NULL) {
393 PMD_LOG(ERR, "Couldn't open %s for writing.",
/* Open an existing capture file for offline reading. */
403 open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
405 *pcap = pcap_open_offline(pcap_filename, errbuf);
407 PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
/*
 * dev_start callback: (re)open any pcap handles/dumpers that are not
 * already open, mark all queues started, and raise the link.
 * In the single-"iface" case one live pcap handle is shared by rx
 * queue 0 and tx queue 0.
 * NOTE(review): some branches (e.g. the rx->pcap sharing assignment
 * and the dumper reopen condition) are elided in this excerpt.
 */
416 eth_dev_start(struct rte_eth_dev *dev)
419 struct pmd_internals *internals = dev->data->dev_private;
420 struct pcap_tx_queue *tx;
421 struct pcap_rx_queue *rx;
423 /* Special iface case. Single pcap is open and shared between tx/rx. */
424 if (internals->single_iface) {
425 tx = &internals->tx_queue[0];
426 rx = &internals->rx_queue[0];
428 if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
429 if (open_single_iface(tx->name, &tx->pcap) < 0)
437 /* If not open already, open tx pcaps/dumpers */
438 for (i = 0; i < dev->data->nb_tx_queues; i++) {
439 tx = &internals->tx_queue[i];
442 strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
443 if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
445 } else if (!tx->pcap &&
446 strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
447 if (open_single_iface(tx->name, &tx->pcap) < 0)
452 /* If not open already, open rx pcaps */
453 for (i = 0; i < dev->data->nb_rx_queues; i++) {
454 rx = &internals->rx_queue[i];
456 if (rx->pcap != NULL)
459 if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
460 if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
462 } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
463 if (open_single_iface(rx->name, &rx->pcap) < 0)
/* All handles open: advertise started queues and link up. */
469 for (i = 0; i < dev->data->nb_rx_queues; i++)
470 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
472 for (i = 0; i < dev->data->nb_tx_queues; i++)
473 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
475 dev->data->dev_link.link_status = ETH_LINK_UP;
481 * This function gets called when the current port gets stopped.
482 * Is the only place for us to close all the tx streams dumpers.
483 * If not called the dumpers will be flushed within each tx burst.
/* NOTE(review): the handle-NULLing assignments after each close are
 * elided in this excerpt -- the full file presumably resets the
 * pcap/dumper pointers so eth_dev_start() can reopen them.
 */
486 eth_dev_stop(struct rte_eth_dev *dev)
489 struct pmd_internals *internals = dev->data->dev_private;
490 struct pcap_tx_queue *tx;
491 struct pcap_rx_queue *rx;
493 /* Special iface case. Single pcap is open and shared between tx/rx. */
494 if (internals->single_iface) {
495 tx = &internals->tx_queue[0];
496 rx = &internals->rx_queue[0];
/* Shared handle: close once via the tx side. */
497 pcap_close(tx->pcap);
503 for (i = 0; i < dev->data->nb_tx_queues; i++) {
504 tx = &internals->tx_queue[i];
506 if (tx->dumper != NULL) {
507 pcap_dump_close(tx->dumper);
511 if (tx->pcap != NULL) {
512 pcap_close(tx->pcap);
517 for (i = 0; i < dev->data->nb_rx_queues; i++) {
518 rx = &internals->rx_queue[i];
520 if (rx->pcap != NULL) {
521 pcap_close(rx->pcap);
/* Mark all queues stopped and drop the link. */
527 for (i = 0; i < dev->data->nb_rx_queues; i++)
528 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
530 for (i = 0; i < dev->data->nb_tx_queues; i++)
531 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
533 dev->data->dev_link.link_status = ETH_LINK_DOWN;
/* dev_configure callback: nothing to configure for a pcap device. */
537 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
/* dev_infos_get callback: report static capabilities -- a single MAC
 * address, unbounded rx packet length, and queue counts mirroring the
 * currently configured numbers.
 */
543 eth_dev_info(struct rte_eth_dev *dev,
544 struct rte_eth_dev_info *dev_info)
546 struct pmd_internals *internals = dev->data->dev_private;
548 dev_info->if_index = internals->if_index;
549 dev_info->max_mac_addrs = 1;
550 dev_info->max_rx_pktlen = (uint32_t) -1;
551 dev_info->max_rx_queues = dev->data->nb_rx_queues;
552 dev_info->max_tx_queues = dev->data->nb_tx_queues;
553 dev_info->min_rx_bufsize = 0;
/* stats_get callback: copy per-queue counters into the rte_eth_stats
 * queue arrays (bounded by RTE_ETHDEV_QUEUE_STAT_CNTRS) and sum them
 * into the device-level totals.
 */
557 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
560 unsigned long rx_packets_total = 0, rx_bytes_total = 0;
561 unsigned long tx_packets_total = 0, tx_bytes_total = 0;
562 unsigned long tx_packets_err_total = 0;
563 const struct pmd_internals *internal = dev->data->dev_private;
565 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
566 i < dev->data->nb_rx_queues; i++) {
567 stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
568 stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
569 rx_packets_total += stats->q_ipackets[i];
570 rx_bytes_total += stats->q_ibytes[i];
573 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
574 i < dev->data->nb_tx_queues; i++) {
575 stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
576 stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
577 stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
578 tx_packets_total += stats->q_opackets[i];
579 tx_bytes_total += stats->q_obytes[i];
580 tx_packets_err_total += stats->q_errors[i];
583 stats->ipackets = rx_packets_total;
584 stats->ibytes = rx_bytes_total;
585 stats->opackets = tx_packets_total;
586 stats->obytes = tx_bytes_total;
587 stats->oerrors = tx_packets_err_total;
/* stats_reset callback: zero every per-queue rx and tx counter. */
593 eth_stats_reset(struct rte_eth_dev *dev)
596 struct pmd_internals *internal = dev->data->dev_private;
598 for (i = 0; i < dev->data->nb_rx_queues; i++) {
599 internal->rx_queue[i].rx_stat.pkts = 0;
600 internal->rx_queue[i].rx_stat.bytes = 0;
603 for (i = 0; i < dev->data->nb_tx_queues; i++) {
604 internal->tx_queue[i].tx_stat.pkts = 0;
605 internal->tx_queue[i].tx_stat.bytes = 0;
606 internal->tx_queue[i].tx_stat.err_pkts = 0;
/* No-op dev_close: resources are released in eth_dev_stop()/remove. */
611 eth_dev_close(struct rte_eth_dev *dev __rte_unused)
/* No-op queue release: queues live inside pmd_internals, not malloc'd. */
616 eth_queue_release(void *q __rte_unused)
/* No-op link update: link state is driven by start/stop only. */
621 eth_link_update(struct rte_eth_dev *dev __rte_unused,
622 int wait_to_complete __rte_unused)
/* rx_queue_setup callback: bind the mempool and the device's port id to
 * the pre-allocated per-queue struct; descriptor count, socket and conf
 * are ignored (software device).
 */
628 eth_rx_queue_setup(struct rte_eth_dev *dev,
629 uint16_t rx_queue_id,
630 uint16_t nb_rx_desc __rte_unused,
631 unsigned int socket_id __rte_unused,
632 const struct rte_eth_rxconf *rx_conf __rte_unused,
633 struct rte_mempool *mb_pool)
635 struct pmd_internals *internals = dev->data->dev_private;
636 struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
638 pcap_q->mb_pool = mb_pool;
639 dev->data->rx_queues[rx_queue_id] = pcap_q;
640 pcap_q->in_port = dev->data->port_id;
/* tx_queue_setup callback: simply point the ethdev at the matching
 * pre-allocated queue struct inside pmd_internals.
 */
646 eth_tx_queue_setup(struct rte_eth_dev *dev,
647 uint16_t tx_queue_id,
648 uint16_t nb_tx_desc __rte_unused,
649 unsigned int socket_id __rte_unused,
650 const struct rte_eth_txconf *tx_conf __rte_unused)
652 struct pmd_internals *internals = dev->data->dev_private;
654 dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
/* Per-queue start/stop callbacks: only flip the recorded queue state;
 * pcap handles themselves are managed in eth_dev_start()/stop().
 */
660 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
662 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
668 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
670 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
676 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
678 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
684 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
686 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
/* ethdev operations table wired into every pcap ethdev instance. */
691 static const struct eth_dev_ops ops = {
692 .dev_start = eth_dev_start,
693 .dev_stop = eth_dev_stop,
694 .dev_close = eth_dev_close,
695 .dev_configure = eth_dev_configure,
696 .dev_infos_get = eth_dev_info,
697 .rx_queue_setup = eth_rx_queue_setup,
698 .tx_queue_setup = eth_tx_queue_setup,
699 .rx_queue_start = eth_rx_queue_start,
700 .tx_queue_start = eth_tx_queue_start,
701 .rx_queue_stop = eth_rx_queue_stop,
702 .tx_queue_stop = eth_tx_queue_stop,
703 .rx_queue_release = eth_queue_release,
704 .tx_queue_release = eth_queue_release,
705 .link_update = eth_link_update,
706 .stats_get = eth_stats_get,
707 .stats_reset = eth_stats_reset,
/* Append one queue record (pcap handle or dumper plus name/type) to the
 * devargs accumulator, failing once RTE_PMD_PCAP_MAX_QUEUES is reached.
 * NOTE(review): the num_of_queue increment and return are elided here.
 */
711 add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
712 pcap_t *pcap, pcap_dumper_t *dumper)
714 if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
717 pmd->queue[pmd->num_of_queue].pcap = pcap;
719 pmd->queue[pmd->num_of_queue].dumper = dumper;
720 pmd->queue[pmd->num_of_queue].name = name;
721 pmd->queue[pmd->num_of_queue].type = type;
727 * Function handler that opens the pcap file for reading and stores a
728 * reference of it for use it later on.
/* kvargs handler for "rx_pcap=<file>": open the capture file offline
 * and register it as an rx queue.
 */
731 open_rx_pcap(const char *key, const char *value, void *extra_args)
733 const char *pcap_filename = value;
734 struct pmd_devargs *rx = extra_args;
737 if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
/* Queue table full: undo the open (cleanup lines elided here). */
740 if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
749 * Opens a pcap file for writing and stores a reference to it
750 * for use it later on.
/* kvargs handler for "tx_pcap=<file>": create a dumper and register it
 * as a tx queue; the dumper is closed again if the queue table is full.
 */
753 open_tx_pcap(const char *key, const char *value, void *extra_args)
755 const char *pcap_filename = value;
756 struct pmd_devargs *dumpers = extra_args;
757 pcap_dumper_t *dumper;
759 if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
762 if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
763 pcap_dump_close(dumper);
771 * Opens an interface for reading and writing
/* kvargs handler for "iface=<ifc>": open one live handle and install it
 * directly as queue 0 (shared by rx and tx in single-iface mode).
 */
774 open_rx_tx_iface(const char *key, const char *value, void *extra_args)
776 const char *iface = value;
777 struct pmd_devargs *tx = extra_args;
780 if (open_single_iface(iface, &pcap) < 0)
783 tx->queue[0].pcap = pcap;
784 tx->queue[0].name = iface;
785 tx->queue[0].type = key;
/* Restrict a live capture to one direction (used by "rx_iface_in" to
 * capture only incoming traffic); logs success or failure.
 */
791 set_iface_direction(const char *iface, pcap_t *pcap,
792 pcap_direction_t direction)
794 const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
795 if (pcap_setdirection(pcap, direction) < 0) {
796 PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n",
797 iface, direction_str, pcap_geterr(pcap))
800 PMD_LOG(INFO, "Setting %s pcap direction %s\n",
801 iface, direction_str);
/* Common kvargs helper: open a live interface and register it as a
 * queue under the given devargs key.
 */
806 open_iface(const char *key, const char *value, void *extra_args)
808 const char *iface = value;
809 struct pmd_devargs *pmd = extra_args;
812 if (open_single_iface(iface, &pcap) < 0)
814 if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
823 * Opens a NIC for reading packets from it
/* kvargs handler for "rx_iface"/"rx_iface_in": open the interface and,
 * for the _in variant, additionally restrict capture to PCAP_D_IN on
 * the queue that open_iface() just appended.
 */
826 open_rx_iface(const char *key, const char *value, void *extra_args)
828 int ret = open_iface(key, value, extra_args);
831 if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
832 struct pmd_devargs *pmd = extra_args;
833 unsigned int qid = pmd->num_of_queue - 1;
835 set_iface_direction(pmd->queue[qid].name,
836 pmd->queue[qid].pcap,
/* Catch-all kvargs handler used when no "rx_pcap" was supplied: routes
 * both rx_iface and rx_iface_in keys to open_rx_iface().
 */
844 rx_iface_args_process(const char *key, const char *value, void *extra_args)
846 if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
847 strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
848 return open_rx_iface(key, value, extra_args);
854 * Opens a NIC for writing packets to it
/* kvargs handler for "tx_iface": same as open_iface(). */
857 open_tx_iface(const char *key, const char *value, void *extra_args)
859 return open_iface(key, value, extra_args);
/* Forward declaration; the driver struct is defined near EOF. */
862 static struct rte_vdev_driver pmd_pcap_drv;
/*
 * Allocate the ethdev and its private pmd_internals, synthesize a MAC
 * address, and wire up link state and the ops table.
 * NOTE(review): the allocation-failure check after
 * rte_eth_vdev_allocate() is elided in this excerpt.
 */
865 pmd_init_internals(struct rte_vdev_device *vdev,
866 const unsigned int nb_rx_queues,
867 const unsigned int nb_tx_queues,
868 struct pmd_internals **internals,
869 struct rte_eth_dev **eth_dev)
871 struct rte_eth_dev_data *data;
872 unsigned int numa_node = vdev->device.numa_node;
874 PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
877 /* reserve an ethdev entry */
878 *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
882 /* now put it all together
883 * - store queue data in internals,
884 * - store numa_node info in eth_dev
885 * - point eth_dev_data to internals
886 * - and point eth_dev structure to new eth_dev_data structure
888 *internals = (*eth_dev)->data->dev_private;
890 * Interface MAC = 02:70:63:61:70:<iface_idx>
891 * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
892 * where the middle 4 characters are converted to hex.
894 (*internals)->eth_addr = (struct ether_addr) {
895 .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
897 data = (*eth_dev)->data;
898 data->nb_rx_queues = (uint16_t)nb_rx_queues;
899 data->nb_tx_queues = (uint16_t)nb_tx_queues;
900 data->dev_link = pmd_link;
901 data->mac_addrs = &(*internals)->eth_addr;
904 * NOTE: we'll replace the data element, of originally allocated
905 * eth_dev so the rings are local per-process
907 (*eth_dev)->dev_ops = &ops;
/*
 * Shared device-construction path: validate the parsed queue lists,
 * create the ethdev via pmd_init_internals(), copy each devargs queue
 * record into the per-device rx/tx queue structs, and resolve the
 * kernel if_index when an "iface"-style key was given.
 * NOTE(review): early-return error paths are elided in this excerpt.
 */
913 eth_from_pcaps_common(struct rte_vdev_device *vdev,
914 struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
915 struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
916 struct rte_kvargs *kvlist, struct pmd_internals **internals,
917 struct rte_eth_dev **eth_dev)
919 struct rte_kvargs_pair *pair = NULL;
923 /* do some parameter checking */
924 if (rx_queues == NULL && nb_rx_queues > 0)
926 if (tx_queues == NULL && nb_tx_queues > 0)
929 if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
933 for (i = 0; i < nb_rx_queues; i++) {
934 struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
935 struct devargs_queue *queue = &rx_queues->queue[i];
937 rx->pcap = queue->pcap;
938 snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
939 snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
942 for (i = 0; i < nb_tx_queues; i++) {
943 struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
944 struct devargs_queue *queue = &tx_queues->queue[i];
946 tx->dumper = queue->dumper;
947 tx->pcap = queue->pcap;
948 snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
949 snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
/* Look for any *iface* key so if_index can reflect the real NIC. */
952 for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
953 pair = &kvlist->pairs[k_idx];
954 if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
959 (*internals)->if_index = 0;
961 (*internals)->if_index = if_nametoindex(pair->value);
/*
 * Finish device creation: run the common setup, record the single-iface
 * flag, and select burst callbacks -- the dumper-based tx path when
 * writing to a pcap file, the live-injection path otherwise.
 */
967 eth_from_pcaps(struct rte_vdev_device *vdev,
968 struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
969 struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
970 struct rte_kvargs *kvlist, int single_iface,
971 unsigned int using_dumpers)
973 struct pmd_internals *internals = NULL;
974 struct rte_eth_dev *eth_dev = NULL;
977 ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
978 tx_queues, nb_tx_queues, kvlist, &internals, &eth_dev);
983 /* store whether we are using a single interface for rx/tx or not */
984 internals->single_iface = single_iface;
986 eth_dev->rx_pkt_burst = eth_pcap_rx;
989 eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
991 eth_dev->tx_pkt_burst = eth_pcap_tx;
993 rte_eth_dev_probing_finish(eth_dev);
/*
 * vdev probe entry point: capture the timestamp baseline, handle
 * secondary-process attach, parse devargs, open the requested pcap
 * files/interfaces into rx/tx queue lists, and build the ethdev.
 * NOTE(review): several error-handling gotos and the final return are
 * elided in this excerpt.
 */
998 pmd_pcap_probe(struct rte_vdev_device *dev)
1001 unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
1002 struct rte_kvargs *kvlist;
1003 struct pmd_devargs pcaps = {0};
1004 struct pmd_devargs dumpers = {0};
1005 struct rte_eth_dev *eth_dev;
1006 int single_iface = 0;
1009 name = rte_vdev_device_name(dev);
1010 PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
/* Baseline for calculate_timestamp(): wall clock + TSC at probe. */
1012 gettimeofday(&start_time, NULL);
1013 start_cycles = rte_get_timer_cycles();
1014 hz = rte_get_timer_hz();
/* Secondary process with no args: attach to the primary's ethdev. */
1016 if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
1017 strlen(rte_vdev_device_args(dev)) == 0) {
1018 eth_dev = rte_eth_dev_attach_secondary(name);
1020 PMD_LOG(ERR, "Failed to probe %s", name);
1023 /* TODO: request info from primary to set up Rx and Tx */
1024 eth_dev->dev_ops = &ops;
1025 eth_dev->device = &dev->device;
1026 rte_eth_dev_probing_finish(eth_dev);
1030 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1035 * If iface argument is passed we open the NICs and use them for
1038 if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
1040 ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
1041 &open_rx_tx_iface, &pcaps);
/* Single live handle shared: mirror queue 0 into the dumpers list. */
1046 dumpers.queue[0] = pcaps.queue[0];
1049 pcaps.num_of_queue = 1;
1050 dumpers.num_of_queue = 1;
1056 * We check whether we want to open a RX stream from a real NIC or a
1059 is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
1060 pcaps.num_of_queue = 0;
1063 ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
1064 &open_rx_pcap, &pcaps);
1066 ret = rte_kvargs_process(kvlist, NULL,
1067 &rx_iface_args_process, &pcaps);
1074 * We check whether we want to open a TX stream to a real NIC or a
1077 is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
1078 dumpers.num_of_queue = 0;
1081 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
1082 &open_tx_pcap, &dumpers);
1084 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
1085 &open_tx_iface, &dumpers);
1091 ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
1092 dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);
1095 rte_kvargs_free(kvlist);
1101 pmd_pcap_remove(struct rte_vdev_device *dev)
1103 struct rte_eth_dev *eth_dev = NULL;
1105 PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
1111 /* reserve an ethdev entry */
1112 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1113 if (eth_dev == NULL)
1116 rte_free(eth_dev->data->dev_private);
1118 rte_eth_dev_release_port(eth_dev);
/* Driver registration: vdev driver object, its "net_pcap" name, legacy
 * "eth_pcap" alias, and the documented devargs string.
 */
1123 static struct rte_vdev_driver pmd_pcap_drv = {
1124 .probe = pmd_pcap_probe,
1125 .remove = pmd_pcap_remove,
1128 RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
1129 RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
1130 RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
1131 ETH_PCAP_RX_PCAP_ARG "=<string> "
1132 ETH_PCAP_TX_PCAP_ARG "=<string> "
1133 ETH_PCAP_RX_IFACE_ARG "=<ifc> "
1134 ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
1135 ETH_PCAP_TX_IFACE_ARG "=<ifc> "
1136 ETH_PCAP_IFACE_ARG "=<ifc>");
/* Constructor: register the dynamic log type with NOTICE default. */
1138 RTE_INIT(eth_pcap_init_log)
1140 eth_pcap_logtype = rte_log_register("pmd.net.pcap");
1141 if (eth_pcap_logtype >= 0)
1142 rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);