*/
#include <time.h>
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_memcpy.h>
-#include <rte_string_fns.h>
-#include <rte_cycles.h>
-#include <rte_kvargs.h>
-#include <rte_vdev.h>
#include <net/if.h>
#include <pcap.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_vdev.h>
+
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
static uint64_t start_cycles;
static uint64_t hz;
+/*
+ * Per-queue packet/byte/error counters shared by the Rx and Tx queue
+ * structures.  NOTE(review): volatile makes datapath updates visible to
+ * eth_stats_get() but does NOT provide atomicity — assumes a single
+ * writer per queue; confirm.
+ */
+struct queue_stat {
+ volatile unsigned long pkts;
+ volatile unsigned long bytes;
+ volatile unsigned long err_pkts;
+};
+
struct pcap_rx_queue {
pcap_t *pcap;
uint8_t in_port;
struct rte_mempool *mb_pool;
- volatile unsigned long rx_pkts;
- volatile unsigned long rx_bytes;
- volatile unsigned long err_pkts;
+ /* Rx counters folded into the common queue_stat layout */
+ struct queue_stat rx_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
};
struct pcap_tx_queue {
pcap_dumper_t *dumper;
pcap_t *pcap;
- volatile unsigned long tx_pkts;
- volatile unsigned long tx_bytes;
- volatile unsigned long err_pkts;
+ /* Tx counters folded into the common queue_stat layout */
+ struct queue_stat tx_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
};
} queue[RTE_PMD_PCAP_MAX_QUEUES];
};
-const char *valid_arguments[] = {
+static const char *valid_arguments[] = {
ETH_PCAP_RX_PCAP_ARG,
ETH_PCAP_TX_PCAP_ARG,
ETH_PCAP_RX_IFACE_ARG,
NULL
};
-static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper);
-static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
-static int open_single_iface(const char *iface, pcap_t **pcap);
-
static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
num_rx++;
rx_bytes += header.caplen;
}
- pcap_q->rx_pkts += num_rx;
- pcap_q->rx_bytes += rx_bytes;
+ pcap_q->rx_stat.pkts += num_rx;
+ pcap_q->rx_stat.bytes += rx_bytes;
return num_rx;
}
* we flush the pcap dumper within each burst.
*/
pcap_dump_flush(dumper_q->dumper);
- dumper_q->tx_pkts += num_tx;
- dumper_q->tx_bytes += tx_bytes;
- dumper_q->err_pkts += nb_pkts - num_tx;
+ dumper_q->tx_stat.pkts += num_tx;
+ dumper_q->tx_stat.bytes += tx_bytes;
+ dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
return num_tx;
}
rte_pktmbuf_free(mbuf);
}
- tx_queue->tx_pkts += num_tx;
- tx_queue->tx_bytes += tx_bytes;
- tx_queue->err_pkts += nb_pkts - num_tx;
+ tx_queue->tx_stat.pkts += num_tx;
+ tx_queue->tx_stat.bytes += tx_bytes;
+ tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;
return num_tx;
}
+/*
+ * pcap_open_live() wrapper: opens @iface in promiscuous mode with the
+ * PMD snaplen/timeout settings and stores the handle in *pcap.
+ * Returns 0 on success, -1 on failure (error already logged; details
+ * left in the file-scope errbuf).
+ */
+static inline int
+open_iface_live(const char *iface, pcap_t **pcap) {
+ *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
+ RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
+
+ if (*pcap == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Open @iface for live capture (used for both Rx and Tx directions).
+ * Thin wrapper around open_iface_live() that adds a second log line on
+ * failure.  Returns 0 on success, -1 on failure.
+ */
+static int
+open_single_iface(const char *iface, pcap_t **pcap)
+{
+ if (open_iface_live(iface, pcap) < 0) {
+ RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Open @pcap_filename for writing: create a dead (capture-less) pcap_t
+ * handle and attach a pcap dumper to it.  On success the dumper keeps a
+ * reference to the dead handle, so it must stay open.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
+{
+ pcap_t *tx_pcap;
+
+ /*
+ * We need to create a dummy empty pcap_t to use it
+ * with pcap_dump_open(). We create big enough an Ethernet
+ * pcap holder.
+ */
+ if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
+ == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
+ return -1;
+ }
+
+ /* The dumper is created using the previous pcap_t reference */
+ if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n",
+ pcap_filename);
+ /* Don't leak the dummy pcap_t when the dumper fails */
+ pcap_close(tx_pcap);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Open an existing pcap capture file @pcap_filename for offline reading
+ * and store the handle in *pcap.  Returns 0 on success, -1 on failure
+ * (libpcap error text logged from errbuf).
+ */
+static int
+open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
+{
+ if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
+ return -1;
+ }
+
+ return 0;
+}
+
static int
eth_dev_start(struct rte_eth_dev *dev)
{
static void
eth_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *igb_stats)
+ struct rte_eth_stats *stats)
{
unsigned i;
unsigned long rx_packets_total = 0, rx_bytes_total = 0;
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < dev->data->nb_rx_queues; i++) {
- igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
- igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
- rx_packets_total += igb_stats->q_ipackets[i];
- rx_bytes_total += igb_stats->q_ibytes[i];
+ stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
+ stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
+ rx_packets_total += stats->q_ipackets[i];
+ rx_bytes_total += stats->q_ibytes[i];
}
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < dev->data->nb_tx_queues; i++) {
- igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
- igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
- igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
- tx_packets_total += igb_stats->q_opackets[i];
- tx_bytes_total += igb_stats->q_obytes[i];
- tx_packets_err_total += igb_stats->q_errors[i];
+ stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
+ stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
+ stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
+ tx_packets_total += stats->q_opackets[i];
+ tx_bytes_total += stats->q_obytes[i];
+ tx_packets_err_total += stats->q_errors[i];
}
- igb_stats->ipackets = rx_packets_total;
- igb_stats->ibytes = rx_bytes_total;
- igb_stats->opackets = tx_packets_total;
- igb_stats->obytes = tx_bytes_total;
- igb_stats->oerrors = tx_packets_err_total;
+ stats->ipackets = rx_packets_total;
+ stats->ibytes = rx_bytes_total;
+ stats->opackets = tx_packets_total;
+ stats->obytes = tx_bytes_total;
+ stats->oerrors = tx_packets_err_total;
}
static void
unsigned i;
struct pmd_internals *internal = dev->data->dev_private;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- internal->rx_queue[i].rx_pkts = 0;
- internal->rx_queue[i].rx_bytes = 0;
+ internal->rx_queue[i].rx_stat.pkts = 0;
+ internal->rx_queue[i].rx_stat.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- internal->tx_queue[i].tx_pkts = 0;
- internal->tx_queue[i].tx_bytes = 0;
- internal->tx_queue[i].err_pkts = 0;
+ internal->tx_queue[i].tx_stat.pkts = 0;
+ internal->tx_queue[i].tx_stat.bytes = 0;
+ internal->tx_queue[i].tx_stat.err_pkts = 0;
}
}
return 0;
}
-static int
-open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
-{
- if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
- return -1;
- }
- return 0;
-}
-
/*
* Opens a pcap file for writing and stores a reference to it
* for use it later on.
return 0;
}
-static int
-open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
-{
- pcap_t *tx_pcap;
- /*
- * We need to create a dummy empty pcap_t to use it
- * with pcap_dump_open(). We create big enough an Ethernet
- * pcap holder.
- */
-
- if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
- == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
- return -1;
- }
-
- /* The dumper is created using the previous pcap_t reference */
- if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
- return -1;
- }
-
- return 0;
-}
-
-/*
- * pcap_open_live wrapper function
- */
-static inline int
-open_iface_live(const char *iface, pcap_t **pcap) {
- *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
- RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
-
- if (*pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
- return -1;
- }
- return 0;
-}
-
/*
* Opens an interface for reading and writing
*/
return 0;
}
-static int
-open_single_iface(const char *iface, pcap_t **pcap)
-{
- if (open_iface_live(iface, pcap) < 0) {
- RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
- return -1;
- }
-
- return 0;
-}
-
static int
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct pmd_internals **internals,
- struct rte_eth_dev **eth_dev,
- struct rte_kvargs *kvlist)
+ const unsigned nb_tx_queues, struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev)
{
struct rte_eth_dev_data *data = NULL;
- unsigned k_idx;
- struct rte_kvargs_pair *pair = NULL;
-
- for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
- pair = &kvlist->pairs[k_idx];
- if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
- break;
- }
+ unsigned int numa_node = rte_socket_id();
- RTE_LOG(INFO, PMD,
- "Creating pcap-backed ethdev on numa socket %u\n", numa_node);
+ RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
+ numa_node);
/* now do all data allocation - for eth_dev structure
* and internal (private) data
if (data == NULL)
goto error;
- *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
+ *internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
+ numa_node);
if (*internals == NULL)
goto error;
if (*eth_dev == NULL)
goto error;
- /* check length of device name */
- if ((strlen((*eth_dev)->data->name) + 1) > sizeof(data->name))
- goto error;
-
/* now put it all together
* - store queue data in internals,
* - store numa_node info in eth_dev
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
- /* NOTE: we'll replace the data element, of originally allocated eth_dev
- * so the rings are local per-process */
-
- if (pair == NULL)
- (*internals)->if_index = 0;
- else
- (*internals)->if_index = if_nametoindex(pair->value);
-
data->dev_private = *internals;
data->port_id = (*eth_dev)->data->port_id;
snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
data->mac_addrs = ð_addr;
- strncpy(data->name,
- (*eth_dev)->data->name, strlen((*eth_dev)->data->name));
+ /*
+ * NOTE: we'll replace the data element, of originally allocated
+ * eth_dev so the rings are local per-process
+ */
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
(*eth_dev)->driver = NULL;
static int
rte_eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
const unsigned nb_rx_queues, struct pmd_devargs *tx_queues,
- const unsigned nb_tx_queues, const unsigned numa_node,
- struct rte_kvargs *kvlist, struct pmd_internals **internals,
- struct rte_eth_dev **eth_dev)
+ const unsigned nb_tx_queues, struct rte_kvargs *kvlist,
+ struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
+ struct rte_kvargs_pair *pair = NULL;
+ unsigned k_idx;
unsigned i;
/* do some parameter checking */
if (tx_queues == NULL && nb_tx_queues > 0)
return -1;
- if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- internals, eth_dev, kvlist) < 0)
+ if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, internals,
+ eth_dev) < 0)
return -1;
for (i = 0; i < nb_rx_queues; i++) {
- (*internals)->rx_queue[i].pcap = rx_queues->queue[i].pcap;
- snprintf((*internals)->rx_queue[i].name,
- sizeof((*internals)->rx_queue[i].name), "%s",
- rx_queues->queue[i].name);
- snprintf((*internals)->rx_queue[i].type,
- sizeof((*internals)->rx_queue[i].type), "%s",
- rx_queues->queue[i].type);
- }
- for (i = 0; i < nb_tx_queues; i++) {
- (*internals)->tx_queue[i].dumper = tx_queues->queue[i].dumper;
- snprintf((*internals)->tx_queue[i].name,
- sizeof((*internals)->tx_queue[i].name), "%s",
- tx_queues->queue[i].name);
- snprintf((*internals)->tx_queue[i].type,
- sizeof((*internals)->tx_queue[i].type), "%s",
- tx_queues->queue[i].type);
- }
+ struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
+ struct devargs_queue *queue = &rx_queues->queue[i];
- return 0;
-}
-
-static int
-rte_eth_from_pcaps_n_dumpers(const char *name,
- struct pmd_devargs *rx_queues,
- const unsigned nb_rx_queues,
- struct pmd_devargs *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist)
-{
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- int ret;
+ rx->pcap = queue->pcap;
+ snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
+ snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
+ }
- ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, numa_node, kvlist,
- &internals, ð_dev);
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
+ struct devargs_queue *queue = &tx_queues->queue[i];
- if (ret < 0)
- return ret;
+ tx->dumper = queue->dumper;
+ tx->pcap = queue->pcap;
+ snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
+ snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
+ }
- /* using multiple pcaps/interfaces */
- internals->single_iface = 0;
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
+ break;
+ }
- eth_dev->rx_pkt_burst = eth_pcap_rx;
- eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+ if (pair == NULL)
+ (*internals)->if_index = 0;
+ else
+ (*internals)->if_index = if_nametoindex(pair->value);
return 0;
}
static int
-rte_eth_from_pcaps(const char *name,
- struct pmd_devargs *rx_queues,
- const unsigned nb_rx_queues,
- struct pmd_devargs *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist,
- int single_iface)
+rte_eth_from_pcaps(const char *name, struct pmd_devargs *rx_queues,
+ const unsigned nb_rx_queues, struct pmd_devargs *tx_queues,
+ const unsigned nb_tx_queues, struct rte_kvargs *kvlist,
+ int single_iface, unsigned int using_dumpers)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
int ret;
ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, numa_node, kvlist,
- &internals, ð_dev);
+ tx_queues, nb_tx_queues, kvlist, &internals, ð_dev);
if (ret < 0)
return ret;
internals->single_iface = single_iface;
eth_dev->rx_pkt_burst = eth_pcap_rx;
- eth_dev->tx_pkt_burst = eth_pcap_tx;
+
+ if (using_dumpers)
+ eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+ else
+ eth_dev->tx_pkt_burst = eth_pcap_tx;
return 0;
}
static int
rte_pmd_pcap_devinit(const char *name, const char *params)
{
- unsigned numa_node, using_dumpers = 0;
- int ret;
+ unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
struct pmd_devargs dumpers = {0};
+ int single_iface = 0;
+ int ret;
RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
- numa_node = rte_socket_id();
-
gettimeofday(&start_time, NULL);
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
&open_rx_tx_iface, &pcaps);
+
if (ret < 0)
goto free_kvlist;
- dumpers.queue[0].pcap = pcaps.queue[0].pcap;
- dumpers.queue[0].name = pcaps.queue[0].name;
- dumpers.queue[0].type = pcaps.queue[0].type;
- ret = rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
- numa_node, kvlist, 1);
- goto free_kvlist;
+
+ dumpers.queue[0] = pcaps.queue[0];
+
+ single_iface = 1;
+ pcaps.num_of_queue = 1;
+ dumpers.num_of_queue = 1;
+
+ goto create_eth;
}
/*
* We check whether we want to open a RX stream from a real NIC or a
* pcap file
*/
- if ((pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
- ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
- &open_rx_pcap, &pcaps);
- } else {
+ pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
+ if (pcaps.num_of_queue)
+ is_rx_pcap = 1;
+ else
pcaps.num_of_queue = rte_kvargs_count(kvlist,
ETH_PCAP_RX_IFACE_ARG);
+
+ if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+ pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
+ if (is_rx_pcap)
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
+ &open_rx_pcap, &pcaps);
+ else
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
&open_rx_iface, &pcaps);
- }
if (ret < 0)
goto free_kvlist;
* We check whether we want to open a TX stream to a real NIC or a
* pcap file
*/
- if ((dumpers.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_PCAP_ARG))) {
- ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
- &open_tx_pcap, &dumpers);
- using_dumpers = 1;
- } else {
+ dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
+ if (dumpers.num_of_queue)
+ is_tx_pcap = 1;
+ else
dumpers.num_of_queue = rte_kvargs_count(kvlist,
ETH_PCAP_TX_IFACE_ARG);
+
+ if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+ dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
+ if (is_tx_pcap)
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
+ &open_tx_pcap, &dumpers);
+ else
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
&open_tx_iface, &dumpers);
- }
if (ret < 0)
goto free_kvlist;
- if (using_dumpers)
- ret = rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_queue,
- &dumpers, dumpers.num_of_queue, numa_node, kvlist);
- else
- ret = rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_queue, &dumpers,
- dumpers.num_of_queue, numa_node, kvlist, 0);
+create_eth:
+ ret = rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_queue, &dumpers,
+ dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);
free_kvlist:
rte_kvargs_free(kvlist);