/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* Copyright(c) 2014 6WIND S.A.
* All rights reserved.
*
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <net/if.h>
/* Maximum length of a single devargs value string. */
#define ETH_PCAP_ARG_MAXLEN 64
/* PMD-private queue limit; replaces the ring PMD's
 * RTE_PMD_RING_MAX_{RX,TX}_RINGS constants borrowed by the structs below. */
+#define RTE_PMD_PCAP_MAX_QUEUES 16
+
/* Shared libpcap error buffer — assumes pcap handles are opened from a
 * single (probe-time) context; TODO confirm no concurrent opens. */
static char errbuf[PCAP_ERRBUF_SIZE];
/* Staging buffer for TX capture — presumably used to flatten mbuf data
 * before handing it to pcap; confirm against the TX path (not shown). */
static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
/* Per-device set of RX pcap handles plus the devargs name/type strings
 * they were parsed from, one slot per RX queue. Arrays are now sized by
 * the pcap PMD's own RTE_PMD_PCAP_MAX_QUEUES instead of the ring PMD's
 * RTE_PMD_RING_MAX_RX_RINGS. */
struct rx_pcaps {
unsigned num_of_rx;
- pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
- const char *names[RTE_PMD_RING_MAX_RX_RINGS];
- const char *types[RTE_PMD_RING_MAX_RX_RINGS];
+ pcap_t *pcaps[RTE_PMD_PCAP_MAX_QUEUES];
+ const char *names[RTE_PMD_PCAP_MAX_QUEUES];
+ const char *types[RTE_PMD_PCAP_MAX_QUEUES];
};
/* Per-device set of TX dumpers/pcap handles plus their devargs strings,
 * one slot per TX queue. Note the old code sized the TX-side pcaps/names/
 * types arrays by the *RX* rings constant (RTE_PMD_RING_MAX_RX_RINGS);
 * switching everything to RTE_PMD_PCAP_MAX_QUEUES removes that
 * inconsistency. */
struct tx_pcaps {
unsigned num_of_tx;
- pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
- pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
- const char *names[RTE_PMD_RING_MAX_RX_RINGS];
- const char *types[RTE_PMD_RING_MAX_RX_RINGS];
+ pcap_dumper_t *dumpers[RTE_PMD_PCAP_MAX_QUEUES];
+ pcap_t *pcaps[RTE_PMD_PCAP_MAX_QUEUES];
+ const char *names[RTE_PMD_PCAP_MAX_QUEUES];
+ const char *types[RTE_PMD_PCAP_MAX_QUEUES];
};
/* Device-private data. The duplicated nb_rx_queues/nb_tx_queues counters
 * are dropped: the authoritative queue counts now come from
 * dev->data->nb_{rx,tx}_queues (see the loop-bound changes below), which
 * removes the risk of the two copies drifting apart. */
struct pmd_internals {
- struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
- struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
- unsigned nb_rx_queues;
- unsigned nb_tx_queues;
+ struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
+ struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
int if_index;
int single_iface;
};
/* Fixed dummy MAC address advertised by the virtual device. */
static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
/* Initial link state: magic numbers replaced by the named ethdev
 * constants (10 Gb/s, link down until dev_start). */
static struct rte_eth_link pmd_link = {
- .link_speed = 10000,
+ .link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = 0
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_SPEED_FIXED,
/* NOTE(review): link_autoneg conventionally takes ETH_LINK_FIXED /
 * ETH_LINK_AUTONEG; ETH_LINK_SPEED_FIXED is a link_speeds capability
 * flag (value 1 == autoneg ON here). Confirm against the target ethdev
 * version — upstream later changed PMDs to ETH_LINK_FIXED. */
};
static int
}
/* Hunks from eth_dev_start / eth_dev_stop (bodies elided by the diff):
 * every queue loop switches from the internals->nb_{rx,tx}_queues copies
 * to the authoritative dev->data counts. */
/* If not open already, open tx pcaps/dumpers */
- for (i = 0; i < internals->nb_tx_queues; i++) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
tx = &internals->tx_queue[i];
if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
}
/* If not open already, open rx pcaps */
- for (i = 0; i < internals->nb_rx_queues; i++) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
rx = &internals->rx_queue[i];
if (rx->pcap != NULL)
/* Link status now uses the named ETH_LINK_UP/DOWN constants instead of
 * bare 0/1. */
status_up:
- dev->data->dev_link.link_status = 1;
+ dev->data->dev_link.link_status = ETH_LINK_UP;
return 0;
}
goto status_down;
}
- for (i = 0; i < internals->nb_tx_queues; i++) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
tx = &internals->tx_queue[i];
if (tx->dumper != NULL) {
}
}
- for (i = 0; i < internals->nb_rx_queues; i++) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
rx = &internals->rx_queue[i];
if (rx->pcap != NULL) {
}
status_down:
- dev->data->dev_link.link_status = 0;
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
/* eth_dev_info hunk: queue maxima now read from dev->data. */
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t) -1;
- dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
- dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
+ dev_info->max_rx_queues = dev->data->nb_rx_queues;
+ dev_info->max_tx_queues = dev->data->nb_tx_queues;
/* NOTE(review): this reports the currently *configured* queue count as
 * the maximum, which may stop applications from reconfiguring with more
 * queues later; RTE_PMD_PCAP_MAX_QUEUES would be the true capability —
 * confirm intended semantics. */
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = NULL;
}
/* eth_stats_get hunk: per-queue counters are accumulated, with loops
 * capped by both RTE_ETHDEV_QUEUE_STAT_CNTRS and the dev->data queue
 * counts (previously the internals copies). */
unsigned long tx_packets_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
- i++) {
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_rx_queues; i++) {
igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
rx_packets_total += igb_stats->q_ipackets[i];
rx_bytes_total += igb_stats->q_ibytes[i];
}
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
- i++) {
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_tx_queues; i++) {
igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
/* eth_stats_reset hunk: zero all per-queue counters, same loop-bound
 * change as above. */
{
unsigned i;
struct pmd_internals *internal = dev->data->dev_private;
- for (i = 0; i < internal->nb_rx_queues; i++) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
internal->rx_queue[i].rx_pkts = 0;
internal->rx_queue[i].rx_bytes = 0;
}
- for (i = 0; i < internal->nb_tx_queues; i++) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
internal->tx_queue[i].tx_pkts = 0;
internal->tx_queue[i].tx_bytes = 0;
internal->tx_queue[i].err_pkts = 0;
goto error;
/* rte_pmd_init_internals hunk: rte_eth_dev_allocate() dropped its
 * device-type argument (RTE_ETH_DEV_VIRTUAL removed in the 16.11 API). */
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ *eth_dev = rte_eth_dev_allocate(name);
if (*eth_dev == NULL)
goto error;
/* NOTE: we'll replace the data element, of originally allocated eth_dev
 * so the rings are local per-process */
/* The duplicated queue counters are no longer stored in internals. */
- (*internals)->nb_rx_queues = nb_rx_queues;
- (*internals)->nb_tx_queues = nb_tx_queues;
-
if (pair == NULL)
(*internals)->if_index = 0;
else
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
(*eth_dev)->driver = NULL;
/* Equivalent writes: data was just assigned to (*eth_dev)->data above,
 * so setting the fields through the local pointer is the same object. */
- (*eth_dev)->data->kdrv = RTE_KDRV_NONE;
- (*eth_dev)->data->drv_name = drivername;
- (*eth_dev)->data->numa_node = numa_node;
+ data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ data->kdrv = RTE_KDRV_NONE;
+ data->drv_name = drivername;
+ data->numa_node = numa_node;
return 0;
}
static int
/* Refactor: the body shared by rte_eth_from_pcaps_n_dumpers() and
 * rte_eth_from_pcaps() (parameter checks, internals allocation, and the
 * per-queue name/type copying) is extracted into
 * rte_eth_from_pcaps_common(), which returns the allocated internals and
 * ethdev through out-parameters. */
-rte_eth_from_pcaps_n_dumpers(const char *name,
- struct rx_pcaps *rx_queues,
- const unsigned nb_rx_queues,
- struct tx_pcaps *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist)
+rte_eth_from_pcaps_common(const char *name, struct rx_pcaps *rx_queues,
+ const unsigned nb_rx_queues, struct tx_pcaps *tx_queues,
+ const unsigned nb_tx_queues, const unsigned numa_node,
+ struct rte_kvargs *kvlist, struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev)
{
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
unsigned i;
/* do some parameter checking */
return -1;
if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev, kvlist) < 0)
+ internals, eth_dev, kvlist) < 0)
return -1;
/* Copy each RX queue's pcap handle and its devargs name/type strings
 * into the device internals (bounded snprintf copies). */
for (i = 0; i < nb_rx_queues; i++) {
- internals->rx_queue[i].pcap = rx_queues->pcaps[i];
- snprintf(internals->rx_queue[i].name,
- sizeof(internals->rx_queue[i].name), "%s",
+ (*internals)->rx_queue[i].pcap = rx_queues->pcaps[i];
+ snprintf((*internals)->rx_queue[i].name,
+ sizeof((*internals)->rx_queue[i].name), "%s",
rx_queues->names[i]);
- snprintf(internals->rx_queue[i].type,
- sizeof(internals->rx_queue[i].type), "%s",
+ snprintf((*internals)->rx_queue[i].type,
+ sizeof((*internals)->rx_queue[i].type), "%s",
rx_queues->types[i]);
}
/* Same for TX queues; the dumper slot is filled here, so this common
 * path serves the "dump to pcap file" flavour — the open-iface flavour
 * presumably overwrites/uses tx_queue[i].pcap elsewhere (not shown). */
for (i = 0; i < nb_tx_queues; i++) {
- internals->tx_queue[i].dumper = tx_queues->dumpers[i];
- snprintf(internals->tx_queue[i].name,
- sizeof(internals->tx_queue[i].name), "%s",
+ (*internals)->tx_queue[i].dumper = tx_queues->dumpers[i];
+ snprintf((*internals)->tx_queue[i].name,
+ sizeof((*internals)->tx_queue[i].name), "%s",
tx_queues->names[i]);
- snprintf(internals->tx_queue[i].type,
- sizeof(internals->tx_queue[i].type), "%s",
+ snprintf((*internals)->tx_queue[i].type,
+ sizeof((*internals)->tx_queue[i].type), "%s",
tx_queues->types[i]);
}
+ return 0;
+}
+
+/* Thin wrapper: delegate setup to the common helper, then mark the
+ * device as using multiple pcaps/interfaces. */
+static int
+rte_eth_from_pcaps_n_dumpers(const char *name,
+ struct rx_pcaps *rx_queues,
+ const unsigned nb_rx_queues,
+ struct tx_pcaps *tx_queues,
+ const unsigned nb_tx_queues,
+ const unsigned numa_node,
+ struct rte_kvargs *kvlist)
+{
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ int ret;
+
+ ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+ tx_queues, nb_tx_queues, numa_node, kvlist,
+ &internals, &eth_dev);
+
+ if (ret < 0)
+ return ret;
+
/* using multiple pcaps/interfaces */
internals->single_iface = 0;
/* rte_eth_from_pcaps hunk: its duplicated checks and copy loops are
 * replaced by a call to rte_eth_from_pcaps_common(). */
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
- unsigned i;
-
- /* do some parameter checking */
- if (rx_queues == NULL && nb_rx_queues > 0)
- return -1;
- if (tx_queues == NULL && nb_tx_queues > 0)
- return -1;
+ int ret;
- if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev, kvlist) < 0)
- return -1;
+ ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+ tx_queues, nb_tx_queues, numa_node, kvlist,
+ &internals, &eth_dev);
- for (i = 0; i < nb_rx_queues; i++) {
- internals->rx_queue[i].pcap = rx_queues->pcaps[i];
- snprintf(internals->rx_queue[i].name,
- sizeof(internals->rx_queue[i].name), "%s",
- rx_queues->names[i]);
- snprintf(internals->rx_queue[i].type,
- sizeof(internals->rx_queue[i].type), "%s",
- rx_queues->types[i]);
- }
- for (i = 0; i < nb_tx_queues; i++) {
- internals->tx_queue[i].dumper = tx_queues->dumpers[i];
- snprintf(internals->tx_queue[i].name,
- sizeof(internals->tx_queue[i].name), "%s",
- tx_queues->names[i]);
- snprintf(internals->tx_queue[i].type,
- sizeof(internals->tx_queue[i].type), "%s",
- tx_queues->types[i]);
- }
+ if (ret < 0)
+ return ret;
/* store wether we are using a single interface for rx/tx or not */
internals->single_iface = single_iface;
/* devinit hunk: pcaps/dumpers are now zero-initialized, presumably so
 * num_of_rx/num_of_tx are 0 when the corresponding devargs are absent —
 * confirm against the kvargs parsing (not shown). */
unsigned numa_node, using_dumpers = 0;
int ret;
struct rte_kvargs *kvlist;
- struct rx_pcaps pcaps;
- struct tx_pcaps dumpers;
+ struct rx_pcaps pcaps = {0};
+ struct tx_pcaps dumpers = {0};
RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
return 0;
}
/* Driver registration migrated to the vdev-specific API: rte_driver with
 * .name/.type = PMD_VDEV becomes rte_vdev_driver, the device is renamed
 * net_pcap, and the accepted devargs keys are declared so tooling
 * (pmdinfo) can report them. */
-static struct rte_driver pmd_pcap_drv = {
- .name = "eth_pcap",
- .type = PMD_VDEV,
+static struct rte_vdev_driver pmd_pcap_drv = {
.init = rte_pmd_pcap_devinit,
.uninit = rte_pmd_pcap_devuninit,
};
-PMD_REGISTER_DRIVER(pmd_pcap_drv);
+DRIVER_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
/* NOTE(review): DRIVER_REGISTER_VDEV/DRIVER_REGISTER_PARAM_STRING were
 * shortly renamed RTE_PMD_REGISTER_VDEV/RTE_PMD_REGISTER_PARAM_STRING
 * upstream — confirm the macro names for the DPDK version targeted. */
+DRIVER_REGISTER_PARAM_STRING(net_pcap,
+ ETH_PCAP_RX_PCAP_ARG "=<string> "
+ ETH_PCAP_TX_PCAP_ARG "=<string> "
+ ETH_PCAP_RX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_TX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_IFACE_ARG "=<ifc>");