/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* Copyright(c) 2014 6WIND S.A.
* All rights reserved.
*
uint8_t in_port;
struct rte_mempool *mb_pool;
volatile unsigned long rx_pkts;
+ volatile unsigned long rx_bytes;
volatile unsigned long err_pkts;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
pcap_dumper_t *dumper;
pcap_t *pcap;
volatile unsigned long tx_pkts;
+ volatile unsigned long tx_bytes;
volatile unsigned long err_pkts;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
struct pmd_internals {
struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
- unsigned nb_rx_queues;
- unsigned nb_tx_queues;
int if_index;
int single_iface;
};
static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
- .link_speed = 10000,
+ .link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = 0
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_SPEED_FIXED,
};
static int
struct pcap_rx_queue *pcap_q = queue;
uint16_t num_rx = 0;
uint16_t buf_size;
+ uint32_t rx_bytes = 0;
if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
return 0;
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
RTE_PKTMBUF_HEADROOM);
- if (header.len <= buf_size) {
+ if (header.caplen <= buf_size) {
/* pcap packet will fit in the mbuf, go ahead and copy */
rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
- header.len);
- mbuf->data_len = (uint16_t)header.len;
+ header.caplen);
+ mbuf->data_len = (uint16_t)header.caplen;
} else {
/* Try read jumbo frame into multi mbufs. */
if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
mbuf,
packet,
- header.len) == -1))
+ header.caplen) == -1))
break;
}
- mbuf->pkt_len = (uint16_t)header.len;
+ mbuf->pkt_len = (uint16_t)header.caplen;
mbuf->port = pcap_q->in_port;
bufs[num_rx] = mbuf;
num_rx++;
+ rx_bytes += header.caplen;
}
pcap_q->rx_pkts += num_rx;
+ pcap_q->rx_bytes += rx_bytes;
return num_rx;
}
struct rte_mbuf *mbuf;
struct pcap_tx_queue *dumper_q = queue;
uint16_t num_tx = 0;
+ uint32_t tx_bytes = 0;
struct pcap_pkthdr header;
if (dumper_q->dumper == NULL || nb_pkts == 0)
rte_pktmbuf_free(mbuf);
num_tx++;
+ tx_bytes += mbuf->pkt_len;
}
/*
*/
pcap_dump_flush(dumper_q->dumper);
dumper_q->tx_pkts += num_tx;
+ dumper_q->tx_bytes += tx_bytes;
dumper_q->err_pkts += nb_pkts - num_tx;
return num_tx;
}
struct rte_mbuf *mbuf;
struct pcap_tx_queue *tx_queue = queue;
uint16_t num_tx = 0;
+ uint32_t tx_bytes = 0;
if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
return 0;
if (unlikely(ret != 0))
break;
num_tx++;
+ tx_bytes += mbuf->pkt_len;
rte_pktmbuf_free(mbuf);
}
tx_queue->tx_pkts += num_tx;
+ tx_queue->tx_bytes += tx_bytes;
tx_queue->err_pkts += nb_pkts - num_tx;
return num_tx;
}
}
/* If not open already, open tx pcaps/dumpers */
- for (i = 0; i < internals->nb_tx_queues; i++) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
tx = &internals->tx_queue[i];
if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
}
/* If not open already, open rx pcaps */
- for (i = 0; i < internals->nb_rx_queues; i++) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
rx = &internals->rx_queue[i];
if (rx->pcap != NULL)
status_up:
- dev->data->dev_link.link_status = 1;
+ dev->data->dev_link.link_status = ETH_LINK_UP;
return 0;
}
goto status_down;
}
- for (i = 0; i < internals->nb_tx_queues; i++) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
tx = &internals->tx_queue[i];
if (tx->dumper != NULL) {
}
}
- for (i = 0; i < internals->nb_rx_queues; i++) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
rx = &internals->rx_queue[i];
if (rx->pcap != NULL) {
}
status_down:
- dev->data->dev_link.link_status = 0;
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t) -1;
- dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
- dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
+ dev_info->max_rx_queues = dev->data->nb_rx_queues;
+ dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = NULL;
}
struct rte_eth_stats *igb_stats)
{
unsigned i;
- unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+ unsigned long tx_packets_total = 0, tx_bytes_total = 0;
+ unsigned long tx_packets_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
- i++) {
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_rx_queues; i++) {
igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
- rx_total += igb_stats->q_ipackets[i];
+ igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
+ rx_packets_total += igb_stats->q_ipackets[i];
+ rx_bytes_total += igb_stats->q_ibytes[i];
}
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
- i++) {
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < dev->data->nb_tx_queues; i++) {
igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
+ igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
- tx_total += igb_stats->q_opackets[i];
- tx_err_total += igb_stats->q_errors[i];
+ tx_packets_total += igb_stats->q_opackets[i];
+ tx_bytes_total += igb_stats->q_obytes[i];
+ tx_packets_err_total += igb_stats->q_errors[i];
}
- igb_stats->ipackets = rx_total;
- igb_stats->opackets = tx_total;
- igb_stats->oerrors = tx_err_total;
+ igb_stats->ipackets = rx_packets_total;
+ igb_stats->ibytes = rx_bytes_total;
+ igb_stats->opackets = tx_packets_total;
+ igb_stats->obytes = tx_bytes_total;
+ igb_stats->oerrors = tx_packets_err_total;
}
static void
{
unsigned i;
struct pmd_internals *internal = dev->data->dev_private;
- for (i = 0; i < internal->nb_rx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
internal->rx_queue[i].rx_pkts = 0;
- for (i = 0; i < internal->nb_tx_queues; i++) {
+ internal->rx_queue[i].rx_bytes = 0;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
internal->tx_queue[i].tx_pkts = 0;
+ internal->tx_queue[i].tx_bytes = 0;
internal->tx_queue[i].err_pkts = 0;
}
}
.stats_reset = eth_stats_reset,
};
-static struct eth_driver rte_pcap_pmd = {
- .pci_drv = {
- .name = "rte_pcap_pmd",
- .drv_flags = RTE_PCI_DRV_DETACHABLE,
- },
-};
-
/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
struct rte_kvargs *kvlist)
{
struct rte_eth_dev_data *data = NULL;
- struct rte_pci_device *pci_dev = NULL;
unsigned k_idx;
struct rte_kvargs_pair *pair = NULL;
RTE_LOG(INFO, PMD,
"Creating pcap-backed ethdev on numa socket %u\n", numa_node);
- /* now do all data allocation - for eth_dev structure, dummy pci driver
+ /* now do all data allocation - for eth_dev structure
* and internal (private) data
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
goto error;
- pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
- if (pci_dev == NULL)
- goto error;
-
*internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
if (*internals == NULL)
goto error;
/* now put it all together
* - store queue data in internals,
- * - store numa_node info in pci_driver
- * - point eth_dev_data to internals and pci_driver
+ * - store numa_node info in eth_dev
+ * - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
/* NOTE: we'll replace the data element, of originally allocated eth_dev
* so the rings are local per-process */
- (*internals)->nb_rx_queues = nb_rx_queues;
- (*internals)->nb_tx_queues = nb_tx_queues;
-
if (pair == NULL)
(*internals)->if_index = 0;
else
(*internals)->if_index = if_nametoindex(pair->value);
- pci_dev->numa_node = numa_node;
-
data->dev_private = *internals;
data->port_id = (*eth_dev)->data->port_id;
snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->pci_dev = pci_dev;
- (*eth_dev)->driver = &rte_pcap_pmd;
+ (*eth_dev)->driver = NULL;
+ data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ data->kdrv = RTE_KDRV_NONE;
+ data->drv_name = drivername;
+ data->numa_node = numa_node;
return 0;
error:
rte_free(data);
- rte_free(pci_dev);
rte_free(*internals);
return -1;
}
static int
-rte_eth_from_pcaps_n_dumpers(const char *name,
- struct rx_pcaps *rx_queues,
- const unsigned nb_rx_queues,
- struct tx_pcaps *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist)
+rte_eth_from_pcaps_common(const char *name, struct rx_pcaps *rx_queues,
+ const unsigned nb_rx_queues, struct tx_pcaps *tx_queues,
+ const unsigned nb_tx_queues, const unsigned numa_node,
+ struct rte_kvargs *kvlist, struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev)
{
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
unsigned i;
/* do some parameter checking */
return -1;
if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev, kvlist) < 0)
+ internals, eth_dev, kvlist) < 0)
return -1;
for (i = 0; i < nb_rx_queues; i++) {
- internals->rx_queue[i].pcap = rx_queues->pcaps[i];
- snprintf(internals->rx_queue[i].name,
- sizeof(internals->rx_queue[i].name), "%s",
+ (*internals)->rx_queue[i].pcap = rx_queues->pcaps[i];
+ snprintf((*internals)->rx_queue[i].name,
+ sizeof((*internals)->rx_queue[i].name), "%s",
rx_queues->names[i]);
- snprintf(internals->rx_queue[i].type,
- sizeof(internals->rx_queue[i].type), "%s",
+ snprintf((*internals)->rx_queue[i].type,
+ sizeof((*internals)->rx_queue[i].type), "%s",
rx_queues->types[i]);
}
for (i = 0; i < nb_tx_queues; i++) {
- internals->tx_queue[i].dumper = tx_queues->dumpers[i];
- snprintf(internals->tx_queue[i].name,
- sizeof(internals->tx_queue[i].name), "%s",
+ (*internals)->tx_queue[i].dumper = tx_queues->dumpers[i];
+ snprintf((*internals)->tx_queue[i].name,
+ sizeof((*internals)->tx_queue[i].name), "%s",
tx_queues->names[i]);
- snprintf(internals->tx_queue[i].type,
- sizeof(internals->tx_queue[i].type), "%s",
+ snprintf((*internals)->tx_queue[i].type,
+ sizeof((*internals)->tx_queue[i].type), "%s",
tx_queues->types[i]);
}
+ return 0;
+}
+
+static int
+rte_eth_from_pcaps_n_dumpers(const char *name,
+ struct rx_pcaps *rx_queues,
+ const unsigned nb_rx_queues,
+ struct tx_pcaps *tx_queues,
+ const unsigned nb_tx_queues,
+ const unsigned numa_node,
+ struct rte_kvargs *kvlist)
+{
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ int ret;
+
+ ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+ tx_queues, nb_tx_queues, numa_node, kvlist,
+ &internals, &eth_dev);
+
+ if (ret < 0)
+ return ret;
+
/* using multiple pcaps/interfaces */
internals->single_iface = 0;
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
- unsigned i;
-
- /* do some parameter checking */
- if (rx_queues == NULL && nb_rx_queues > 0)
- return -1;
- if (tx_queues == NULL && nb_tx_queues > 0)
- return -1;
+ int ret;
- if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev, kvlist) < 0)
- return -1;
+ ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+ tx_queues, nb_tx_queues, numa_node, kvlist,
+ &internals, &eth_dev);
- for (i = 0; i < nb_rx_queues; i++) {
- internals->rx_queue[i].pcap = rx_queues->pcaps[i];
- snprintf(internals->rx_queue[i].name,
- sizeof(internals->rx_queue[i].name), "%s",
- rx_queues->names[i]);
- snprintf(internals->rx_queue[i].type,
- sizeof(internals->rx_queue[i].type), "%s",
- rx_queues->types[i]);
- }
- for (i = 0; i < nb_tx_queues; i++) {
- internals->tx_queue[i].dumper = tx_queues->dumpers[i];
- snprintf(internals->tx_queue[i].name,
- sizeof(internals->tx_queue[i].name), "%s",
- tx_queues->names[i]);
- snprintf(internals->tx_queue[i].type,
- sizeof(internals->tx_queue[i].type), "%s",
- tx_queues->types[i]);
- }
+ if (ret < 0)
+ return ret;
/* store whether we are using a single interface for rx/tx or not */
internals->single_iface = single_iface;
unsigned numa_node, using_dumpers = 0;
int ret;
struct rte_kvargs *kvlist;
- struct rx_pcaps pcaps;
- struct tx_pcaps dumpers;
+ struct rx_pcaps pcaps = {0};
+ struct tx_pcaps dumpers = {0};
RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
rte_free(eth_dev->data->dev_private);
rte_free(eth_dev->data);
- rte_free(eth_dev->pci_dev);
rte_eth_dev_release_port(eth_dev);