volatile unsigned long pkts;
volatile unsigned long bytes;
volatile unsigned long err_pkts;
+ volatile unsigned long rx_nombuf;
};
struct queue_missed_stat {
.link_autoneg = ETH_LINK_FIXED,
};
-RTE_LOG_REGISTER(eth_pcap_logtype, pmd.net.pcap, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
static struct queue_missed_stat*
queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
break;
mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
- if (unlikely(mbuf == NULL))
+ if (unlikely(mbuf == NULL)) {
+ pcap_q->rx_stat.rx_nombuf++;
break;
+ }
if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
/* pcap packet will fit in the mbuf, can copy it */
mbuf,
packet,
header.caplen) == -1)) {
+ pcap_q->rx_stat.err_pkts++;
rte_pktmbuf_free(mbuf);
break;
}
unsigned int i;
unsigned long rx_packets_total = 0, rx_bytes_total = 0;
unsigned long rx_missed_total = 0;
+ unsigned long rx_nombuf_total = 0, rx_err_total = 0;
unsigned long tx_packets_total = 0, tx_bytes_total = 0;
unsigned long tx_packets_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
i < dev->data->nb_rx_queues; i++) {
stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
+ rx_nombuf_total += internal->rx_queue[i].rx_stat.rx_nombuf;
+ rx_err_total += internal->rx_queue[i].rx_stat.err_pkts;
rx_packets_total += stats->q_ipackets[i];
rx_bytes_total += stats->q_ibytes[i];
rx_missed_total += queue_missed_stat_get(dev, i);
stats->ipackets = rx_packets_total;
stats->ibytes = rx_bytes_total;
stats->imissed = rx_missed_total;
+ stats->ierrors = rx_err_total;
+ stats->rx_nombuf = rx_nombuf_total;
stats->opackets = tx_packets_total;
stats->obytes = tx_bytes_total;
stats->oerrors = tx_packets_err_total;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
internal->rx_queue[i].rx_stat.pkts = 0;
internal->rx_queue[i].rx_stat.bytes = 0;
+ internal->rx_queue[i].rx_stat.err_pkts = 0;
+ internal->rx_queue[i].rx_stat.rx_nombuf = 0;
queue_missed_stat_reset(dev, i);
}
return 0;
}
+/*
+ * eth_release_pcaps - close every pcap/dumper handle collected so far.
+ *
+ * @pcaps:        per-Rx-queue pcap handles (queue[i].pcap)
+ * @dumpers:      per-Tx-queue dumper handles (queue[i].dumper) and their
+ *                underlying pcap handles (queue[i].pcap)
+ * @single_iface: non-zero when a single shared handle in pcaps->queue[0].pcap
+ *                backs all queues; only that one handle is closed then
+ *
+ * Every handle is NULL-checked before closing, so this is safe to call on a
+ * partially initialized state — it is used on the probe error path, where
+ * only some queues may have been opened.
+ */
+static void
+eth_release_pcaps(struct pmd_devargs *pcaps,
+	struct pmd_devargs *dumpers,
+	int single_iface)
+{
+	unsigned int i;
+
+	/* Shared-handle mode: one pcap handle serves all queues. */
+	if (single_iface) {
+		if (pcaps->queue[0].pcap)
+			pcap_close(pcaps->queue[0].pcap);
+		return;
+	}
+
+	/* Tx side: close the dump writer before its underlying pcap handle. */
+	for (i = 0; i < dumpers->num_of_queue; i++) {
+		if (dumpers->queue[i].dumper)
+			pcap_dump_close(dumpers->queue[i].dumper);
+
+		if (dumpers->queue[i].pcap)
+			pcap_close(dumpers->queue[i].pcap);
+	}
+
+	/* Rx side: close each capture handle. */
+	for (i = 0; i < pcaps->num_of_queue; i++) {
+		if (pcaps->queue[i].pcap)
+			pcap_close(pcaps->queue[i].pcap);
+	}
+}
+
static int
pmd_pcap_probe(struct rte_vdev_device *dev)
{
free_kvlist:
rte_kvargs_free(kvlist);
+ if (ret < 0)
+ eth_release_pcaps(&pcaps, &dumpers, devargs_all.single_iface);
+
return ret;
}