volatile unsigned long err_pkts;
};
+/* Per-RX-queue state for the "missed" (imissed) counter.
+ *
+ * pcap reports drops via pcap_stats()'s ps_drop, a 32-bit counter that
+ * can roll over and that is lost when the pcap handle is closed (see the
+ * UINT_MAX rollover fixup and the on-stop accumulation helpers below).
+ * This struct carries enough extra state to present a stable cumulative
+ * value across stop/start cycles, rollover, and stats resets.
+ */
+struct queue_missed_stat {
+ /* last value retrieved from pcap (raw ps_drop snapshot) */
+ unsigned int pcap;
+ /* stores values lost by pcap stop or rollover */
+ unsigned long mnemonic;
+ /* value on last reset */
+ unsigned long reset;
+};
+
struct pcap_rx_queue {
uint16_t port_id;
uint16_t queue_id;
struct rte_mempool *mb_pool;
struct queue_stat rx_stat;
+ struct queue_missed_stat missed_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
"%s(): " fmt "\n", __func__, ##args)
+/* Refresh the cached pcap drop counter for queue @qid and return the
+ * queue's missed-stat state.
+ *
+ * Reads ps_drop via pcap_stats(); on failure (or when no pcap handle is
+ * open) the cached state is returned unchanged. When the 32-bit ps_drop
+ * counter is observed to have wrapped, the lost span is folded into
+ * 'mnemonic' so the cumulative value stays meaningful.
+ */
+static struct queue_missed_stat*
+queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct queue_missed_stat *missed_stat =
+ &internals->rx_queue[qid].missed_stat;
+ const struct pmd_process_private *pp = dev->process_private;
+ pcap_t *pcap = pp->rx_pcap[qid];
+ struct pcap_stat stat;
+
+ if (!pcap || (pcap_stats(pcap, &stat) != 0))
+ return missed_stat;
+
+ /* rollover check - best effort fixup assuming single rollover;
+  * a wrap of the 32-bit counter spans UINT_MAX + 1 counts, not
+  * UINT_MAX (previous code under-counted by one per rollover).
+  * NOTE(review): on platforms where unsigned long is 32 bits this
+  * fixup cannot represent the wrapped span at all - confirm that
+  * only 64-bit unsigned long targets rely on it.
+  */
+ if (stat.ps_drop < missed_stat->pcap)
+ missed_stat->mnemonic += (unsigned long)UINT_MAX + 1;
+ missed_stat->pcap = stat.ps_drop;
+
+ return missed_stat;
+}
+
+/* Called just before the queue's pcap handle is closed: closing the
+ * handle discards its ps_drop counter, so fold the current snapshot
+ * into the persistent 'mnemonic' accumulator and clear the snapshot.
+ */
+static void
+queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+ struct queue_missed_stat *ms = queue_missed_stat_update(dev, qid);
+
+ ms->mnemonic += ms->pcap;
+ ms->pcap = 0;
+}
+
+/* Reset the missed counter for queue @qid: take the live pcap snapshot
+ * as the new baseline and drop everything accumulated before now, so a
+ * subsequent queue_missed_stat_get() starts from zero.
+ */
+static void
+queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
+{
+ struct queue_missed_stat *ms = queue_missed_stat_update(dev, qid);
+
+ ms->reset = ms->pcap;
+ ms->mnemonic = 0;
+}
+
+/* Return the number of missed packets on queue @qid since the last
+ * reset: live pcap snapshot plus the carried-over accumulator, minus
+ * the baseline captured at reset time.
+ */
+static unsigned long
+queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
+{
+ const struct queue_missed_stat *ms = queue_missed_stat_update(dev, qid);
+
+ return ms->pcap + ms->mnemonic - ms->reset;
+}
+
static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
const u_char *data, uint16_t data_len)
/* Special iface case. Single pcap is open and shared between tx/rx. */
if (internals->single_iface) {
+ queue_missed_stat_on_stop_update(dev, 0);
pcap_close(pp->tx_pcap[0]);
pp->tx_pcap[0] = NULL;
pp->rx_pcap[0] = NULL;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (pp->rx_pcap[i] != NULL) {
+ queue_missed_stat_on_stop_update(dev, i);
pcap_close(pp->rx_pcap[i]);
pp->rx_pcap[i] = NULL;
}
{
unsigned int i;
unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+ unsigned long rx_missed_total = 0;
unsigned long tx_packets_total = 0, tx_bytes_total = 0;
unsigned long tx_packets_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
rx_packets_total += stats->q_ipackets[i];
rx_bytes_total += stats->q_ibytes[i];
+ rx_missed_total += queue_missed_stat_get(dev, i);
}
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
stats->ipackets = rx_packets_total;
stats->ibytes = rx_bytes_total;
+ stats->imissed = rx_missed_total;
stats->opackets = tx_packets_total;
stats->obytes = tx_bytes_total;
stats->oerrors = tx_packets_err_total;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
internal->rx_queue[i].rx_stat.pkts = 0;
internal->rx_queue[i].rx_stat.bytes = 0;
+ queue_missed_stat_reset(dev, i);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {