eal: add timespec_get shim
[dpdk.git] / drivers / net / pcap / rte_eth_pcap.c
index ff02ade..ef50d08 100644 (file)
@@ -24,6 +24,7 @@
 #include <rte_kvargs.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
 #include <rte_bus_vdev.h>
 #include <rte_string_fns.h>
 
@@ -60,11 +61,21 @@ struct queue_stat {
        volatile unsigned long err_pkts;
 };
 
+/*
+ * Per-RX-queue bookkeeping for packets dropped by libpcap, used to feed
+ * the ethdev imissed counter. pcap's ps_drop is only 32 bits wide and is
+ * lost when the pcap handle is closed, so overflow/stop amounts are
+ * accumulated separately.
+ */
+struct queue_missed_stat {
+       /* last ps_drop value retrieved from pcap_stats() */
+       unsigned int pcap;
+       /* stores values lost by pcap stop or rollover of the 32-bit counter */
+       unsigned long mnemonic;
+       /* total (pcap counter) value captured at the last stats reset */
+       unsigned long reset;
+};
+
 struct pcap_rx_queue {
        uint16_t port_id;
        uint16_t queue_id;
        struct rte_mempool *mb_pool;
        struct queue_stat rx_stat;
+       struct queue_missed_stat missed_stat;
        char name[PATH_MAX];
        char type[ETH_PCAP_ARG_MAXLEN];
 
@@ -144,6 +155,56 @@ RTE_LOG_REGISTER(eth_pcap_logtype, pmd.net.pcap, NOTICE);
        rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
                "%s(): " fmt "\n", __func__, ##args)
 
+/*
+ * Refresh queue qid's missed-packet counters from libpcap and return a
+ * pointer to them. If the pcap handle is absent (device stopped) or
+ * pcap_stats() fails, the cached counters are returned unchanged.
+ */
+static struct queue_missed_stat*
+queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+       struct pmd_internals *internals = dev->data->dev_private;
+       struct queue_missed_stat *missed_stat =
+                       &internals->rx_queue[qid].missed_stat;
+       const struct pmd_process_private *pp = dev->process_private;
+       pcap_t *pcap = pp->rx_pcap[qid];
+       struct pcap_stat stat;
+
+       /* best effort: keep last-known values when pcap cannot be queried */
+       if (!pcap || (pcap_stats(pcap, &stat) != 0))
+               return missed_stat;
+
+       /* rollover check - best effort fixup assuming single rollover */
+       /*
+        * NOTE(review): ps_drop is a 32-bit counter, so one full wrap spans
+        * UINT_MAX + 1; adding UINT_MAX undercounts by one per rollover —
+        * confirm this is the intended best-effort approximation.
+        */
+       if (stat.ps_drop < missed_stat->pcap)
+               missed_stat->mnemonic += UINT_MAX;
+       missed_stat->pcap = stat.ps_drop;
+
+       return missed_stat;
+}
+
+/*
+ * Fold the current pcap drop count into the persistent 'mnemonic'
+ * accumulator before the pcap handle is closed on device stop; the
+ * live counter restarts at zero once a new handle is opened.
+ */
+static void
+queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+       struct queue_missed_stat *missed_stat =
+                       queue_missed_stat_update(dev, qid);
+
+       missed_stat->mnemonic += missed_stat->pcap;
+       missed_stat->pcap = 0;
+}
+
+/*
+ * Reset queue qid's missed counter: remember the current pcap value as
+ * the new baseline and discard everything accumulated so far, so that
+ * queue_missed_stat_get() reports zero until new drops occur.
+ */
+static void
+queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
+{
+       struct queue_missed_stat *missed_stat =
+                       queue_missed_stat_update(dev, qid);
+
+       missed_stat->reset = missed_stat->pcap;
+       missed_stat->mnemonic = 0;
+}
+
+/*
+ * Return the number of packets missed on queue qid since the last
+ * stats reset: live pcap counter plus rollover/stop carry-over, minus
+ * the baseline captured at reset time.
+ */
+static unsigned long
+queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
+{
+       const struct queue_missed_stat *missed_stat =
+                       queue_missed_stat_update(dev, qid);
+
+       return missed_stat->pcap + missed_stat->mnemonic - missed_stat->reset;
+}
+
 static int
 eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
                const u_char *data, uint16_t data_len)
@@ -386,7 +447,7 @@ eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                return 0;
 
        for (i = 0; i < nb_pkts; i++) {
-               tx_bytes += bufs[i]->data_len;
+               tx_bytes += bufs[i]->pkt_len;
                rte_pktmbuf_free(bufs[i]);
        }
 
@@ -621,9 +682,12 @@ eth_dev_stop(struct rte_eth_dev *dev)
 
        /* Special iface case. Single pcap is open and shared between tx/rx. */
        if (internals->single_iface) {
-               pcap_close(pp->tx_pcap[0]);
-               pp->tx_pcap[0] = NULL;
-               pp->rx_pcap[0] = NULL;
+               queue_missed_stat_on_stop_update(dev, 0);
+               if (pp->tx_pcap[0] != NULL) {
+                       pcap_close(pp->tx_pcap[0]);
+                       pp->tx_pcap[0] = NULL;
+                       pp->rx_pcap[0] = NULL;
+               }
                goto status_down;
        }
 
@@ -641,6 +705,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (pp->rx_pcap[i] != NULL) {
+                       queue_missed_stat_on_stop_update(dev, i);
                        pcap_close(pp->rx_pcap[i]);
                        pp->rx_pcap[i] = NULL;
                }
@@ -685,6 +750,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        unsigned int i;
        unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+       unsigned long rx_missed_total = 0;
        unsigned long tx_packets_total = 0, tx_bytes_total = 0;
        unsigned long tx_packets_err_total = 0;
        const struct pmd_internals *internal = dev->data->dev_private;
@@ -695,6 +761,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
                rx_packets_total += stats->q_ipackets[i];
                rx_bytes_total += stats->q_ibytes[i];
+               rx_missed_total += queue_missed_stat_get(dev, i);
        }
 
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
@@ -708,6 +775,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        stats->ipackets = rx_packets_total;
        stats->ibytes = rx_bytes_total;
+       stats->imissed = rx_missed_total;
        stats->opackets = tx_packets_total;
        stats->obytes = tx_bytes_total;
        stats->oerrors = tx_packets_err_total;
@@ -724,6 +792,7 @@ eth_stats_reset(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                internal->rx_queue[i].rx_stat.pkts = 0;
                internal->rx_queue[i].rx_stat.bytes = 0;
+               queue_missed_stat_reset(dev, i);
        }
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -735,6 +804,17 @@ eth_stats_reset(struct rte_eth_dev *dev)
        return 0;
 }
 
+/*
+ * Drain and destroy an infinite-rx packet ring: free every mbuf still
+ * queued, then release the ring itself. Shared by queue setup failure
+ * paths and device close.
+ */
+static inline void
+infinite_rx_ring_free(struct rte_ring *pkts)
+{
+       struct rte_mbuf *bufs;
+
+       while (!rte_ring_dequeue(pkts, (void **)&bufs))
+               rte_pktmbuf_free(bufs);
+
+       rte_ring_free(pkts);
+}
+
 static int
 eth_dev_close(struct rte_eth_dev *dev)
 {
@@ -744,6 +824,8 @@ eth_dev_close(struct rte_eth_dev *dev)
        PMD_LOG(INFO, "Closing pcap ethdev on NUMA socket %d",
                        rte_socket_id());
 
+       eth_dev_stop(dev);
+
        rte_free(dev->process_private);
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -753,7 +835,6 @@ eth_dev_close(struct rte_eth_dev *dev)
        if (internals->infinite_rx) {
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];
-                       struct rte_mbuf *pcap_buf;
 
                        /*
                         * 'pcap_q->pkts' can be NULL if 'eth_dev_close()'
@@ -762,11 +843,7 @@ eth_dev_close(struct rte_eth_dev *dev)
                        if (pcap_q->pkts == NULL)
                                continue;
 
-                       while (!rte_ring_dequeue(pcap_q->pkts,
-                                       (void **)&pcap_buf))
-                               rte_pktmbuf_free(pcap_buf);
-
-                       rte_ring_free(pcap_q->pkts);
+                       infinite_rx_ring_free(pcap_q->pkts);
                }
        }
 
@@ -821,7 +898,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 
                pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);
 
-               snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16,
+               snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu32,
                                ring_number);
 
                pcap_q->pkts = rte_ring_create(ring_name,
@@ -835,21 +912,25 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
                while (eth_pcap_rx(pcap_q, bufs, 1)) {
                        /* Check for multiseg mbufs. */
                        if (bufs[0]->nb_segs != 1) {
-                               rte_pktmbuf_free(*bufs);
-
-                               while (!rte_ring_dequeue(pcap_q->pkts,
-                                               (void **)bufs))
-                                       rte_pktmbuf_free(*bufs);
-
-                               rte_ring_free(pcap_q->pkts);
-                               PMD_LOG(ERR, "Multiseg mbufs are not supported in infinite_rx "
-                                               "mode.");
+                               infinite_rx_ring_free(pcap_q->pkts);
+                               PMD_LOG(ERR,
+                                       "Multiseg mbufs are not supported in infinite_rx mode.");
                                return -EINVAL;
                        }
 
                        rte_ring_enqueue_bulk(pcap_q->pkts,
                                        (void * const *)bufs, 1, NULL);
                }
+
+               if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
+                       infinite_rx_ring_free(pcap_q->pkts);
+                       PMD_LOG(ERR,
+                               "Not enough mbufs to accommodate packets in pcap file. "
+                               "At least %" PRIu64 " mbufs per queue is required.",
+                               pcap_pkt_count);
+                       return -EINVAL;
+               }
+
                /*
                 * Reset the stats for this queue since eth_pcap_rx calls above
                 * didn't result in the application receiving packets.