int queue_id;
int port_id;
int cksum_enabled;
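+ /* SW-maintained counters for ethdev statistics, updated in the Rx burst path. */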
+ uint64_t bytes_recv;
+ uint64_t drop_mac;
};
struct mrvl_txq {
struct mrvl_priv *priv;
int queue_id;
int port_id;
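+ /* SW-maintained byte counter for ethdev statistics, updated in the Tx burst path. */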
+ uint64_t bytes_sent;
};
/*
return ret;
priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
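+ /* Ask MUSDK to keep the PPIO HW statistics maintained between reads (presumably so the narrow HW counters do not wrap). */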
+ priv->ppio_params.maintain_stats = 1;
priv->nb_rx_queues = dev->data->nb_rx_queues;
if (dev->data->nb_rx_queues == 1 &&
pp2_ppio_enable(priv->ppio);
}
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Stats structure output buffer.
+ */
+static void
+mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_ppio_statistics ppio_stats;
+ uint64_t drop_mac = 0;
+ unsigned int i, idx, ret;
+
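+ /* Per-queue Rx stats: bytes are counted in SW in the Rx burst path, packet and drop counts come from the per-inq HW statistics. */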
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+ struct pp2_ppio_inq_statistics rx_stats;
+
+ if (!rxq)
+ continue;
+
+ idx = rxq->queue_id;
+ if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+ RTE_LOG(ERR, PMD,
+ "rx queue %d stats out of range (0 - %d)\n",
+ idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ continue;
+ }
+
+ ret = pp2_ppio_inq_get_statistics(priv->ppio,
+ priv->rxq_map[idx].tc,
+ priv->rxq_map[idx].inq,
+ &rx_stats, 0);
+ if (unlikely(ret)) {
+ RTE_LOG(ERR, PMD,
+ "Failed to update rx queue %d stats\n", idx);
+ break;
+ }
+
+ stats->q_ibytes[idx] = rxq->bytes_recv;
+ stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
+ stats->q_errors[idx] = rx_stats.drop_early +
+ rx_stats.drop_fullq +
+ rx_stats.drop_bm +
+ rxq->drop_mac;
+ stats->ibytes += rxq->bytes_recv;
+ drop_mac += rxq->drop_mac;
+ }
+
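+ /* Per-queue Tx stats: packet counts come from the per-outq HW statistics, bytes from the SW counter updated in the Tx burst path. */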
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+ struct pp2_ppio_outq_statistics tx_stats;
+
+ if (!txq)
+ continue;
+
+ idx = txq->queue_id;
+ if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+ RTE_LOG(ERR, PMD,
+ "tx queue %d stats out of range (0 - %d)\n",
+ idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ }
+
+ ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
+ &tx_stats, 0);
+ if (unlikely(ret)) {
+ RTE_LOG(ERR, PMD,
+ "Failed to update tx queue %d stats\n", idx);
+ break;
+ }
+
+ stats->q_opackets[idx] = tx_stats.deq_desc;
+ stats->q_obytes[idx] = txq->bytes_sent;
+ stats->obytes += txq->bytes_sent;
+ }
+
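+ /* Port-wide counters come from the aggregate PPIO statistics; MAC-level drops counted in SW are excluded from ipackets and reported as ierrors. */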
+ ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+ if (unlikely(ret)) {
+ RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
+ return;
+ }
+
+ stats->ipackets += ppio_stats.rx_packets - drop_mac;
+ stats->opackets += ppio_stats.tx_packets;
+ stats->imissed += ppio_stats.rx_fullq_dropped +
+ ppio_stats.rx_bm_dropped +
+ ppio_stats.rx_early_dropped +
+ ppio_stats.rx_fifo_dropped +
+ ppio_stats.rx_cls_dropped;
+ stats->ierrors = drop_mac;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_stats_reset(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int i;
+
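+ /* Passing a NULL stats pointer with the reset flag set clears the HW per-inq counters; clear the SW byte and drop counters as well. */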
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
+ priv->rxq_map[i].inq, NULL, 1);
+ rxq->bytes_recv = 0;
+ rxq->drop_mac = 0;
+ }
+
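+ /* Same for Tx: reset the per-outq HW counters and the SW byte counter. */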
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
+ txq->bytes_sent = 0;
+ }
+
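+ /* Finally reset the aggregate per-port PPIO counters. */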
+ pp2_ppio_get_statistics(priv->ppio, NULL, 1);
+}
+
/**
* DPDK callback to get information about the device.
*
.mac_addr_add = mrvl_mac_addr_add,
.mac_addr_set = mrvl_mac_addr_set,
.mtu_set = mrvl_mtu_set,
+ .stats_get = mrvl_stats_get,
+ .stats_reset = mrvl_stats_reset,
.dev_infos_get = mrvl_dev_infos_get,
.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
.rxq_info_get = mrvl_rxq_info_get,
pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
mrvl_port_bpool_size
[bpool->pp2_id][bpool->id][core_id]++;
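+ /* Descriptor reported an error; the buffer was already returned to the pool, so count it as a MAC-level drop. */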
+ q->drop_mac++;
continue;
}
mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
rx_pkts[rx_done++] = mbuf;
+ q->bytes_recv += mbuf->pkt_len;
}
if (rte_spinlock_trylock(&q->priv->lock) == 1) {
struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
struct pp2_hif *hif = hifs[rte_lcore_id()];
struct pp2_ppio_desc descs[nb_pkts];
- int i, ret;
+ int i, ret, bytes_sent = 0;
uint16_t num, sq_free_size;
+ uint64_t addr;
if (unlikely(!q->priv->ppio))
return 0;
pp2_ppio_outq_desc_set_pkt_len(&descs[i],
rte_pktmbuf_pkt_len(mbuf));
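+ /* Count the bytes queued for transmission; corrected below for any descriptors the HW does not accept. */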
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
/*
* in case unsupported ol_flags were passed
* do not update descriptor offload information
for (i = nb_pkts; i < num; i++) {
sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
MRVL_PP2_TX_SHADOWQ_MASK;
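+ /* Descriptor was not accepted; recover the mbuf address from the shadow queue cookie and take its length back out of the byte count. */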
+ addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
+ bytes_sent -=
+ rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
}
sq->size -= num - nb_pkts;
}
+ q->bytes_sent += bytes_sent;
+
return nb_pkts;
}