From: Jiawen Wu
Date: Mon, 19 Oct 2020 08:53:52 +0000 (+0800)
Subject: net/txgbe: add queue stats mapping
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=c1d4e9d37abdc6c07a05f7d96928e624fea9ebb5;p=dpdk.git

net/txgbe: add queue stats mapping

Add queue stats mapping set, and clear hardware counters.

Signed-off-by: Jiawen Wu
Reviewed-by: Ferruh Yigit
---

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index e18632205f..ad7513eca6 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -25,6 +25,7 @@ Inner L4 checksum    = P
 Packet type parsing  = Y
 Basic stats          = Y
 Extended stats       = Y
+Stats per queue      = Y
 Multiprocess aware   = Y
 Linux UIO            = Y
 Linux VFIO           = Y
diff --git a/drivers/net/txgbe/base/txgbe_hw.c b/drivers/net/txgbe/base/txgbe_hw.c
index afd7172a9c..5e81464e96 100644
--- a/drivers/net/txgbe/base/txgbe_hw.c
+++ b/drivers/net/txgbe/base/txgbe_hw.c
@@ -115,6 +115,123 @@ s32 txgbe_init_hw(struct txgbe_hw *hw)
 	return status;
 }
 
+/**
+ * txgbe_clear_hw_cntrs - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw)
+{
+	u16 i = 0;
+
+	DEBUGFUNC("txgbe_clear_hw_cntrs");
+
+	/* QP Stats */
+	/* don't write clear queue stats */
+	for (i = 0; i < TXGBE_MAX_QP; i++) {
+		hw->qp_last[i].rx_qp_packets = 0;
+		hw->qp_last[i].tx_qp_packets = 0;
+		hw->qp_last[i].rx_qp_bytes = 0;
+		hw->qp_last[i].tx_qp_bytes = 0;
+		hw->qp_last[i].rx_qp_mc_packets = 0;
+	}
+
+	/* PB Stats */
+	for (i = 0; i < TXGBE_MAX_UP; i++) {
+		rd32(hw, TXGBE_PBRXUPXON(i));
+		rd32(hw, TXGBE_PBRXUPXOFF(i));
+		rd32(hw, TXGBE_PBTXUPXON(i));
+		rd32(hw, TXGBE_PBTXUPXOFF(i));
+		rd32(hw, TXGBE_PBTXUPOFF(i));
+
+		rd32(hw, TXGBE_PBRXMISS(i));
+	}
+	rd32(hw, TXGBE_PBRXLNKXON);
+	rd32(hw, TXGBE_PBRXLNKXOFF);
+	rd32(hw, TXGBE_PBTXLNKXON);
+	rd32(hw, TXGBE_PBTXLNKXOFF);
+
+	/* DMA Stats */
+	rd32(hw, TXGBE_DMARXPKT);
+	rd32(hw, TXGBE_DMATXPKT);
+
+	rd64(hw, TXGBE_DMARXOCTL);
+	rd64(hw, TXGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	rd64(hw, TXGBE_MACRXERRCRCL);
+	rd64(hw, TXGBE_MACRXMPKTL);
+	rd64(hw, TXGBE_MACTXMPKTL);
+
+	rd64(hw, TXGBE_MACRXPKTL);
+	rd64(hw, TXGBE_MACTXPKTL);
+	rd64(hw, TXGBE_MACRXGBOCTL);
+
+	rd64(hw, TXGBE_MACRXOCTL);
+	rd32(hw, TXGBE_MACTXOCTL);
+
+	rd64(hw, TXGBE_MACRX1TO64L);
+	rd64(hw, TXGBE_MACRX65TO127L);
+	rd64(hw, TXGBE_MACRX128TO255L);
+	rd64(hw, TXGBE_MACRX256TO511L);
+	rd64(hw, TXGBE_MACRX512TO1023L);
+	rd64(hw, TXGBE_MACRX1024TOMAXL);
+	rd64(hw, TXGBE_MACTX1TO64L);
+	rd64(hw, TXGBE_MACTX65TO127L);
+	rd64(hw, TXGBE_MACTX128TO255L);
+	rd64(hw, TXGBE_MACTX256TO511L);
+	rd64(hw, TXGBE_MACTX512TO1023L);
+	rd64(hw, TXGBE_MACTX1024TOMAXL);
+
+	rd64(hw, TXGBE_MACRXERRLENL);
+	rd32(hw, TXGBE_MACRXOVERSIZE);
+	rd32(hw, TXGBE_MACRXJABBER);
+
+	/* FCoE Stats */
+	rd32(hw, TXGBE_FCOECRC);
+	rd32(hw, TXGBE_FCOELAST);
+	rd32(hw, TXGBE_FCOERPDC);
+	rd32(hw, TXGBE_FCOEPRC);
+	rd32(hw, TXGBE_FCOEPTC);
+	rd32(hw, TXGBE_FCOEDWRC);
+	rd32(hw, TXGBE_FCOEDWTC);
+
+	/* Flow Director Stats */
+	rd32(hw, TXGBE_FDIRMATCH);
+	rd32(hw, TXGBE_FDIRMISS);
+	rd32(hw, TXGBE_FDIRUSED);
+	rd32(hw, TXGBE_FDIRUSED);
+	rd32(hw, TXGBE_FDIRFAIL);
+	rd32(hw, TXGBE_FDIRFAIL);
+
+	/* MACsec Stats */
+	rd32(hw, TXGBE_LSECTX_UTPKT);
+	rd32(hw, TXGBE_LSECTX_ENCPKT);
+	rd32(hw, TXGBE_LSECTX_PROTPKT);
+	rd32(hw, TXGBE_LSECTX_ENCOCT);
+	rd32(hw, TXGBE_LSECTX_PROTOCT);
+	rd32(hw, TXGBE_LSECRX_UTPKT);
+	rd32(hw, TXGBE_LSECRX_BTPKT);
+	rd32(hw, TXGBE_LSECRX_NOSCIPKT);
+	rd32(hw, TXGBE_LSECRX_UNSCIPKT);
+	rd32(hw, TXGBE_LSECRX_DECOCT);
+	rd32(hw, TXGBE_LSECRX_VLDOCT);
+	rd32(hw, TXGBE_LSECRX_UNCHKPKT);
+	rd32(hw, TXGBE_LSECRX_DLYPKT);
+	rd32(hw, TXGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		rd32(hw, TXGBE_LSECRX_OKPKT(i));
+		rd32(hw, TXGBE_LSECRX_INVPKT(i));
+		rd32(hw, TXGBE_LSECRX_BADPKT(i));
+	}
+	rd32(hw, TXGBE_LSECRX_INVSAPKT);
+	rd32(hw, TXGBE_LSECRX_BADSAPKT);
+
+	return 0;
+}
+
 /**
  * txgbe_get_mac_addr - Generic get MAC address
  * @hw: pointer to hardware structure
@@ -1455,6 +1572,7 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
 	/* MAC */
 	mac->init_hw = txgbe_init_hw;
 	mac->start_hw = txgbe_start_hw_raptor;
+	mac->clear_hw_cntrs = txgbe_clear_hw_cntrs;
 	mac->enable_rx_dma = txgbe_enable_rx_dma_raptor;
 	mac->get_mac_addr = txgbe_get_mac_addr;
 	mac->stop_hw = txgbe_stop_hw;
diff --git a/drivers/net/txgbe/base/txgbe_hw.h b/drivers/net/txgbe/base/txgbe_hw.h
index f0435976dc..48543b9512 100644
--- a/drivers/net/txgbe/base/txgbe_hw.h
+++ b/drivers/net/txgbe/base/txgbe_hw.h
@@ -11,6 +11,7 @@ s32 txgbe_init_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw(struct txgbe_hw *hw);
 s32 txgbe_stop_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw_gen2(struct txgbe_hw *hw);
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw);
 s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr);
 void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw);
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index c11c29116c..961138c816 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -261,6 +261,60 @@ txgbe_disable_intr(struct txgbe_hw *hw)
 	txgbe_flush(hw);
 }
 
+static int
+txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  uint16_t queue_id,
+				  uint8_t stat_idx,
+				  uint8_t is_rx)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+	struct txgbe_stat_mappings *stat_mappings =
+		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
+	uint32_t qsmr_mask = 0;
+	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+	uint32_t q_map;
+	uint8_t n, offset;
+
+	if (hw->mac.type != txgbe_mac_raptor)
+		return -ENOSYS;
+
+	if (stat_idx & !QMAP_FIELD_RESERVED_BITS_MASK)
+		return -EIO;
+
+	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+
+	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+	if (n >= TXGBE_NB_STAT_MAPPING) {
+		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+		return -EIO;
+	}
+	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
+
+	/* Now clear any previous stat_idx set */
+	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] &= ~clearing_mask;
+	else
+		stat_mappings->rqsm[n] &= ~clearing_mask;
+
+	q_map = (uint32_t)stat_idx;
+	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] |= qsmr_mask;
+	else
+		stat_mappings->rqsm[n] |= qsmr_mask;
+
+	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
+	return 0;
+}
+
 static int
 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
@@ -2374,6 +2428,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.xstats_reset               = txgbe_dev_xstats_reset,
 	.xstats_get_names           = txgbe_dev_xstats_get_names,
 	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
+	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
 	.rx_queue_start             = txgbe_dev_rx_queue_start,
 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
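
For context, the new queue_stats_mapping_set callback is reached through the generic ethdev API
(rte_eth_dev_set_rx_queue_stats_mapping / rte_eth_dev_set_tx_queue_stats_mapping). Below is a
minimal usage sketch; it is not part of the patch, and the port, queue and stat-index values are
illustrative only:

/* Minimal usage sketch (not part of the patch): map RX/TX queue 0 of a
 * port into per-queue stat slot 0 via the generic ethdev API, which ends
 * up in the PMD's queue_stats_mapping_set callback added above.
 * Port/queue/stat-index values are illustrative only.
 */
#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>

static int
example_map_queue_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret;

	/* Count RX queue 0 and TX queue 0 in per-queue stat slot 0. */
	ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 0);
	if (ret != 0)
		return ret;

	/* After traffic has run, the mapped slots carry per-queue counters. */
	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0)
		return ret;

	printf("queue 0: rx %" PRIu64 " pkts, tx %" PRIu64 " pkts\n",
	       stats.q_ipackets[0], stats.q_opackets[0]);
	return 0;
}

Callers should check the return values: per the driver code above, the mapping is rejected with
-ENOSYS on MAC types other than raptor and with -EIO when the stat index or mapping-register
index is out of range.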