struct rte_mbuf *nmb; /* pointer of the new mbuf */
struct rte_mbuf *rxm;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t ol_info;
uint64_t dma_addr;
/* Load remained descriptor data and extract necessary fields */
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
- ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
- l234_info, &cksum_err);
+ ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
if (unlikely(ret))
goto pkt_err;
if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
- if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
- hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
- cksum_err);
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
/* Increment bytes counter */
struct rte_mbuf *rxm;
struct rte_eth_dev *dev;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t gro_size;
uint32_t ol_info;
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
- l234_info, &cksum_err);
+ l234_info);
if (unlikely(ret))
goto pkt_err;
first_seg->packet_type = hns3_rx_calc_ptype(rxq,
l234_info, ol_info);
- if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
- hns3_rx_set_cksum_flag(first_seg,
- first_seg->packet_type,
- cksum_err);
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
/* Increment bytes counter */
extern uint64_t hns3_timestamp_rx_dynflag;
extern int hns3_timestamp_dynfield_offset;
-static inline int
-hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
- uint32_t bd_base_info, uint32_t l234_info,
- uint32_t *cksum_err)
+static inline void
+hns3_rx_set_cksum_flag(struct hns3_rx_queue *rxq,
+ struct rte_mbuf *rxm,
+ uint32_t l234_info)
{
-#define L2E_TRUNC_ERR_FLAG (BIT(HNS3_RXD_L2E_B) | \
- BIT(HNS3_RXD_TRUNCATE_B))
-#define CHECKSUM_ERR_FLAG (BIT(HNS3_RXD_L3E_B) | \
+#define HNS3_RXD_CKSUM_ERR_MASK (BIT(HNS3_RXD_L3E_B) | \
BIT(HNS3_RXD_L4E_B) | \
BIT(HNS3_RXD_OL3E_B) | \
BIT(HNS3_RXD_OL4E_B))
- uint32_t tmp = 0;
+ if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
+ rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ return;
+ }
+
+ if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
+ rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ rxq->dfx_stats.l3_csum_errors++;
+ } else {
+ rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ }
+
+ if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
+ rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ rxq->dfx_stats.l4_csum_errors++;
+ } else {
+ rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
+ rxq->dfx_stats.ol3_csum_errors++;
+
+ if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
+ rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ rxq->dfx_stats.ol4_csum_errors++;
+ }
+}
+
/*
 * Validate the Rx BD error bits for a received packet.
 *
 * Returns 0 on success, -EINVAL when the BD reports an L2 error or a
 * truncated packet. When hardware performed L3/L4 checksum verification
 * (HNS3_RXD_L3L4P_B), the checksum ol_flags are set on the mbuf via
 * hns3_rx_set_cksum_flag().
 *
 * NOTE(review): this is a patch hunk; the body of the L2E/truncate
 * error check (between the comment below and "return -EINVAL;") is
 * elided in this excerpt and is kept unchanged here.
 */
+static inline int
+hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
+ uint32_t bd_base_info, uint32_t l234_info)
+{
+#define L2E_TRUNC_ERR_FLAG (BIT(HNS3_RXD_L2E_B) | \
+ BIT(HNS3_RXD_TRUNCATE_B))
/*
* If packet len bigger than mtu when recv with no-scattered algorithm,
return -EINVAL;
}
- if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
- if (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) {
- *cksum_err = 0;
- return 0;
- }
-
- if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- rxq->dfx_stats.l3_csum_errors++;
- tmp |= HNS3_L3_CKSUM_ERR;
- }
-
- if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- rxq->dfx_stats.l4_csum_errors++;
- tmp |= HNS3_L4_CKSUM_ERR;
- }
-
- if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
- rxq->dfx_stats.ol3_csum_errors++;
- tmp |= HNS3_OUTER_L3_CKSUM_ERR;
- }
-
- if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
- rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
- rxq->dfx_stats.ol4_csum_errors++;
- tmp |= HNS3_OUTER_L4_CKSUM_ERR;
- }
- }
- *cksum_err = tmp;
/* Set checksum flags only when HW verified L3/L4 (L3L4P bit). */
+ if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
+ hns3_rx_set_cksum_flag(rxq, rxm, l234_info);
return 0;
}
-static inline void
-hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, const uint64_t packet_type,
- const uint32_t cksum_err)
-{
- if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
- if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
- (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
- rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
- (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
- rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
- if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
- (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
- rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
- } else {
- if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
- (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
- rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
- (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
- rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
- }
-}
-
static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
const uint32_t ol_info)
uint32_t l234_info, ol_info, bd_base_info;
struct rte_mbuf *pkt;
uint32_t retcode = 0;
- uint32_t cksum_err;
uint32_t i;
int ret;
l234_info = rxdp[i].rx.l234_info;
ol_info = rxdp[i].rx.ol_info;
bd_base_info = rxdp[i].rx.bd_base_info;
- ret = hns3_handle_bdinfo(rxq, pkt, bd_base_info,
- l234_info, &cksum_err);
+ ret = hns3_handle_bdinfo(rxq, pkt, bd_base_info, l234_info);
if (unlikely(ret)) {
retcode |= 1u << i;
continue;
}
pkt->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
- if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
- hns3_rx_set_cksum_flag(pkt, pkt->packet_type,
- cksum_err);
/* Increment bytes counter */
rxq->basic_stats.bytes += pkt->pkt_len;