From: Andrew Rybchenko
Date: Sun, 24 Dec 2017 10:46:42 +0000 (+0000)
Subject: net/sfc: correct Rx checksum offloads for tunnel packets
X-Git-Tag: spdx-start~478
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=9701c5619744bbee8cc14fe5b427f74825b83791;p=dpdk.git

net/sfc: correct Rx checksum offloads for tunnel packets

In the case of a tunnel packet, the PKT_RX_{IP,L4}_CKSUM_* flags
correspond to the inner packet checksums. There is only one flag to
indicate a bad external IPv4 header checksum.

Signed-off-by: Andrew Rybchenko
Reviewed-by: Ivan Malov
---

diff --git a/doc/guides/nics/features/sfc_efx.ini b/doc/guides/nics/features/sfc_efx.ini
index 03890f3017..6b73af92f1 100644
--- a/doc/guides/nics/features/sfc_efx.ini
+++ b/doc/guides/nics/features/sfc_efx.ini
@@ -24,6 +24,8 @@ Flow API             = Y
 VLAN offload         = P
 L3 checksum offload  = Y
 L4 checksum offload  = Y
+Inner L3 checksum    = Y
+Inner L4 checksum    = Y
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index 994e1110b8..813cb6fd59 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -69,6 +69,8 @@ SFC EFX PMD has support for:
 
 - IPv4/IPv6 TCP/UDP receive checksum offload
 
+- Inner IPv4/IPv6 TCP/UDP receive checksum offload
+
 - Received packet type information
 
 - Receive side scaling (RSS)
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 41c2885856..e860a39fa6 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -252,6 +252,10 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 			   struct rte_mbuf *m)
 {
 	uint32_t tun_ptype = 0;
+	/* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
+	int8_t ip_csum_err_bit;
+	/* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
+	int8_t l4_csum_err_bit;
 	uint32_t l2_ptype = 0;
 	uint32_t l3_ptype = 0;
 	uint32_t l4_ptype = 0;
@@ -282,6 +286,17 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 		break;
 	}
 
+	if (tun_ptype == 0) {
+		ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
+		l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
+	} else {
+		ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
+		l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
+		if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
+						ESF_DZ_RX_IPCKSUM_ERR_LBN)))
+			ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+	}
+
 	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
 	case ESE_DZ_ETH_TAG_CLASS_NONE:
 		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
@@ -309,8 +324,7 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 		l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
 		ol_flags |= PKT_RX_RSS_HASH |
-			((EFX_TEST_QWORD_BIT(rx_ev,
-					     ESF_DZ_RX_IPCKSUM_ERR_LBN)) ?
+			((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
 			 PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
 		break;
 	case ESE_DZ_L3_CLASS_IP6_FRAG:
@@ -338,16 +352,14 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
 			RTE_PTYPE_INNER_L4_TCP;
 		ol_flags |=
-			(EFX_TEST_QWORD_BIT(rx_ev,
-					    ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+			(EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
 			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
 		break;
 	case ESE_DZ_L4_CLASS_UDP:
 		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
 			RTE_PTYPE_INNER_L4_UDP;
 		ol_flags |=
-			(EFX_TEST_QWORD_BIT(rx_ev,
-					    ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+			(EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
 			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
 		break;
 	case ESE_DZ_L4_CLASS_UNKNOWN:
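
As a usage illustration (a minimal sketch, not part of the commit): for a
packet the PMD recognised as tunneled, an application might check the
resulting mbuf offload flags along these lines. Only the flag macros from
rte_mbuf.h of this DPDK generation are assumed; the helper name
tunnel_pkt_csum_ok() is hypothetical.

#include <stdbool.h>
#include <rte_mbuf.h>

/* Hypothetical helper: accept the mbuf only if no checksum error was
 * reported. For a tunnel packet, PKT_RX_{IP,L4}_CKSUM_* describe the
 * inner headers; PKT_RX_EIP_CKSUM_BAD is the only outer indication.
 */
static bool
tunnel_pkt_csum_ok(const struct rte_mbuf *m)
{
	/* Bad outer IPv4 header checksum (no "good" counterpart exists). */
	if (m->ol_flags & PKT_RX_EIP_CKSUM_BAD)
		return false;

	/* Inner IP header checksum status. */
	if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
		return false;

	/* Inner L4 (TCP/UDP) checksum status. */
	if ((m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
		return false;

	return true;
}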