diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0785f37..d052bd0 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -43,6 +43,28 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
                                rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
 }
 
+static inline void
+ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
+                                      struct rte_mbuf *mb,
+                                      volatile union ice_rx_flex_desc *rxdp)
+{
+       volatile struct ice_32b_rx_flex_desc_comms *desc =
+                       (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+       uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
+
+       if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+               mb->ol_flags |= PKT_RX_RSS_HASH;
+               mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+       }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+       if (desc->flow_id != 0xFFFFFFFF) {
+               mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+               mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+       }
+#endif
+}
+
 static inline void
 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
                                   struct rte_mbuf *mb,
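
For context, a minimal sketch (not part of the patch) of how an application could consume the RSS hash and flow director ID that the new comms_generic handler copies into the mbuf. The helper name, port/queue IDs and burst size are illustrative only; the PKT_RX_* flag names match the pre-21.11 mbuf API used throughout this file.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative helper: dump RX metadata filled in by the flex descriptor path. */
static void
dump_rx_metadata(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	uint16_t i;

	for (i = 0; i < nb_rx; i++) {
		struct rte_mbuf *mb = pkts[i];

		if (mb->ol_flags & PKT_RX_RSS_HASH)
			printf("rss hash: 0x%08x\n", mb->hash.rss);
		if (mb->ol_flags & PKT_RX_FDIR_ID)
			printf("flow id:  0x%08x\n", mb->hash.fdir.hi);

		rte_pktmbuf_free(mb);
	}
}
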
@@ -148,7 +170,7 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
 #endif
 }
 
-static void
+void
 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
 {
        switch (rxdid) {
@@ -182,6 +204,10 @@ ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
                break;
 
+       case ICE_RXDID_COMMS_GENERIC:
+               rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
+               break;
+
        case ICE_RXDID_COMMS_OVS:
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
                break;
@@ -1425,6 +1451,11 @@ ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
                flags |= PKT_RX_EIP_CKSUM_BAD;
 
+       if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+               flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+       else
+               flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+
        return flags;
 }
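
The outer L4 status added above can be read back from the mbuf as sketched below; a rough example assuming the same pre-21.11 PKT_RX_* flag names, not part of the patch.

#include <rte_mbuf.h>

/* Illustrative only: check whether the outer UDP checksum was reported bad. */
static inline int
outer_l4_csum_is_bad(const struct rte_mbuf *mb)
{
	return (mb->ol_flags & PKT_RX_OUTER_L4_CKSUM_MASK) ==
	       PKT_RX_OUTER_L4_CKSUM_BAD;
}
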
 
@@ -2293,8 +2324,11 @@ ice_parse_tunneling_params(uint64_t ol_flags,
        *cd_tunneling |= (tx_offload.l2_len >> 1) <<
                ICE_TXD_CTX_QW0_NATLEN_S;
 
-       if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
-           (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+       /**
+        * Calculate the tunneling UDP checksum.
+        * Shall be set only if L4TUNT = 01b and EIPT is not zero
+        */
+       if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
            (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
                *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
 }