/*
 * FDIR input-set for NAT-T (UDP-encapsulated) ESP over IPv4.
 * NOTE(review): diff hunk — the change replaces the generic ESP SPI inset
 * bit with the dedicated NAT_T_ESP_SPI bit so plain-ESP and NAT-T-ESP
 * flows are programmed as distinct input sets.
 */
#define ICE_FDIR_INSET_IPV4_NATT_ESP (\
ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
- ICE_INSET_ESP_SPI)
+ ICE_INSET_NAT_T_ESP_SPI)
/*
 * FDIR input-set for NAT-T (UDP-encapsulated) ESP over IPv6.
 * NOTE(review): diff hunk — mirrors the IPv4 variant: the generic ESP SPI
 * inset bit is replaced by the dedicated NAT_T_ESP_SPI bit.
 */
#define ICE_FDIR_INSET_IPV6_NATT_ESP (\
ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
- ICE_INSET_ESP_SPI)
+ ICE_INSET_NAT_T_ESP_SPI)
/*
 * NOTE(review): this span appears spliced from two different tables.
 * The first entry (pattern + inset masks) matches the shape of
 * ice_fdir_pattern_list, but the subsequent two-field
 * {ICE_INSET_*, ICE_FLOW_FIELD_IDX_*} pairs look like entries of a
 * separate inset-to-flow-field mapping table (presumably ice_inset_map)
 * — verify against the full file before applying.
 */
static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
{ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
/* Added mappings: plain ESP SPI and NAT-T ESP SPI each map to their own
 * hardware flow-field index, keeping the two flow types separable. */
+ {ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
+ {ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
};
/*
 * NOTE(review): fragment cut mid-statement (the assignment on the last
 * line has no right-hand side in this view) — code kept byte-identical.
 * The hunk teaches the ESP parsing path to distinguish NAT-T ESP:
 * when an all-ones SPI mask is given and the outer L4 item is UDP
 * (i.e. UDP-encapsulated ESP per RFC 3948), the NAT_T_ESP_SPI inset
 * bit is set instead of the plain ESP_SPI bit.
 */
for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
/* Nothing to parse without both spec and mask present. */
if (!(esp_spec && esp_mask))
break;
- if (esp_mask->hdr.spi == UINT32_MAX)
- *input_set |= ICE_INSET_ESP_SPI;
/* Fully-masked SPI: choose the inset bit by encapsulation type. */
+ if (esp_mask->hdr.spi == UINT32_MAX) {
+ if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
+ *input_set |= ICE_INSET_NAT_T_ESP_SPI;
+ else
+ *input_set |= ICE_INSET_ESP_SPI;
+ }
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
filter->input.ip.v4.sec_parm_idx =
/* Protocol bit-field identifiers (one bit per tunneled/inner protocol). */
#define ICE_PROT_AH BIT_ULL(15)
#define ICE_PROT_L2TPV3OIP BIT_ULL(16)
#define ICE_PROT_PFCP BIT_ULL(17)
/* New: dedicated protocol bit for UDP-encapsulated (NAT-T) ESP, taking the
 * next free bit position after PFCP. */
+#define ICE_PROT_NAT_T_ESP BIT_ULL(18)
/* field */
/* NOTE(review): the line below is the continuation of a #define whose
 * first line is outside this view (presumably ICE_INSET_PFCP_S_FIELD or
 * similar) — kept byte-identical; verify against the full header. */
(ICE_PROT_PFCP | ICE_PFCP_S_FIELD)
#define ICE_INSET_PFCP_SEID \
(ICE_PROT_PFCP | ICE_PFCP_S_FIELD | ICE_PFCP_SEID)
/* New input-set token: NAT-T ESP SPI = the new NAT-T protocol bit combined
 * with the existing ESP SPI field bit. */
+#define ICE_INSET_NAT_T_ESP_SPI \
+ (ICE_PROT_NAT_T_ESP | ICE_ESP_SPI)
/* empty pattern */
/* Declaration only — the rte_flow item array itself is defined in a .c
 * translation unit elsewhere in the driver. */
extern enum rte_flow_item_type pattern_empty[];