/*
 * Input-set bits accepted for an IPv4 NAT-T (UDP-encapsulated) ESP
 * flow-director rule: outer IPv4 src/dst plus the NAT-T ESP SPI.
 *
 * NOTE(review): this span contained unresolved unified-diff markers
 * ('-'/'+' prefixed lines), which are not valid C; resolved to the
 * '+' (new) side, which uses the dedicated NAT-T SPI bit so NAT-T
 * ESP rules are distinguished from plain ESP rules.
 */
#define ICE_FDIR_INSET_IPV4_NATT_ESP (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_NAT_T_ESP_SPI)
/*
 * Input-set bits accepted for an IPv6 NAT-T (UDP-encapsulated) ESP
 * flow-director rule: outer IPv6 src/dst plus the NAT-T ESP SPI.
 *
 * NOTE(review): resolved leftover unified-diff markers here the same
 * way as the IPv4 variant above — kept the '+' (new) side using
 * ICE_INSET_NAT_T_ESP_SPI instead of the plain ESP SPI bit.
 */
#define ICE_FDIR_INSET_IPV6_NATT_ESP (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_NAT_T_ESP_SPI)
/*
 * NOTE(review): this initializer looks like a corrupted merge of two
 * different driver tables. The first entry has the 4-field
 * ice_pattern_match_item shape used by the FDIR pattern list, while
 * the remaining 2-field {inset bit, flow-field index} entries match
 * the shape of the separate inset-map table iterated with
 * RTE_DIM(ice_inset_map) below. TODO: split these back into their
 * original tables against the full file.
 */
static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
{ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
/*
 * NOTE(review): the '+' prefixes below are leftover unified-diff
 * markers (not valid C); the intent appears to be adding ESP and
 * NAT-T ESP SPI mappings to the inset-map table — confirm and drop
 * the markers when resolving the merge.
 */
+ {ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
+ {ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
};
/*
 * NOTE(review): fragment of a larger function (start/end not visible
 * here) with unresolved unified-diff markers. The '-'/'+' lines below
 * appear to migrate the GTP-PSC item from the deprecated flat layout
 * (gtp_psc_mask->qfi) to the header-struct layout (gtp_psc_mask->hdr.qfi),
 * and tighten the full-mask check from UINT8_MAX to 0x3F since QFI is a
 * 6-bit field — TODO confirm against the full file and drop the markers.
 */
for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
/* Nothing to match: both spec and mask must be present. */
if (!(gtp_psc_spec && gtp_psc_mask))
break;
- if (gtp_psc_mask->qfi == UINT8_MAX)
+ if (gtp_psc_mask->hdr.qfi == 0x3F)
input_set_o |= ICE_INSET_GTPU_QFI;
filter->input.gtpu_data.qfi =
- gtp_psc_spec->qfi;
+ gtp_psc_spec->hdr.qfi;
break;
/*
 * NOTE(review): fragment of a switch inside a parse function (the
 * enclosing function and the condition on the next line are truncated
 * in this view) with unresolved unified-diff markers. The '+' side
 * appears to split fully-masked SPI handling so that ESP carried over
 * UDP (l4 == UDP, i.e. NAT-T) sets ICE_INSET_NAT_T_ESP_SPI while plain
 * ESP keeps ICE_INSET_ESP_SPI — TODO confirm and resolve the markers.
 */
case RTE_FLOW_ITEM_TYPE_ESP:
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
/* Nothing to match: both spec and mask must be present. */
if (!(esp_spec && esp_mask))
break;
- if (esp_mask->hdr.spi == UINT32_MAX)
- *input_set |= ICE_INSET_ESP_SPI;
+ if (esp_mask->hdr.spi == UINT32_MAX) {
+ if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
+ *input_set |= ICE_INSET_NAT_T_ESP_SPI;
+ else
+ *input_set |= ICE_INSET_ESP_SPI;
+ }
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
filter->input.ip.v4.sec_parm_idx =
/*
 * NOTE(review): tail of a parameter list whose function name lies above
 * this view, plus the start of its body; contains unresolved
 * unified-diff markers. The '+' side appears to start honoring the
 * rte_flow 'priority' argument (dropping __rte_unused) and to reject
 * priority >= 1 unless devargs pipeline mode is enabled — TODO confirm
 * against the full file and drop the markers.
 */
uint32_t array_len,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- uint32_t priority __rte_unused,
+ uint32_t priority,
void **meta,
struct rte_flow_error *error)
{
memset(filter, 0, sizeof(*filter));
item = ice_search_pattern_match_item(ad, pattern, array, array_len,
error);
+
+ if (!ad->devargs.pipe_mode_support && priority >= 1)
+ return -rte_errno;
+
/* A failed pattern-match lookup leaves rte_errno set by the helper. */
if (!item)
return -rte_errno;