#define ICE_FDIR_INSET_IPV4_NATT_ESP (\
ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
- ICE_INSET_ESP_SPI)
+ ICE_INSET_NAT_T_ESP_SPI)
#define ICE_FDIR_INSET_IPV6_NATT_ESP (\
ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
- ICE_INSET_ESP_SPI)
+ ICE_INSET_NAT_T_ESP_SPI)
+/* The two NATT ESP input-set macros above now use the dedicated
+ * ICE_INSET_NAT_T_ESP_SPI bit instead of ICE_INSET_ESP_SPI, so that
+ * UDP-encapsulated (NAT-T) ESP and plain ESP input sets can be told
+ * apart during input-set validation.
+ */
static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
+ {pattern_raw, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
{
struct ice_fdir_info *fdir_info = &pf->fdir;
- if (fdir_info->hash_map)
- rte_free(fdir_info->hash_map);
- if (fdir_info->hash_table)
- rte_hash_free(fdir_info->hash_table);
+ rte_free(fdir_info->hash_map);
+ rte_hash_free(fdir_info->hash_table);
fdir_info->hash_map = NULL;
fdir_info->hash_table = NULL;
{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
{ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
+ {ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
+ {ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
};
for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
return 0;
}
+/* Program (add == true) or remove (add == false) a raw-pattern FDIR
+ * filter: stage the parsed training packet in the FDIR programming
+ * buffer, build a programming descriptor from filter->input and hand it
+ * to the FDIR programming queue.
+ */
+static int
+ice_fdir_add_del_raw(struct ice_pf *pf,
+ struct ice_fdir_filter_conf *filter,
+ bool add)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ /* copy the raw training packet into the shared programming buffer */
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);
+
+ /* request a SW completion report on the descriptor — presumably so
+  * ice_fdir_programming() can poll programming status; confirm against
+  * that helper's implementation.
+  */
+ struct ice_fltr_desc desc;
+ memset(&desc, 0, sizeof(desc));
+ filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
+ ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
+
+ return ice_fdir_programming(pf, &desc);
+}
+
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
struct ice_fdir_filter_conf *filter,
struct ice_fdir_fltr_pattern key;
bool is_tun;
int ret;
+ int i;
+
+ /* Raw (parser-based) filter path: instead of the inset-based flow
+  * engine, reuse or install a HW profile derived from the SW parser
+  * and program the filter with a raw training packet.
+  */
+ if (filter->parser_ena) {
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ /* first ptype hit by the profile selects the packet type group */
+ int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
+ int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+ u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
+ u16 main_vsi = pf->main_vsi->idx;
+ bool fv_found = false;
+
+ /* Check whether an identical field-vector profile is already
+  * active for this PTG; if every FV word matches, just take a
+  * reference instead of reprogramming the HW profile.
+  */
+ struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
+ if (pi->fdir_actived_cnt != 0) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+ if (pi->prof.fv[i].proto_id !=
+ filter->prof->fv[i].proto_id ||
+ pi->prof.fv[i].offset !=
+ filter->prof->fv[i].offset ||
+ pi->prof.fv[i].msk !=
+ filter->prof->fv[i].msk)
+ break;
+ if (i == ICE_MAX_FV_WORDS) {
+ fv_found = true;
+ pi->fdir_actived_cnt++;
+ }
+ }
+
+ /* no matching active profile: install it on main + ctrl VSI */
+ if (!fv_found) {
+ ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
+ filter->prof, ICE_BLK_FD);
+ if (ret)
+ goto error;
+ }
+
+ /* NOTE(review): the goto error paths below return -rte_errno
+  * without setting rte_errno or a flow error first — confirm
+  * callers tolerate a possibly stale errno value.
+  */
+ ret = ice_fdir_add_del_raw(pf, filter, true);
+ if (ret)
+ goto error;
+
+ /* cache the newly installed FV so later filters can match it */
+ if (!fv_found) {
+ for (i = 0; i < filter->prof->fv_num; i++) {
+ pi->prof.fv[i].proto_id =
+ filter->prof->fv[i].proto_id;
+ pi->prof.fv[i].offset =
+ filter->prof->fv[i].offset;
+ pi->prof.fv[i].msk = filter->prof->fv[i].msk;
+ }
+ pi->fdir_actived_cnt = 1;
+ }
+
+ /* FDIR mark action requires the Rx parsing metadata path */
+ if (filter->mark_flag == 1)
+ ice_fdir_rx_parsing_enable(ad, 1);
+
+ /* NOTE(review): if this allocation fails, the filter stays
+  * programmed in HW and fdir_actived_cnt stays incremented —
+  * confirm whether a rollback is needed here.
+  */
+ entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
+ if (!entry)
+ goto error;
+
+ rte_memcpy(entry, filter, sizeof(*filter));
+
+ flow->rule = entry;
+
+ return 0;
+ }
ice_fdir_extract_fltr_key(&key, filter);
node = ice_fdir_entry_lookup(fdir_info, &key);
if (filter->input.cnt_ena) {
struct rte_flow_action_count *act_count = &filter->act_count;
- filter->counter = ice_fdir_counter_alloc(pf,
- act_count->shared,
- act_count->id);
+ filter->counter = ice_fdir_counter_alloc(pf, 0, act_count->id);
if (!filter->counter) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
free_entry:
rte_free(entry);
return -rte_errno;
+
+error:
+ rte_free(filter->prof);
+ rte_free(filter->pkt_buf);
+ return -rte_errno;
}
static int
filter = (struct ice_fdir_filter_conf *)flow->rule;
+ /* Raw (parser-based) filter teardown: remove the filter from HW,
+  * drop the profile reference, and free all per-filter resources.
+  */
+ if (filter->parser_ena) {
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ /* recompute the PTG exactly as the add path did */
+ int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
+ int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+ u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
+ u16 main_vsi = pf->main_vsi->idx;
+ enum ice_block blk = ICE_BLK_FD;
+ u16 vsi_num;
+
+ /* remove the raw filter entry from hardware first */
+ ret = ice_fdir_add_del_raw(pf, filter, false);
+ if (ret)
+ return -rte_errno;
+
+ /* drop one reference on the shared profile; when the last
+  * raw filter using it is gone, remove the profile flows from
+  * both the control and the main VSI.
+  */
+ struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
+ if (pi->fdir_actived_cnt != 0) {
+ pi->fdir_actived_cnt--;
+ if (!pi->fdir_actived_cnt) {
+ vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+
+ vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+ }
+ }
+
+ /* mark action gone: disable Rx parsing metadata again */
+ if (filter->mark_flag == 1)
+ ice_fdir_rx_parsing_enable(ad, 0);
+
+ flow->rule = NULL;
+
+ rte_free(filter->prof);
+ rte_free(filter->pkt_buf);
+ rte_free(filter);
+
+ return 0;
+ }
+
is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
if (filter->counter) {
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
struct ice_fdir_extra *p_ext_data;
struct ice_fdir_v4 *p_v4 = NULL;
struct ice_fdir_v6 *p_v6 = NULL;
+ struct ice_parser_result rslt;
+ uint8_t item_num = 0;
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
is_outer = false;
}
+ item_num++;
}
/* This loop parse flow pattern and distinguish Non-tunnel and tunnel
&input_set_i : &input_set_o;
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_RAW: {
+ /* Raw pattern: the whole packet is supplied as an ASCII hex
+  * string; run it through the SW parser to derive a HW profile.
+  */
+ if (ad->psr == NULL)
+ return -rte_errno;
+
+ raw_spec = item->spec;
+ raw_mask = item->mask;
+
+ /* a raw pattern must be the only item in the flow */
+ if (item_num != 1)
+ break;
+
+ /* Spec and mask are both required; validate them up front,
+  * before any dereference (previously the mask was only
+  * NULL-checked after it had already been used).
+  */
+ if (!raw_spec || !raw_mask ||
+ !raw_spec->pattern || !raw_mask->pattern)
+ return -rte_errno;
+
+ /* convert raw spec & mask from byte string to int */
+ unsigned char *tmp_spec =
+ (uint8_t *)(uintptr_t)raw_spec->pattern;
+ unsigned char *tmp_mask =
+ (uint8_t *)(uintptr_t)raw_mask->pattern;
+ uint16_t tmp_val = 0;
+ uint8_t pkt_len = 0;
+ uint8_t tmp = 0;
+ int i, j;
+
+ /* spec and mask hex strings must be the same length */
+ pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
+ if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
+ pkt_len)
+ return -rte_errno;
+
+ /* in-place hex-to-binary: the two ASCII hex digits at i and
+  * i + 1 collapse into one byte at index j, for spec and mask
+  * alike
+  */
+ for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+ tmp = tmp_spec[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = tmp_spec[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_spec[j] = tmp_val + tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_spec[j] = tmp_val + tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_spec[j] = tmp_val + tmp - '0';
+
+ tmp = tmp_mask[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = tmp_mask[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_mask[j] = tmp_val + tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_mask[j] = tmp_val + tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_mask[j] = tmp_val + tmp - '0';
+ }
+
+ /* hex string length -> binary packet length */
+ pkt_len /= 2;
+
+ if (ice_parser_run(ad->psr, tmp_spec, pkt_len, &rslt))
+ return -rte_errno;
+
+ filter->prof = (struct ice_parser_profile *)
+ ice_malloc(&ad->hw, sizeof(*filter->prof));
+ if (!filter->prof)
+ return -ENOMEM;
+
+ if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
+ pkt_len, ICE_BLK_FD, true, filter->prof)) {
+ /* free eagerly and clear the pointer so the caller's
+  * error path cannot double-free it
+  */
+ rte_free(filter->prof);
+ filter->prof = NULL;
+ return -rte_errno;
+ }
+
+ u8 *pkt_buf = (u8 *)ice_malloc(&ad->hw, pkt_len + 1);
+ if (!pkt_buf) {
+ rte_free(filter->prof);
+ filter->prof = NULL;
+ return -ENOMEM;
+ }
+ rte_memcpy(pkt_buf, tmp_spec, pkt_len);
+ filter->pkt_buf = pkt_buf;
+
+ filter->pkt_len = pkt_len;
+
+ filter->parser_ena = true;
+
+ break;
+ }
+
+
case RTE_FLOW_ITEM_TYPE_ETH:
flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
eth_spec = item->spec;
return -rte_errno;
}
+ /* Mask for IPv4 src/dst addrs not supported */
+ if (ipv4_mask->hdr.src_addr &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX)
+ return -rte_errno;
+ if (ipv4_mask->hdr.dst_addr &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX)
+ return -rte_errno;
+
if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
*input_set |= ICE_INSET_IPV4_DST;
if (ipv4_mask->hdr.src_addr == UINT32_MAX)
return -rte_errno;
}
+ /* Mask for TCP src/dst ports not supported */
+ if (tcp_mask->hdr.src_port &&
+ tcp_mask->hdr.src_port != UINT16_MAX)
+ return -rte_errno;
+ if (tcp_mask->hdr.dst_port &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)
+ return -rte_errno;
+
if (tcp_mask->hdr.src_port == UINT16_MAX)
*input_set |= ICE_INSET_TCP_SRC_PORT;
if (tcp_mask->hdr.dst_port == UINT16_MAX)
return -rte_errno;
}
+ /* Mask for UDP src/dst ports not supported */
+ if (udp_mask->hdr.src_port &&
+ udp_mask->hdr.src_port != UINT16_MAX)
+ return -rte_errno;
+ if (udp_mask->hdr.dst_port &&
+ udp_mask->hdr.dst_port != UINT16_MAX)
+ return -rte_errno;
+
if (udp_mask->hdr.src_port == UINT16_MAX)
*input_set |= ICE_INSET_UDP_SRC_PORT;
if (udp_mask->hdr.dst_port == UINT16_MAX)
return -rte_errno;
}
+ /* Mask for SCTP src/dst ports not supported */
+ if (sctp_mask->hdr.src_port &&
+ sctp_mask->hdr.src_port != UINT16_MAX)
+ return -rte_errno;
+ if (sctp_mask->hdr.dst_port &&
+ sctp_mask->hdr.dst_port != UINT16_MAX)
+ return -rte_errno;
+
if (sctp_mask->hdr.src_port == UINT16_MAX)
*input_set |= ICE_INSET_SCTP_SRC_PORT;
if (sctp_mask->hdr.dst_port == UINT16_MAX)
if (!(gtp_psc_spec && gtp_psc_mask))
break;
- if (gtp_psc_mask->qfi == UINT8_MAX)
+ if (gtp_psc_mask->hdr.qfi == 0x3F)
input_set_o |= ICE_INSET_GTPU_QFI;
filter->input.gtpu_data.qfi =
- gtp_psc_spec->qfi;
+ gtp_psc_spec->hdr.qfi;
break;
case RTE_FLOW_ITEM_TYPE_ESP:
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
if (!(esp_spec && esp_mask))
break;
- if (esp_mask->hdr.spi == UINT32_MAX)
- *input_set |= ICE_INSET_ESP_SPI;
+ if (esp_mask->hdr.spi == UINT32_MAX) {
+ if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
+ *input_set |= ICE_INSET_NAT_T_ESP_SPI;
+ else
+ *input_set |= ICE_INSET_ESP_SPI;
+ }
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
filter->input.ip.v4.sec_parm_idx =
uint32_t array_len,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- uint32_t priority __rte_unused,
+ uint32_t priority,
void **meta,
struct rte_flow_error *error)
{
struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
struct ice_pattern_match_item *item = NULL;
uint64_t input_set;
+ bool raw = false;
int ret;
memset(filter, 0, sizeof(*filter));
item = ice_search_pattern_match_item(ad, pattern, array, array_len,
error);
+
+ /* FDIR rules only support priority 0 unless pipeline mode is
+  * enabled. Free the pattern-match item allocated just above
+  * before bailing out so it is not leaked (rte_free(NULL) is a
+  * no-op when no match was found).
+  */
+ if (!ad->devargs.pipe_mode_support && priority >= 1) {
+ rte_free(item);
+ return -rte_errno;
+ }
+
if (!item)
return -rte_errno;
ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
if (ret)
goto error;
+
+ if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
+ raw = true;
+
input_set = filter->input_set_o | filter->input_set_i;
+ input_set = raw ? ~input_set : input_set;
+
if (!input_set || filter->input_set_o &
~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
filter->input_set_i & ~item->input_set_mask_i) {
if (meta)
*meta = filter;
+
+ rte_free(item);
+ return ret;
error:
+ rte_free(filter->prof);
+ rte_free(filter->pkt_buf);
rte_free(item);
return ret;
}