X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_flow.c;h=51d8fdd7906afb8764189e66a1d547f40a7b24be;hb=25ae7f1a5d9d127a46f8d62d1d689f77a78138fd;hp=e522962c2fee5242817f15933fef06d5cd0e8f70;hpb=0ecc6d6c663bc9906db9f82833f7c49c97df8321;p=dpdk.git diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index e522962c2f..51d8fdd790 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -10,13 +10,14 @@ #include #include +#include #include -#include +#include #include #include -#include #include #include +#include #include "i40e_logs.h" #include "base/i40e_type.h" @@ -43,6 +44,10 @@ static int i40e_flow_destroy(struct rte_eth_dev *dev, struct rte_flow_error *error); static int i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +static int i40e_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, struct rte_flow_error *error); static int i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *pattern, @@ -53,6 +58,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, struct rte_flow_error *error, struct rte_eth_ethertype_filter *filter); static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, struct rte_flow_error *error, struct i40e_fdir_filter_conf *filter); @@ -109,6 +115,7 @@ static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf, static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf); static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf); static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf); +static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); static int i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -122,15 +129,24 @@ i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev, struct rte_flow_error *error, struct i40e_tunnel_filter_conf *filter); +static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); const struct rte_flow_ops i40e_flow_ops = { .validate = i40e_flow_validate, .create = i40e_flow_create, .destroy = i40e_flow_destroy, .flush = i40e_flow_flush, + .query = i40e_flow_query, }; -union i40e_filter_t cons_filter; -enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE; +static union i40e_filter_t cons_filter; +static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE; +/* internal pattern w/o VOID items */ +struct rte_flow_item g_items[32]; /* Pattern matched ethertype filter */ static enum rte_flow_item_type pattern_ethertype[] = { @@ -1612,6 +1628,50 @@ static enum rte_flow_item_type pattern_qinq_1[] = { RTE_FLOW_ITEM_TYPE_END, }; +static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type 
pattern_fdir_ipv6_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + static struct i40e_valid_pattern i40e_supported_patterns[] = { /* Ethertype */ { pattern_ethertype, i40e_flow_parse_ethertype_filter }, @@ -1625,6 +1685,8 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter }, @@ -1633,6 +1695,8 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter }, /* FDIR - support default flow type with flexible payload */ { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter }, { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter }, @@ -1792,6 +1856,16 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter }, /* QINQ */ { pattern_qinq_1, i40e_flow_parse_qinq_filter }, + /* L2TPv3 over IP */ + { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter }, + /* L4 over port */ + { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter }, + { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter }, + { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter }, + { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter }, + { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter }, + { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter }, }; #define NEXT_ITEM_OF_ACTION(act, actions, index) \ @@ -1937,7 +2011,8 @@ static uint16_t i40e_get_outer_vlan(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; uint64_t reg_r = 0; uint16_t reg_id; uint16_t tpid; @@ -1972,9 +2047,6 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, const struct rte_flow_item_eth *eth_spec; const struct rte_flow_item_eth *eth_mask; enum rte_flow_item_type item_type; - uint16_t outer_tpid; - - outer_tpid = i40e_get_outer_vlan(dev); for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { if (item->last) { @@ -1987,8 +2059,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth 
*)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Get the MAC info. */ if (!eth_spec || !eth_mask) { rte_flow_error_set(error, EINVAL, @@ -2002,9 +2074,9 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, * Mask bits of destination MAC address must be full * of 1 or full of 0. */ - if (!is_zero_ether_addr(ð_mask->src) || - (!is_zero_ether_addr(ð_mask->dst) && - !is_broadcast_ether_addr(ð_mask->dst))) { + if (!rte_is_zero_ether_addr(ð_mask->src) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -2023,7 +2095,7 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, /* If mask bits of destination MAC address * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. */ - if (is_broadcast_ether_addr(ð_mask->dst)) { + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { filter->mac_addr = eth_spec->dst; filter->flags |= RTE_ETHTYPE_FLAGS_MAC; } else { @@ -2031,10 +2103,10 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, } filter->ether_type = rte_be_to_cpu_16(eth_spec->type); - if (filter->ether_type == ETHER_TYPE_IPv4 || - filter->ether_type == ETHER_TYPE_IPv6 || - filter->ether_type == ETHER_TYPE_LLDP || - filter->ether_type == outer_tpid) { + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6 || + filter->ether_type == RTE_ETHER_TYPE_LLDP || + filter->ether_type == i40e_get_outer_vlan(dev)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -2073,7 +2145,7 @@ i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, } if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - act_q = (const struct rte_flow_action_queue *)act->conf; + act_q = act->conf; filter->queue = act_q->index; if (filter->queue >= pf->dev_data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, @@ -2248,11 +2320,18 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf, uint8_t raw_id) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint32_t flx_pit; + uint32_t flx_pit, flx_ort; uint8_t field_idx; uint16_t min_next_off = 0; /* in words */ uint8_t i; + if (raw_id) { + flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | + (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | + (layer_idx * I40E_MAX_FLXPLD_FIED); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); + } + /* Set flex pit */ for (i = 0; i < raw_id; i++) { field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; @@ -2338,6 +2417,37 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf, if (num < 0) return -EINVAL; + if (pf->support_multi_driver) { + for (i = 0; i < num; i++) + if (i40e_read_rx_ctl(hw, + I40E_GLQF_FD_MSK(i, pctype)) != + mask_reg[i]) { + PMD_DRV_LOG(ERR, "Input set setting is not" + " supported with" + " `support-multi-driver`" + " enabled!"); + return -EPERM; + } + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + if (i40e_read_rx_ctl(hw, + I40E_GLQF_FD_MSK(i, pctype)) != 0) { + PMD_DRV_LOG(ERR, "Input set setting is not" + " supported with" + " `support-multi-driver`" + " enabled!"); + return -EPERM; + } + + } else { + for (i = 0; i < num; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), 0); + } + inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), @@ -2346,13 +2456,6 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf, 
(uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); - for (i = 0; i < num; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - - /*clear unused mask registers of the pctype */ - for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0); I40E_WRITE_FLUSH(hw); pf->fdir.input_set[pctype] = input_set; @@ -2385,17 +2488,72 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, cus_pctype = i40e_find_customized_pctype(pf, I40E_CUSTOMIZED_GTPU_IPV6); break; + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV4_L2TPV3); + else if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV6_L2TPV3); + break; + case RTE_FLOW_ITEM_TYPE_ESP: + if (!filter->input.flow_ext.is_udp) { + if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4); + else if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6); + } else { + if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4_UDP); + else if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6_UDP); + filter->input.flow_ext.is_udp = false; + } + break; default: PMD_DRV_LOG(ERR, "Unsupported item type"); break; } - if (cus_pctype) + if (cus_pctype && cus_pctype->valid) return cus_pctype->pctype; return I40E_FILTER_PCTYPE_INVALID; } +static void +i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter, + const struct rte_flow_item_esp *esp_spec) +{ + if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV4) { + if (filter->input.flow_ext.is_udp) + filter->input.flow.esp_ipv4_udp_flow.spi = + esp_spec->hdr.spi; + else + filter->input.flow.esp_ipv4_flow.spi = + esp_spec->hdr.spi; + } + if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) { + if (filter->input.flow_ext.is_udp) + filter->input.flow.esp_ipv6_udp_flow.spi = + esp_spec->hdr.spi; + else + filter->input.flow.esp_ipv6_flow.spi = + esp_spec->hdr.spi; + } +} + /* 1. Last in item should be NULL as range is not supported. * 2. Supported patterns: refer to array i40e_supported_patterns. * 3. 
Default supported flow type and input set: refer to array @@ -2409,6 +2567,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, */ static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, struct rte_flow_error *error, struct i40e_fdir_filter_conf *filter) @@ -2423,13 +2582,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, const struct rte_flow_item_udp *udp_spec, *udp_mask; const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; + const struct rte_flow_item_esp *esp_spec, *esp_mask; const struct rte_flow_item_raw *raw_spec, *raw_mask; const struct rte_flow_item_vf *vf_spec; + const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask; uint8_t pctype = 0; uint64_t input_set = I40E_INSET_NONE; uint16_t frag_off; enum rte_flow_item_type item_type; + enum rte_flow_item_type next_type; enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END; uint32_t i, j; @@ -2446,7 +2608,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, uint16_t flex_size; bool cfg_flex_pit = true; bool cfg_flex_msk = true; - uint16_t outer_tpid; uint16_t ether_type; uint32_t vtc_flow_cpu; bool outer_ip = true; @@ -2455,7 +2616,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, memset(off_arr, 0, sizeof(off_arr)); memset(len_arr, 0, sizeof(len_arr)); memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN); - outer_tpid = i40e_get_outer_vlan(dev); filter->input.flow_ext.customized_pctype = false; for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { if (item->last) { @@ -2468,37 +2628,71 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; + next_type = (item + 1)->type; + + if (next_type == RTE_FLOW_ITEM_TYPE_END && + (!eth_spec || !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "NULL eth spec/mask."); + return -rte_errno; + } if (eth_spec && eth_mask) { - if (!is_zero_ether_addr(ð_mask->src) || - !is_zero_ether_addr(ð_mask->dst)) { + if (rte_is_broadcast_ether_addr(ð_mask->dst) && + rte_is_zero_ether_addr(ð_mask->src)) { + filter->input.flow.l2_flow.dst = + eth_spec->dst; + input_set |= I40E_INSET_DMAC; + } else if (rte_is_zero_ether_addr(ð_mask->dst) && + rte_is_broadcast_ether_addr(ð_mask->src)) { + filter->input.flow.l2_flow.src = + eth_spec->src; + input_set |= I40E_INSET_SMAC; + } else if (rte_is_broadcast_ether_addr(ð_mask->dst) && + rte_is_broadcast_ether_addr(ð_mask->src)) { + filter->input.flow.l2_flow.dst = + eth_spec->dst; + filter->input.flow.l2_flow.src = + eth_spec->src; + input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC); + } else if (!rte_is_zero_ether_addr(ð_mask->src) || + !rte_is_zero_ether_addr(ð_mask->dst)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid MAC_addr mask."); return -rte_errno; } - - if ((eth_mask->type & UINT16_MAX) == - UINT16_MAX) { - input_set |= I40E_INSET_LAST_ETHER_TYPE; - filter->input.flow.l2_flow.ether_type = - eth_spec->type; + } + if (eth_spec && eth_mask && + next_type == RTE_FLOW_ITEM_TYPE_END) { + if (eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid type mask."); + return 
-rte_errno; } ether_type = rte_be_to_cpu_16(eth_spec->type); - if (ether_type == ETHER_TYPE_IPv4 || - ether_type == ETHER_TYPE_IPv6 || - ether_type == ETHER_TYPE_ARP || - ether_type == outer_tpid) { + + if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || + ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6 || + ether_type == i40e_get_outer_vlan(dev)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Unsupported ether_type."); return -rte_errno; } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + eth_spec->type; } pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; @@ -2506,10 +2700,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; + + RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE)); if (vlan_spec && vlan_mask) { if (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) { @@ -2518,6 +2712,32 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, vlan_spec->tci; } } + if (vlan_spec && vlan_mask && vlan_mask->inner_type) { + if (vlan_mask->inner_type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner_type" + " mask."); + return -rte_errno; + } + + ether_type = + rte_be_to_cpu_16(vlan_spec->inner_type); + + if (ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6 || + ether_type == i40e_get_outer_vlan(dev)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported inner_type."); + return -rte_errno; + } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + vlan_spec->inner_type; + } pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; layer_idx = I40E_FLXPLD_L2_IDX; @@ -2525,10 +2745,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_IPV4: l3 = RTE_FLOW_ITEM_TYPE_IPV4; - ipv4_spec = - (const struct rte_flow_item_ipv4 *)item->spec; - ipv4_mask = - (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_spec = item->spec; + ipv4_mask = item->mask; pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; layer_idx = I40E_FLXPLD_L3_IDX; @@ -2560,25 +2778,45 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, /* Check if it is fragment. 
*/ frag_off = ipv4_spec->hdr.fragment_offset; frag_off = rte_be_to_cpu_16(frag_off); - if (frag_off & IPV4_HDR_OFFSET_MASK || - frag_off & IPV4_HDR_MF_FLAG) + if (frag_off & RTE_IPV4_HDR_OFFSET_MASK || + frag_off & RTE_IPV4_HDR_MF_FLAG) pctype = I40E_FILTER_PCTYPE_FRAG_IPV4; - /* Get the filter info */ - filter->input.flow.ip4_flow.proto = - ipv4_spec->hdr.next_proto_id; - filter->input.flow.ip4_flow.tos = - ipv4_spec->hdr.type_of_service; - filter->input.flow.ip4_flow.ttl = - ipv4_spec->hdr.time_to_live; - filter->input.flow.ip4_flow.src_ip = - ipv4_spec->hdr.src_addr; - filter->input.flow.ip4_flow.dst_ip = - ipv4_spec->hdr.dst_addr; + if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) { + if (input_set & (I40E_INSET_IPV4_SRC | + I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 and L3 input set are exclusive."); + return -rte_errno; + } + } else { + /* Get the filter info */ + filter->input.flow.ip4_flow.proto = + ipv4_spec->hdr.next_proto_id; + filter->input.flow.ip4_flow.tos = + ipv4_spec->hdr.type_of_service; + filter->input.flow.ip4_flow.ttl = + ipv4_spec->hdr.time_to_live; + filter->input.flow.ip4_flow.src_ip = + ipv4_spec->hdr.src_addr; + filter->input.flow.ip4_flow.dst_ip = + ipv4_spec->hdr.dst_addr; + + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV4; + } } else if (!ipv4_spec && !ipv4_mask && !outer_ip) { filter->input.flow_ext.inner_ip = true; filter->input.flow_ext.iip_type = I40E_FDIR_IPTYPE_IPV4; + } else if (!ipv4_spec && !ipv4_mask && outer_ip) { + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV4; } else if ((ipv4_spec || ipv4_mask) && !outer_ip) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2593,10 +2831,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_IPV6: l3 = RTE_FLOW_ITEM_TYPE_IPV6; - ipv6_spec = - (const struct rte_flow_item_ipv6 *)item->spec; - ipv6_mask = - (const struct rte_flow_item_ipv6 *)item->mask; + ipv6_spec = item->spec; + ipv6_mask = item->mask; pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; layer_idx = I40E_FLXPLD_L3_IDX; @@ -2639,6 +2875,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, filter->input.flow.ipv6_flow.hop_limits = ipv6_spec->hdr.hop_limits; + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV6; + rte_memcpy(filter->input.flow.ipv6_flow.src_ip, ipv6_spec->hdr.src_addr, 16); rte_memcpy(filter->input.flow.ipv6_flow.dst_ip, @@ -2652,6 +2892,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, filter->input.flow_ext.inner_ip = true; filter->input.flow_ext.iip_type = I40E_FDIR_IPTYPE_IPV6; + } else if (!ipv6_spec && !ipv6_mask && outer_ip) { + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV6; } else if ((ipv6_spec || ipv6_mask) && !outer_ip) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2664,8 +2908,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, outer_ip = false; break; case RTE_FLOW_ITEM_TYPE_TCP: - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_spec = item->spec; + tcp_mask = item->mask; if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) pctype = @@ -2694,17 +2938,28 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, if (tcp_mask->hdr.dst_port == 
UINT16_MAX) input_set |= I40E_INSET_DST_PORT; - /* Get filter info */ - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { - filter->input.flow.tcp4_flow.src_port = - tcp_spec->hdr.src_port; - filter->input.flow.tcp4_flow.dst_port = - tcp_spec->hdr.dst_port; - } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { - filter->input.flow.tcp6_flow.src_port = - tcp_spec->hdr.src_port; - filter->input.flow.tcp6_flow.dst_port = - tcp_spec->hdr.dst_port; + if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) { + if (input_set & + (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 and L4 input set are exclusive."); + return -rte_errno; + } + } else { + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.flow.tcp4_flow.src_port = + tcp_spec->hdr.src_port; + filter->input.flow.tcp4_flow.dst_port = + tcp_spec->hdr.dst_port; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.flow.tcp6_flow.src_port = + tcp_spec->hdr.src_port; + filter->input.flow.tcp6_flow.dst_port = + tcp_spec->hdr.dst_port; + } } } @@ -2712,8 +2967,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_UDP: - udp_spec = (const struct rte_flow_item_udp *)item->spec; - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_spec = item->spec; + udp_mask = item->mask; if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) pctype = @@ -2738,20 +2993,31 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, if (udp_mask->hdr.dst_port == UINT16_MAX) input_set |= I40E_INSET_DST_PORT; - /* Get filter info */ - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { - filter->input.flow.udp4_flow.src_port = - udp_spec->hdr.src_port; - filter->input.flow.udp4_flow.dst_port = - udp_spec->hdr.dst_port; - } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { - filter->input.flow.udp6_flow.src_port = - udp_spec->hdr.src_port; - filter->input.flow.udp6_flow.dst_port = - udp_spec->hdr.dst_port; + if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) { + if (input_set & + (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 and L4 input set are exclusive."); + return -rte_errno; + } + } else { + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.flow.udp4_flow.src_port = + udp_spec->hdr.src_port; + filter->input.flow.udp4_flow.dst_port = + udp_spec->hdr.dst_port; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.flow.udp6_flow.src_port = + udp_spec->hdr.src_port; + filter->input.flow.udp6_flow.dst_port = + udp_spec->hdr.dst_port; + } } } - + filter->input.flow_ext.is_udp = true; layer_idx = I40E_FLXPLD_L4_IDX; break; @@ -2765,8 +3031,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, return -rte_errno; } - gtp_spec = (const struct rte_flow_item_gtp *)item->spec; - gtp_mask = (const struct rte_flow_item_gtp *)item->mask; + gtp_spec = item->spec; + gtp_mask = item->mask; if (gtp_spec && gtp_mask) { if (gtp_mask->v_pt_rsv_flags || @@ -2786,11 +3052,42 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, cus_proto = item_type; } break; + case RTE_FLOW_ITEM_TYPE_ESP: + if (!pf->esp_support) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ESP protocol"); + return -rte_errno; + } + + esp_spec = item->spec; + esp_mask = item->mask; + + if (!esp_spec || !esp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ESP item"); + return -rte_errno; + } + + if (esp_spec && esp_mask) { + if 
(esp_mask->hdr.spi != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ESP mask"); + return -rte_errno; + } + i40e_flow_set_filter_spi(filter, esp_spec); + filter->input.flow_ext.customized_pctype = true; + cus_proto = item_type; + } + break; case RTE_FLOW_ITEM_TYPE_SCTP: - sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; + sctp_spec = item->spec; + sctp_mask = item->mask; if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) pctype = @@ -2838,8 +3135,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_RAW: - raw_spec = (const struct rte_flow_item_raw *)item->spec; - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_spec = item->spec; + raw_mask = item->mask; if (!raw_spec || !raw_mask) { rte_flow_error_set(error, EINVAL, @@ -2849,6 +3146,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, return -rte_errno; } + if (pf->support_multi_driver) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported flexible payload."); + return -rte_errno; + } + ret = i40e_flow_check_raw_item(item, raw_spec, error); if (ret < 0) return ret; @@ -2907,7 +3212,17 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, raw_id++; break; case RTE_FLOW_ITEM_TYPE_VF: - vf_spec = (const struct rte_flow_item_vf *)item->spec; + vf_spec = item->spec; + if (!attr->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Matching VF traffic" + " without affecting it" + " (transfer attribute)" + " is unsupported"); + return -rte_errno; + } filter->input.flow_ext.is_vf = 1; filter->input.flow_ext.dst_id = vf_spec->id; if (filter->input.flow_ext.is_vf && @@ -2919,6 +3234,36 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, return -rte_errno; } break; + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + l2tpv3oip_spec = item->spec; + l2tpv3oip_mask = item->mask; + + if (!l2tpv3oip_spec || !l2tpv3oip_mask) + break; + + if (l2tpv3oip_mask->session_id != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid L2TPv3 mask"); + return -rte_errno; + } + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.flow.ip4_l2tpv3oip_flow.session_id = + l2tpv3oip_spec->session_id; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV4; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.flow.ip6_l2tpv3oip_flow.session_id = + l2tpv3oip_spec->session_id; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV6; + } + + filter->input.flow_ext.customized_pctype = true; + cus_proto = item_type; + break; default: break; } @@ -2992,14 +3337,14 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); const struct rte_flow_action *act; const struct rte_flow_action_queue *act_q; - const struct rte_flow_action_mark *mark_spec; + const struct rte_flow_action_mark *mark_spec = NULL; uint32_t index = 0; /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. 
*/ NEXT_ITEM_OF_ACTION(act, actions, index); switch (act->type) { case RTE_FLOW_ACTION_TYPE_QUEUE: - act_q = (const struct rte_flow_action_queue *)act->conf; + act_q = act->conf; filter->action.rx_queue = act_q->index; if ((!filter->input.flow_ext.is_vf && filter->action.rx_queue >= pf->dev_data->nb_rx_queues) || @@ -3018,6 +3363,12 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_PASSTHRU: filter->action.behavior = I40E_FDIR_PASSTHRU; break; + case RTE_FLOW_ACTION_TYPE_MARK: + filter->action.behavior = I40E_FDIR_PASSTHRU; + mark_spec = act->conf; + filter->action.report_status = I40E_FDIR_REPORT_ID; + filter->soft_id = mark_spec->id; + break; default: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, @@ -3030,13 +3381,36 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, NEXT_ITEM_OF_ACTION(act, actions, index); switch (act->type) { case RTE_FLOW_ACTION_TYPE_MARK: - mark_spec = (const struct rte_flow_action_mark *)act->conf; + if (mark_spec) { + /* Double MARK actions requested */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + mark_spec = act->conf; filter->action.report_status = I40E_FDIR_REPORT_ID; filter->soft_id = mark_spec->id; break; case RTE_FLOW_ACTION_TYPE_FLAG: + if (mark_spec) { + /* MARK + FLAG not supported */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS; break; + case RTE_FLOW_ACTION_TYPE_RSS: + if (filter->action.behavior != I40E_FDIR_PASSTHRU) { + /* RSS filter won't be next if FDIR did not pass thru */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + break; case RTE_FLOW_ACTION_TYPE_END: return 0; default: @@ -3066,11 +3440,13 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, struct rte_flow_error *error, union i40e_filter_t *filter) { + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_fdir_filter_conf *fdir_filter = &filter->fdir_filter; int ret; - ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter); + ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error, + fdir_filter); if (ret) return ret; @@ -3084,16 +3460,32 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, cons_filter_type = RTE_ETH_FILTER_FDIR; - if (dev->data->dev_conf.fdir_conf.mode != - RTE_FDIR_MODE_PERFECT) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "Check the mode in fdir_conf."); - return -rte_errno; + if (pf->fdir.fdir_vsi == NULL) { + /* Enable fdir when fdir flow is added at first time. 
*/ + ret = i40e_fdir_setup(pf); + if (ret != I40E_SUCCESS) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to setup fdir."); + return -rte_errno; + } + ret = i40e_fdir_configure(dev); + if (ret < 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to configure fdir."); + goto err; + } } + /* If create the first fdir rule, enable fdir check for rx queues */ + if (TAILQ_EMPTY(&pf->fdir.fdir_list)) + i40e_fdir_rx_proc_enable(dev, 1); + return 0; +err: + i40e_fdir_teardown(pf); + return -rte_errno; } /* Parse to get the action info of a tunnel filter @@ -3121,7 +3513,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, } if (act->type == RTE_FLOW_ACTION_TYPE_VF) { - act_vf = (const struct rte_flow_action_vf *)act->conf; + act_vf = act->conf; filter->vf_id = act_vf->id; filter->is_to_vf = 1; if (filter->vf_id >= pf->vf_num) { @@ -3136,7 +3528,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, index++; NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - act_q = (const struct rte_flow_action_queue *)act->conf; + act_q = act->conf; filter->queue_id = act_q->index; if ((!filter->is_to_vf) && (filter->queue_id >= pf->dev_data->nb_rx_queues)) { @@ -3165,57 +3557,23 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, return 0; } -static uint16_t i40e_supported_tunnel_filter_types[] = { - ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID | - ETH_TUNNEL_FILTER_IVLAN, - ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN, - ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID, - ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID | - ETH_TUNNEL_FILTER_IMAC, - ETH_TUNNEL_FILTER_IMAC, -}; - -static int -i40e_check_tunnel_filter_type(uint8_t filter_type) -{ - uint8_t i; - - for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) { - if (filter_type == i40e_supported_tunnel_filter_types[i]) - return 0; - } - - return -1; -} - /* 1. Last in item should be NULL as range is not supported. - * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN, - * IMAC_TENID, OMAC_TENID_IMAC and IMAC. + * 2. Supported filter types: Source port only and Destination port only. * 3. Mask of fields which need to be matched should be * filled with 1. * 4. Mask of fields which needn't to be matched should be * filled with 0. 
*/ static int -i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, - const struct rte_flow_item *pattern, - struct rte_flow_error *error, - struct i40e_tunnel_filter_conf *filter) +i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) { + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; const struct rte_flow_item *item = pattern; - const struct rte_flow_item_eth *eth_spec; - const struct rte_flow_item_eth *eth_mask; - const struct rte_flow_item_vxlan *vxlan_spec; - const struct rte_flow_item_vxlan *vxlan_mask; - const struct rte_flow_item_vlan *vlan_spec; - const struct rte_flow_item_vlan *vlan_mask; - uint8_t filter_type = 0; - bool is_vni_masked = 0; - uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; enum rte_flow_item_type item_type; - bool vxlan_flag = 0; - uint32_t tenant_id_be = 0; - int ret; for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { if (item->last) { @@ -3228,67 +3586,317 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; - - /* Check if ETH item is used for place holder. - * If yes, both spec and mask should be NULL. - * If no, both spec and mask shouldn't be NULL. - */ - if ((!eth_spec && eth_mask) || - (eth_spec && !eth_mask)) { + if (item->spec || item->mask) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "Invalid ether spec/mask"); + "Invalid ETH item"); return -rte_errno; } - if (eth_spec && eth_mask) { - /* DST address of inner MAC shouldn't be masked. - * SRC address of Inner MAC should be masked. - */ - if (!is_broadcast_ether_addr(ð_mask->dst) || - !is_zero_ether_addr(ð_mask->src) || - eth_mask->type) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid ether spec/mask"); - return -rte_errno; - } - - if (!vxlan_flag) { - rte_memcpy(&filter->outer_mac, - ð_spec->dst, - ETHER_ADDR_LEN); - filter_type |= ETH_TUNNEL_FILTER_OMAC; - } else { - rte_memcpy(&filter->inner_mac, - ð_spec->dst, - ETHER_ADDR_LEN); - filter_type |= ETH_TUNNEL_FILTER_IMAC; - } - } break; - case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; - if (!(vlan_spec && vlan_mask)) { + case RTE_FLOW_ITEM_TYPE_IPV4: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4; + /* IPv4 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "Invalid vlan item"); + "Invalid IPv4 item"); return -rte_errno; } - if (vlan_spec && vlan_mask) { - if (vlan_mask->tci == - rte_cpu_to_be_16(I40E_TCI_MASK)) - filter->inner_vlan = - rte_be_to_cpu_16(vlan_spec->tci) & + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6; + /* IPv6 is used to describe protocol, + * spec and mask should be NULL. 
+ */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 item"); + return -rte_errno; + } + + break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + if (!udp_spec || !udp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid udp item"); + return -rte_errno; + } + + if (udp_spec->hdr.src_port != 0 && + udp_spec->hdr.dst_port != 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid udp spec"); + return -rte_errno; + } + + if (udp_spec->hdr.src_port != 0) { + filter->l4_port_type = + I40E_L4_PORT_TYPE_SRC; + filter->tenant_id = + rte_be_to_cpu_32(udp_spec->hdr.src_port); + } + + if (udp_spec->hdr.dst_port != 0) { + filter->l4_port_type = + I40E_L4_PORT_TYPE_DST; + filter->tenant_id = + rte_be_to_cpu_32(udp_spec->hdr.dst_port); + } + + filter->tunnel_type = I40E_CLOUD_TYPE_UDP; + + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + if (!tcp_spec || !tcp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid tcp item"); + return -rte_errno; + } + + if (tcp_spec->hdr.src_port != 0 && + tcp_spec->hdr.dst_port != 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid tcp spec"); + return -rte_errno; + } + + if (tcp_spec->hdr.src_port != 0) { + filter->l4_port_type = + I40E_L4_PORT_TYPE_SRC; + filter->tenant_id = + rte_be_to_cpu_32(tcp_spec->hdr.src_port); + } + + if (tcp_spec->hdr.dst_port != 0) { + filter->l4_port_type = + I40E_L4_PORT_TYPE_DST; + filter->tenant_id = + rte_be_to_cpu_32(tcp_spec->hdr.dst_port); + } + + filter->tunnel_type = I40E_CLOUD_TYPE_TCP; + + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + sctp_spec = item->spec; + sctp_mask = item->mask; + + if (!sctp_spec || !sctp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid sctp item"); + return -rte_errno; + } + + if (sctp_spec->hdr.src_port != 0 && + sctp_spec->hdr.dst_port != 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid sctp spec"); + return -rte_errno; + } + + if (sctp_spec->hdr.src_port != 0) { + filter->l4_port_type = + I40E_L4_PORT_TYPE_SRC; + filter->tenant_id = + rte_be_to_cpu_32(sctp_spec->hdr.src_port); + } + + if (sctp_spec->hdr.dst_port != 0) { + filter->l4_port_type = + I40E_L4_PORT_TYPE_DST; + filter->tenant_id = + rte_be_to_cpu_32(sctp_spec->hdr.dst_port); + } + + filter->tunnel_type = I40E_CLOUD_TYPE_SCTP; + + break; + default: + break; + } + } + + return 0; +} + +static int +i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_tunnel_filter_conf *tunnel_filter = + &filter->consistent_tunnel_filter; + int ret; + + ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_TUNNEL; + + return ret; +} + +static uint16_t i40e_supported_tunnel_filter_types[] = { + ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID | + ETH_TUNNEL_FILTER_IVLAN, + ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN, + ETH_TUNNEL_FILTER_IMAC | 
ETH_TUNNEL_FILTER_TENID, + ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID | + ETH_TUNNEL_FILTER_IMAC, + ETH_TUNNEL_FILTER_IMAC, +}; + +static int +i40e_check_tunnel_filter_type(uint8_t filter_type) +{ + uint8_t i; + + for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) { + if (filter_type == i40e_supported_tunnel_filter_types[i]) + return 0; + } + + return -1; +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN, + * IMAC_TENID, OMAC_TENID_IMAC and IMAC. + * 3. Mask of fields which need to be matched should be + * filled with 1. + * 4. Mask of fields which needn't to be matched should be + * filled with 0. + */ +static int +i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_vxlan *vxlan_spec; + const struct rte_flow_item_vxlan *vxlan_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + uint8_t filter_type = 0; + bool is_vni_masked = 0; + uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; + enum rte_flow_item_type item_type; + bool vxlan_flag = 0; + uint32_t tenant_id_be = 0; + int ret; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + + /* Check if ETH item is used for place holder. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!eth_spec && eth_mask) || + (eth_spec && !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ether spec/mask"); + return -rte_errno; + } + + if (eth_spec && eth_mask) { + /* DST address of inner MAC shouldn't be masked. + * SRC address of Inner MAC should be masked. 
+ */ + if (!rte_is_broadcast_ether_addr(ð_mask->dst) || + !rte_is_zero_ether_addr(ð_mask->src) || + eth_mask->type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ether spec/mask"); + return -rte_errno; + } + + if (!vxlan_flag) { + rte_memcpy(&filter->outer_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + filter_type |= ETH_TUNNEL_FILTER_OMAC; + } else { + rte_memcpy(&filter->inner_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + filter_type |= ETH_TUNNEL_FILTER_IMAC; + } + } + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid vlan item"); + return -rte_errno; + } + + if (vlan_spec && vlan_mask) { + if (vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) + filter->inner_vlan = + rte_be_to_cpu_16(vlan_spec->tci) & I40E_TCI_MASK; filter_type |= ETH_TUNNEL_FILTER_IVLAN; } @@ -3332,10 +3940,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_VXLAN: - vxlan_spec = - (const struct rte_flow_item_vxlan *)item->spec; - vxlan_mask = - (const struct rte_flow_item_vxlan *)item->mask; + vxlan_spec = item->spec; + vxlan_mask = item->mask; /* Check if VXLAN item is used to describe protocol. * If yes, both spec and mask should be NULL. * If no, both spec and mask shouldn't be NULL. @@ -3461,8 +4067,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Check if ETH item is used for place holder. * If yes, both spec and mask should be NULL. @@ -3481,8 +4087,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, /* DST address of inner MAC shouldn't be masked. * SRC address of Inner MAC should be masked. */ - if (!is_broadcast_ether_addr(ð_mask->dst) || - !is_zero_ether_addr(ð_mask->src) || + if (!rte_is_broadcast_ether_addr(ð_mask->dst) || + !rte_is_zero_ether_addr(ð_mask->src) || eth_mask->type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -3494,23 +4100,22 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, if (!nvgre_flag) { rte_memcpy(&filter->outer_mac, ð_spec->dst, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); filter_type |= ETH_TUNNEL_FILTER_OMAC; } else { rte_memcpy(&filter->inner_mac, ð_spec->dst, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); filter_type |= ETH_TUNNEL_FILTER_IMAC; } } break; case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; - if (!(vlan_spec && vlan_mask)) { + vlan_spec = item->spec; + vlan_mask = item->mask; + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -3554,10 +4159,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_NVGRE: - nvgre_spec = - (const struct rte_flow_item_nvgre *)item->spec; - nvgre_mask = - (const struct rte_flow_item_nvgre *)item->mask; + nvgre_spec = item->spec; + nvgre_mask = item->mask; /* Check if NVGRE item is used to describe protocol. * If yes, both spec and mask should be NULL. * If no, both spec and mask shouldn't be NULL. 
@@ -3768,10 +4371,8 @@ i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_MPLS: - mpls_spec = - (const struct rte_flow_item_mpls *)item->spec; - mpls_mask = - (const struct rte_flow_item_mpls *)item->mask; + mpls_spec = item->spec; + mpls_mask = item->mask; if (!mpls_spec || !mpls_mask) { rte_flow_error_set(error, EINVAL, @@ -3907,10 +4508,8 @@ i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_GTPC: case RTE_FLOW_ITEM_TYPE_GTPU: - gtp_spec = - (const struct rte_flow_item_gtp *)item->spec; - gtp_mask = - (const struct rte_flow_item_gtp *)item->mask; + gtp_spec = item->spec; + gtp_mask = item->mask; if (!gtp_spec || !gtp_mask) { rte_flow_error_set(error, EINVAL, @@ -4021,12 +4620,11 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; - if (!(vlan_spec && vlan_mask)) { + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -4101,42 +4699,612 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, return ret; } +/** + * This function is used to do configuration i40e existing RSS with rte_flow. + * It also enable queue region configuration using flow API for i40e. + * pattern can be used indicate what parameters will be include in flow, + * like user_priority or flowtype for queue region or HASH function for RSS. + * Action is used to transmit parameter like queue index and HASH + * function for RSS, or flowtype for queue region configuration. + * For example: + * pattern: + * Case 1: try to transform patterns to pctype. valid pctype will be + * used in parse action. + * Case 2: only ETH, indicate flowtype for queue region will be parsed. + * Case 3: only VLAN, indicate user_priority for queue region will be parsed. + * So, pattern choice is depened on the purpose of configuration of + * that flow. + * action: + * action RSS will be used to transmit valid parameter with + * struct rte_flow_action_rss for all the 3 case. 
+ */ static int -i40e_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_rss_pattern_info *p_info, + struct i40e_queue_regions *info) { - struct rte_flow_item *items; /* internal pattern w/o VOID items */ - parse_filter_t parse_filter; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + struct rte_flow_item *items; uint32_t item_num = 0; /* non-void item number of pattern*/ uint32_t i = 0; - bool flag = false; - int ret = I40E_NOT_SUPPORTED; - - if (!pattern) { - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, - NULL, "NULL pattern."); - return -rte_errno; + static const struct { + enum rte_flow_item_type *item_array; + uint64_t type; + } i40e_rss_pctype_patterns[] = { + { pattern_fdir_ipv4, + ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER }, + { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP }, + { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP }, + { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP }, + { pattern_fdir_ipv4_esp, ETH_RSS_ESP }, + { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP }, + { pattern_fdir_ipv6, + ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER }, + { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP }, + { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP }, + { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP }, + { pattern_ethertype, ETH_RSS_L2_PAYLOAD }, + { pattern_fdir_ipv6_esp, ETH_RSS_ESP }, + { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP }, + }; + + p_info->types = I40E_RSS_TYPE_INVALID; + + if (item->type == RTE_FLOW_ITEM_TYPE_END) { + p_info->types = I40E_RSS_TYPE_NONE; + return 0; } - if (!actions) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_NUM, - NULL, "NULL action."); - return -rte_errno; + /* Convert pattern to RSS offload types */ + while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) { + if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) + item_num++; + i++; } + item_num++; - if (!attr) { + items = rte_zmalloc("i40e_pattern", + item_num * sizeof(struct rte_flow_item), 0); + if (!items) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "No memory for PMD internal items."); + return -ENOMEM; + } + + i40e_pattern_skip_void_item(items, pattern); + + for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) { + if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array, + items)) { + p_info->types = i40e_rss_pctype_patterns[i].type; + break; + } + } + + rte_free(items); + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + p_info->action_flag = 1; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (vlan_spec && vlan_mask) { + if (vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) { + info->region[0].user_priority[0] = + (rte_be_to_cpu_16( + vlan_spec->tci) >> 13) & 0x7; + info->region[0].user_priority_num = 1; + info->queue_region_number = 1; + p_info->action_flag = 0; + } + } + break; + default: + p_info->action_flag = 0; + memset(info, 0, sizeof(struct 
i40e_queue_regions)); + return 0; + } + } + + return 0; +} + +/** + * This function is used to parse RSS queue index, total queue number and + * hash functions, If the purpose of this configuration is for queue region + * configuration, it will set queue_region_conf flag to TRUE, else to FALSE. + * In queue region configuration, it also need to parse hardware flowtype + * and user_priority from configuration, it will also cheeck the validity + * of these parameters. For example, The queue region sizes should + * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the + * hw_flowtype or PCTYPE max index should be 63, the user priority + * max index should be 7, and so on. And also, queue index should be + * continuous sequence and queue region index should be part of RSS + * queue index for this port. + * For hash params, the pctype in action and pattern must be same. + * Set queue index must be with non-types. + */ +static int +i40e_flow_parse_rss_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct i40e_rss_pattern_info p_info, + struct i40e_queue_regions *conf_info, + union i40e_filter_t *filter) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_rte_flow_rss_conf *rss_config = + &filter->rss_conf; + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + uint16_t i, j, n, m, tmp, nb_types; + uint32_t index = 0; + uint64_t hf_bit = 1; + + static const struct { + uint64_t rss_type; + enum i40e_filter_pctype pctype; + } pctype_match_table[] = { + {ETH_RSS_FRAG_IPV4, + I40E_FILTER_PCTYPE_FRAG_IPV4}, + {ETH_RSS_NONFRAG_IPV4_TCP, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP}, + {ETH_RSS_NONFRAG_IPV4_UDP, + I40E_FILTER_PCTYPE_NONF_IPV4_UDP}, + {ETH_RSS_NONFRAG_IPV4_SCTP, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP}, + {ETH_RSS_NONFRAG_IPV4_OTHER, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER}, + {ETH_RSS_FRAG_IPV6, + I40E_FILTER_PCTYPE_FRAG_IPV6}, + {ETH_RSS_NONFRAG_IPV6_TCP, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP}, + {ETH_RSS_NONFRAG_IPV6_UDP, + I40E_FILTER_PCTYPE_NONF_IPV6_UDP}, + {ETH_RSS_NONFRAG_IPV6_SCTP, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP}, + {ETH_RSS_NONFRAG_IPV6_OTHER, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER}, + {ETH_RSS_L2_PAYLOAD, + I40E_FILTER_PCTYPE_L2_PAYLOAD}, + }; + + static const struct { + uint64_t rss_type; + enum i40e_filter_pctype pctype; + } pctype_match_table_x722[] = { + {ETH_RSS_NONFRAG_IPV4_TCP, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK}, + {ETH_RSS_NONFRAG_IPV4_UDP, + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP}, + {ETH_RSS_NONFRAG_IPV4_UDP, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP}, + {ETH_RSS_NONFRAG_IPV6_TCP, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK}, + {ETH_RSS_NONFRAG_IPV6_UDP, + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP}, + {ETH_RSS_NONFRAG_IPV6_UDP, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP}, + }; + + NEXT_ITEM_OF_ACTION(act, actions, index); + rss = act->conf; + + /** + * RSS only supports forwarding, + * check if the first not void action is RSS. 
+ */ + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (p_info.action_flag && rss->queue_num) { + for (j = 0; j < RTE_DIM(pctype_match_table); j++) { + if (rss->types & pctype_match_table[j].rss_type) { + conf_info->region[0].hw_flowtype[0] = + (uint8_t)pctype_match_table[j].pctype; + conf_info->region[0].flowtype_num = 1; + conf_info->queue_region_number = 1; + break; + } + } + + if (hw->mac.type == I40E_MAC_X722) + for (j = 0; j < RTE_DIM(pctype_match_table_x722); j++) { + if (rss->types & + pctype_match_table_x722[j].rss_type) { + m = conf_info->region[0].flowtype_num; + conf_info->region[0].hw_flowtype[m] = + pctype_match_table_x722[j].pctype; + conf_info->region[0].flowtype_num++; + conf_info->queue_region_number = 1; + } + } + } + + /** + * Do some queue region related parameters check + * in order to keep queue index for queue region to be + * continuous sequence and also to be part of RSS + * queue index for this port. + */ + if (conf_info->queue_region_number) { + for (i = 0; i < rss->queue_num; i++) { + for (j = 0; j < rss_info->conf.queue_num; j++) { + if (rss->queue[i] == rss_info->conf.queue[j]) + break; + } + if (j == rss_info->conf.queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + } + + /* Parse queue region related parameters from configuration */ + for (n = 0; n < conf_info->queue_region_number; n++) { + if (conf_info->region[n].user_priority_num || + conf_info->region[n].flowtype_num) { + if (!((rte_is_power_of_2(rss->queue_num)) && + rss->queue_num <= 64)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); + return -rte_errno; + } + + if (conf_info->region[n].user_priority[n] >= + I40E_MAX_USER_PRIORITY) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the user priority max index is 7"); + return -rte_errno; + } + + if (conf_info->region[n].hw_flowtype[n] >= + I40E_FILTER_PCTYPE_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the hw_flowtype or PCTYPE max index is 63"); + return -rte_errno; + } + + for (i = 0; i < info->queue_region_number; i++) { + if (info->region[i].queue_num == + rss->queue_num && + info->region[i].queue_start_index == + rss->queue[0]) + break; + } + + if (i == info->queue_region_number) { + if (i > I40E_REGION_MAX_INDEX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the queue region max index is 7"); + return -rte_errno; + } + + info->region[i].queue_num = + rss->queue_num; + info->region[i].queue_start_index = + rss->queue[0]; + info->region[i].region_id = + info->queue_region_number; + + j = info->region[i].user_priority_num; + tmp = conf_info->region[n].user_priority[0]; + if (conf_info->region[n].user_priority_num) { + info->region[i].user_priority[j] = tmp; + info->region[i].user_priority_num++; + } + + for (m = 0; m < conf_info->region[n].flowtype_num; m++) { + j = 
+					j = info->region[i].flowtype_num;
+					tmp = conf_info->region[n].hw_flowtype[m];
+					info->region[i].hw_flowtype[j] = tmp;
+					info->region[i].flowtype_num++;
+				}
+				info->queue_region_number++;
+			} else {
+				j = info->region[i].user_priority_num;
+				tmp = conf_info->region[n].user_priority[0];
+				if (conf_info->region[n].user_priority_num) {
+					info->region[i].user_priority[j] = tmp;
+					info->region[i].user_priority_num++;
+				}
+
+				for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
+					j = info->region[i].flowtype_num;
+					tmp = conf_info->region[n].hw_flowtype[m];
+					info->region[i].hw_flowtype[j] = tmp;
+					info->region[i].flowtype_num++;
+				}
+			}
+		}
+
+		rss_config->queue_region_conf = TRUE;
+	}
+
+	/**
+	 * Return directly if this flow is used for queue region configuration.
+	 */
+	if (rss_config->queue_region_conf)
+		return 0;
+
+	if (!rss) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act,
+				"invalid rule");
+		return -rte_errno;
+	}
+
+	for (n = 0; n < rss->queue_num; n++) {
+		if (rss->queue[n] >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act,
+				"queue id > max number of queues");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->queue_num && (p_info.types || rss->types))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"RSS types must be empty while configuring queue region");
+
+	/* validate pattern and pctype */
+	if (!(rss->types & p_info.types) &&
+	    (rss->types || p_info.types) && !rss->queue_num)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "invalid pctype");
+
+	nb_types = 0;
+	for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
+		if (rss->types & (hf_bit << n))
+			nb_types++;
+		if (nb_types > 1)
+			return rte_flow_error_set
+				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "multi pctype is not supported");
+	}
+
+	if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+	    (p_info.types || rss->types || rss->queue_num))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"pattern, type and queues must be empty while"
+			" setting hash function as simple_xor");
+
+	if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
+	    !(p_info.types && rss->types))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"pctype and queues can not be empty while"
+			" setting hash function as symmetric toeplitz");
+
+	/* Parse RSS related parameters from configuration */
+	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
+	    rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"a nonzero RSS encapsulation level is not supported");
+	if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"RSS hash key too large");
+	if (rss->queue_num > RTE_DIM(rss_config->queue))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"too many queues for RSS context");
+	if (i40e_rss_conf_init(rss_config, rss))
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			"RSS context initialization failure");
+
+	index++;
+
+	/* check if the next not void action is END */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+		return -rte_errno;
+	}
+	rss_config->queue_region_conf = FALSE;
+
+	return 0;
+}
+
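For reference, an application-side rule that satisfies the queue-region constraints enforced above (a contiguous, power-of-two queue list of at most 64 queues and a single RSS type) could be built roughly as sketched below. This is only an illustrative sketch and not part of the patch: the port number, queue list and RSS type are assumptions, and whether such a rule actually takes the queue-region path also depends on how i40e_flow_parse_rss_pattern() fills p_info and conf_info.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Sketch: ask for one RSS queue region made of four contiguous queues. */
static int
example_queue_region_rule(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };	/* contiguous, 2^n */
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_NONFRAG_IPV4_TCP,	/* exactly one type */
		.queue_num = RTE_DIM(queues),		/* 1/2/4/.../64 only */
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -1;
	return rte_flow_create(port_id, &attr, pattern, actions, &err) ? 0 : -1;
}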
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			union i40e_filter_t *filter,
+			struct rte_flow_error *error)
+{
+	struct i40e_rss_pattern_info p_info;
+	struct i40e_queue_regions info;
+	int ret;
+
+	memset(&info, 0, sizeof(struct i40e_queue_regions));
+	memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
+
+	ret = i40e_flow_parse_rss_pattern(dev, pattern,
+					error, &p_info, &info);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_rss_action(dev, actions, error,
+					p_info, &info, filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_HASH;
+
+	return 0;
+}
+
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+		struct i40e_rte_flow_rss_conf *conf)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_rss_filter *rss_filter;
+	int ret;
+
+	if (conf->queue_region_conf) {
+		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+	} else {
+		ret = i40e_config_rss_filter(pf, conf, 1);
+	}
+
+	if (ret)
+		return ret;
+
+	rss_filter = rte_zmalloc("i40e_rss_filter",
+				sizeof(*rss_filter), 0);
+	if (rss_filter == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	rss_filter->rss_filter_info = *conf;
+	/* the newly created rule is always valid;
+	 * any existing rule covered by the new rule will be set invalid
+	 */
+	rss_filter->rss_filter_info.valid = true;
+
+	TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
+
+	return 0;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+		struct i40e_rte_flow_rss_conf *conf)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_rss_filter *rss_filter;
+	void *temp;
+
+	if (conf->queue_region_conf)
+		i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+	else
+		i40e_config_rss_filter(pf, conf, 0);
+
+	TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
+		if (!memcmp(&rss_filter->rss_filter_info, conf,
+			sizeof(struct rte_flow_action_rss))) {
+			TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
+			rte_free(rss_filter);
+		}
+	}
+	return 0;
+}
+
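Similarly, a plain RSS hash rule that passes the checks in i40e_flow_parse_rss_action() — a single RSS type matching the pattern, no queue list, and the symmetric Toeplitz hash function — might be written by an application as in the hypothetical sketch below. The port number and the chosen type are assumptions, and the sketch assumes the pattern parser maps an ETH/IPV4/TCP pattern to the ipv4-tcp RSS type; it is not part of the driver change.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Sketch: enable symmetric Toeplitz hashing for IPv4/TCP traffic. */
static struct rte_flow *
example_symmetric_hash_rule(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
		.types = ETH_RSS_NONFRAG_IPV4_TCP,	/* must match the pattern */
		/* no .queue/.queue_num: the configured queues stay in use */
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}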
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct rte_flow_item *items; /* internal pattern w/o VOID items */
+	parse_filter_t parse_filter;
+	uint32_t item_num = 0; /* non-void item number of pattern*/
+	uint32_t i = 0;
+	bool flag = false;
+	int ret = I40E_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
 				   NULL, "NULL attribute.");
 		return -rte_errno;
 	}
 
-	memset(&cons_filter, 0, sizeof(cons_filter));
+	/* Get the non-void item of action */
+	while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+		i++;
+
+	if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+		ret = i40e_parse_rss_filter(dev, attr, pattern,
+					actions, &cons_filter, error);
+		return ret;
+	}
+
+	i = 0;
 	/* Get the non-void item number of pattern */
 	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
 		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -4145,12 +5313,18 @@ i40e_flow_validate(struct rte_eth_dev *dev,
 	}
 	item_num++;
 
-	items = rte_zmalloc("i40e_pattern",
-			    item_num * sizeof(struct rte_flow_item), 0);
-	if (!items) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
-				   NULL, "No memory for PMD internal items.");
-		return -ENOMEM;
+	if (item_num <= ARRAY_SIZE(g_items)) {
+		items = g_items;
+	} else {
+		items = rte_zmalloc("i40e_pattern",
+				    item_num * sizeof(struct rte_flow_item), 0);
+		if (!items) {
+			rte_flow_error_set(error, ENOMEM,
+					RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+					NULL,
+					"No memory for PMD internal items.");
+			return -ENOMEM;
+		}
 	}
 
 	i40e_pattern_skip_void_item(items, pattern);
@@ -4162,16 +5336,21 @@ i40e_flow_validate(struct rte_eth_dev *dev,
 			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ITEM,
 					   pattern, "Unsupported pattern");
-			rte_free(items);
+
+			if (items != g_items)
+				rte_free(items);
 			return -rte_errno;
 		}
+
 		if (parse_filter)
 			ret = parse_filter(dev, attr, items, actions,
 					   error, &cons_filter);
+
 		flag = true;
 	} while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
 
-	rte_free(items);
+	if (items != g_items)
+		rte_free(items);
 	return ret;
 }
 
@@ -4184,21 +5363,33 @@ i40e_flow_create(struct rte_eth_dev *dev,
 		 struct rte_flow_error *error)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	struct rte_flow *flow;
+	struct rte_flow *flow = NULL;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	int ret;
 
-	flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
-	if (!flow) {
-		rte_flow_error_set(error, ENOMEM,
-				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-				   "Failed to allocate memory");
-		return flow;
-	}
-
 	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
 	if (ret < 0)
 		return NULL;
+
+	if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
+		flow = i40e_fdir_entry_pool_get(fdir_info);
+		if (flow == NULL) {
+			rte_flow_error_set(error, ENOBUFS,
+					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					"Fdir space full");
+
+			return flow;
+		}
+	} else {
+		flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+		if (!flow) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to allocate memory");
+			return flow;
+		}
+	}
+
 	switch (cons_filter_type) {
 	case RTE_ETH_FILTER_ETHERTYPE:
 		ret = i40e_ethertype_filter_set(pf,
@@ -4210,7 +5401,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 		break;
 	case RTE_ETH_FILTER_FDIR:
 		ret = i40e_flow_add_del_fdir_filter(dev,
-			       &cons_filter.fdir_filter, 1);
+				&cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
 		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
@@ -4224,6 +5415,14 @@ i40e_flow_create(struct rte_eth_dev *dev,
 		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
 					i40e_tunnel_filter_list);
 		break;
+	case RTE_ETH_FILTER_HASH:
+		ret = i40e_config_rss_filter_set(dev,
+			    &cons_filter.rss_conf);
+		if (ret)
+			goto free_flow;
+		flow->rule = TAILQ_LAST(&pf->rss_config_list,
+				i40e_rss_conf_list);
+		break;
 	default:
 		goto free_flow;
 	}
@@ -4236,7 +5435,12 @@ free_flow:
 	rte_flow_error_set(error, -ret,
 			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			   "Failed to create flow.");
-	rte_free(flow);
+
+	if (cons_filter_type != RTE_ETH_FILTER_FDIR)
+		rte_free(flow);
+	else
+		i40e_fdir_entry_pool_put(fdir_info, flow);
+
 	return NULL;
 }
 
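The i40e_fdir_entry_pool_get()/i40e_fdir_entry_pool_put() helpers used above are defined elsewhere in the driver; conceptually they hand out pre-allocated flow entries instead of calling rte_zmalloc()/rte_free() for every FDIR rule. A much-simplified, hypothetical sketch of such a pool is shown below (names and layout are assumptions; the real pool is backed by an rte_bitmap plus an index array, as the flush path further down suggests).

#include <stddef.h>
#include <stdint.h>

/* Hypothetical: a fixed-size entry pool with a stack of free indexes. */
struct fdir_flow_entry {
	uint32_t idx;		/* position inside the pool array */
	void *rule;		/* driver-private rule data */
};

struct fdir_flow_pool {
	struct fdir_flow_entry *pool;	/* pre-allocated array */
	uint32_t *free_idx;		/* stack of free positions */
	uint32_t free_cnt;
};

static struct fdir_flow_entry *
fdir_pool_get(struct fdir_flow_pool *p)
{
	if (p->free_cnt == 0)
		return NULL;	/* corresponds to the "Fdir space full" error */
	return &p->pool[p->free_idx[--p->free_cnt]];
}

static void
fdir_pool_put(struct fdir_flow_pool *p, struct fdir_flow_entry *e)
{
	p->free_idx[p->free_cnt++] = e->idx;
}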
@@ -4247,6 +5451,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	enum rte_filter_type filter_type = flow->filter_type;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	int ret = 0;
 
 	switch (filter_type) {
@@ -4260,7 +5465,17 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 		break;
 	case RTE_ETH_FILTER_FDIR:
 		ret = i40e_flow_add_del_fdir_filter(dev,
-		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+				&((struct i40e_fdir_filter *)flow->rule)->fdir,
+				0);
+
+		/* If the last flow is destroyed, disable fdir. */
+		if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
+			i40e_fdir_rx_proc_enable(dev, 0);
+		}
+		break;
+	case RTE_ETH_FILTER_HASH:
+		ret = i40e_config_rss_filter_del(dev,
+			&((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
 		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
@@ -4271,7 +5486,11 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 
 	if (!ret) {
 		TAILQ_REMOVE(&pf->flow_list, flow, node);
-		rte_free(flow);
+		if (filter_type == RTE_ETH_FILTER_FDIR)
+			i40e_fdir_entry_pool_put(fdir_info, flow);
+		else
+			rte_free(flow);
+
 	} else
 		rte_flow_error_set(error, -ret,
 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -4322,17 +5541,17 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	struct i40e_vsi *vsi;
 	struct i40e_pf_vf *vf;
-	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+	struct i40e_aqc_cloud_filters_element_bb cld_filter;
 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
 	struct i40e_tunnel_filter *node;
 	bool big_buffer = 0;
 	int ret = 0;
 
 	memset(&cld_filter, 0, sizeof(cld_filter));
-	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
-			(struct ether_addr *)&cld_filter.element.outer_mac);
-	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
-			(struct ether_addr *)&cld_filter.element.inner_mac);
+	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
+			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
+	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
+			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
 	cld_filter.element.inner_vlan = filter->input.inner_vlan;
 	cld_filter.element.flags = filter->input.flags;
 	cld_filter.element.tenant_id = filter->input.tenant_id;
@@ -4357,11 +5576,11 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		big_buffer = 1;
 
 	if (big_buffer)
-		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
-							      &cld_filter, 1);
+		ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
+						   &cld_filter, 1);
 	else
-		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
-						   &cld_filter.element, 1);
+		ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
+						&cld_filter.element, 1);
 
 	if (ret < 0)
 		return -ENOTSUP;
@@ -4404,6 +5623,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 		return -rte_errno;
 	}
 
+	ret = i40e_flow_flush_rss_filter(dev);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to flush RSS flows.");
+		return -rte_errno;
+	}
+
 	return ret;
 }
 
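From the application's point of view nothing changes here: a single rte_flow_flush() call still removes every rule on the port, now including the RSS/hash rules. A minimal usage sketch (the port number is an assumption):

#include <stdio.h>
#include <rte_flow.h>

/* Sketch: drop every flow rule installed on the port, RSS rules included. */
static int
example_flush_all_rules(uint16_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_flush(port_id, &err) != 0) {
		printf("flush failed: %s\n", err.message ? err.message : "unknown");
		return -1;
	}
	return 0;
}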
@@ -4417,6 +5644,7 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
 	struct rte_flow *flow;
 	void *temp;
 	int ret;
+	uint32_t i = 0;
 
 	ret = i40e_fdir_flush(dev);
 	if (!ret) {
@@ -4432,13 +5660,30 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
 		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
 			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
 				TAILQ_REMOVE(&pf->flow_list, flow, node);
-				rte_free(flow);
 			}
 		}
 
+		/* reset bitmap */
+		rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
+		for (i = 0; i < fdir_info->fdir_space_size; i++) {
+			fdir_info->fdir_flow_pool.pool[i].idx = i;
+			rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
+		}
+
+		fdir_info->fdir_actual_cnt = 0;
+		fdir_info->fdir_guarantee_free_space =
+			fdir_info->fdir_guarantee_total_space;
+		memset(fdir_info->fdir_filter_array,
+			0,
+			sizeof(struct i40e_fdir_filter) *
+			I40E_MAX_FDIR_FILTER_NUM);
+
 		for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 		     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
 			pf->fdir.inset_flag[pctype] = 0;
+
+		/* Disable FDIR processing as all FDIR rules are now flushed */
+		i40e_fdir_rx_proc_enable(dev, 0);
 	}
 
 	return ret;
@@ -4499,3 +5744,77 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
 
 	return ret;
 }
+
+/* remove the RSS filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_flow *flow;
+	void *temp;
+	int32_t ret = -EINVAL;
+
+	ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+	/* Delete RSS flows in flow list. */
+	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+		if (flow->filter_type != RTE_ETH_FILTER_HASH)
+			continue;
+
+		if (flow->rule) {
+			ret = i40e_config_rss_filter_del(dev,
+				&((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
+			if (ret)
+				return ret;
+		}
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	}
+
+	return ret;
+}
+
+static int
+i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data, struct rte_flow_error *error)
+{
+	struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
+	enum rte_filter_type filter_type = flow->filter_type;
+	struct rte_flow_action_rss *rss_conf = data;
+
+	if (!rss_rule) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid rule");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			if (filter_type != RTE_ETH_FILTER_HASH) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "action not supported");
+				return -rte_errno;
+			}
+			rte_memcpy(rss_conf,
+				   &rss_rule->rss_filter_info.conf,
+				   sizeof(struct rte_flow_action_rss));
+			break;
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  actions,
+						  "action not supported");
+		}
+	}
+
+	return 0;
+}
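As a usage illustration for the query hook above, an application can retrieve the RSS configuration stored for a previously created hash rule by passing an RSS action to rte_flow_query() and reading back a struct rte_flow_action_rss, which is what this driver's hook copies into the data buffer. A hedged sketch follows; port_id and flow are assumed to come from an earlier rte_flow_create() of an RSS rule.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <rte_flow.h>

/* Sketch: read back the RSS configuration of an existing hash rule. */
static int
example_query_rss(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_action_rss rss_conf;
	const struct rte_flow_action query_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	memset(&rss_conf, 0, sizeof(rss_conf));
	if (rte_flow_query(port_id, flow, query_actions, &rss_conf, &err) != 0)
		return -1;

	printf("rule hashes to %u queue(s), types mask 0x%" PRIx64 "\n",
	       rss_conf.queue_num, rss_conf.types);
	return 0;
}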