X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_flow.c;h=3053d1fce00b745aee9956a687e5fe559b4d9a1a;hb=24ac604ef7469eb5773c2504b313dd00257f8df3;hp=ec62310034d80c2aa92f56185b0e858446202d9a;hpb=ac8d22de2394e03ba4a77d8fd24381147aafb1d3;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ec62310034..3053d1fce0 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -10,11 +10,11 @@
 #include 
 #include 
 
+#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
@@ -53,6 +53,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 				    struct rte_flow_error *error,
 				    struct rte_eth_ethertype_filter *filter);
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+					const struct rte_flow_attr *attr,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
 					struct i40e_fdir_filter_conf *filter);
@@ -131,8 +132,8 @@ const struct rte_flow_ops i40e_flow_ops = {
 	.flush = i40e_flow_flush,
 };
 
-union i40e_filter_t cons_filter;
-enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+static union i40e_filter_t cons_filter;
+static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
 
 /* Pattern matched ethertype filter */
 static enum rte_flow_item_type pattern_ethertype[] = {
@@ -2005,9 +2006,9 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
 			 * Mask bits of destination MAC address must be full
 			 * of 1 or full of 0.
 			 */
-			if (!is_zero_ether_addr(&eth_mask->src) ||
-			    (!is_zero_ether_addr(&eth_mask->dst) &&
-			     !is_broadcast_ether_addr(&eth_mask->dst))) {
+			if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
@@ -2026,7 +2027,7 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
 			/* If mask bits of destination MAC address
 			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
 			 */
-			if (is_broadcast_ether_addr(&eth_mask->dst)) {
+			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
 				filter->mac_addr = eth_spec->dst;
 				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
 			} else {
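For illustration only, not part of the patch: after these renames the ethertype parser still requires a zero source-MAC mask, an all-zero or all-ones destination-MAC mask, and an EtherType other than IPv4/IPv6/LLDP or the outer TPID. A minimal sketch of a rule that passes those checks, assuming a DPDK 19.05-era application; the MAC address, EtherType 0x88f7 (PTP), port and queue values are illustrative:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch: an ethertype rule this parser accepts -- full dst-MAC mask
 * (so RTE_ETHTYPE_FLAGS_MAC is set) plus a non-IP EtherType. */
static struct rte_flow *
add_ptp_ethertype_rule(uint16_t port_id, uint16_t rx_queue,
		       struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.dst = { .addr_bytes = { 0x02, 0, 0, 0, 0, 0x01 } },
		.type = RTE_BE16(0x88f7),	/* PTP over Ethernet */
	};
	struct rte_flow_item_eth eth_mask = {
		.dst = { .addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
		.type = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}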
@@ -2034,9 +2035,9 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
 			}
 
 			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
-			if (filter->ether_type == ETHER_TYPE_IPv4 ||
-			    filter->ether_type == ETHER_TYPE_IPv6 ||
-			    filter->ether_type == ETHER_TYPE_LLDP ||
+			if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+			    filter->ether_type == RTE_ETHER_TYPE_IPv6 ||
+			    filter->ether_type == RTE_ETHER_TYPE_LLDP ||
 			    filter->ether_type == outer_tpid) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2260,8 +2261,7 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
 		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
 			  (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
 			  (layer_idx * I40E_MAX_FLXPLD_FIED);
-		I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
-		i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
 	}
 
 	/* Set flex pit */
@@ -2420,6 +2420,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
  */
 static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+			     const struct rte_flow_attr *attr,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
 			     struct i40e_fdir_filter_conf *filter)
@@ -2483,26 +2484,32 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			eth_mask = item->mask;
 
 			if (eth_spec && eth_mask) {
-				if (!is_zero_ether_addr(&eth_mask->src) ||
-				    !is_zero_ether_addr(&eth_mask->dst)) {
+				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
 					rte_flow_error_set(error, EINVAL,
 						      RTE_FLOW_ERROR_TYPE_ITEM,
 						      item,
 						      "Invalid MAC_addr mask.");
 					return -rte_errno;
 				}
+			}
+			if (eth_spec && eth_mask && eth_mask->type) {
+				enum rte_flow_item_type next = (item + 1)->type;
 
-				if ((eth_mask->type & UINT16_MAX) ==
-				    UINT16_MAX) {
-					input_set |= I40E_INSET_LAST_ETHER_TYPE;
-					filter->input.flow.l2_flow.ether_type =
-						eth_spec->type;
+				if (eth_mask->type != RTE_BE16(0xffff)) {
+					rte_flow_error_set(error, EINVAL,
+						      RTE_FLOW_ERROR_TYPE_ITEM,
+						      item,
+						      "Invalid type mask.");
+					return -rte_errno;
 				}
 
 				ether_type = rte_be_to_cpu_16(eth_spec->type);
-				if (ether_type == ETHER_TYPE_IPv4 ||
-				    ether_type == ETHER_TYPE_IPv6 ||
-				    ether_type == ETHER_TYPE_ARP ||
+
+				if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
+				    ether_type == RTE_ETHER_TYPE_IPv4 ||
+				    ether_type == RTE_ETHER_TYPE_IPv6 ||
+				    ether_type == RTE_ETHER_TYPE_ARP ||
 				    ether_type == outer_tpid) {
 					rte_flow_error_set(error, EINVAL,
 						     RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2510,6 +2517,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 						     "Unsupported ether_type.");
 					return -rte_errno;
 				}
+				input_set |= I40E_INSET_LAST_ETHER_TYPE;
+				filter->input.flow.l2_flow.ether_type =
+					eth_spec->type;
 			}
 
 			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
@@ -2519,6 +2529,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			vlan_spec = item->spec;
 			vlan_mask = item->mask;
+
+			RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
 			if (vlan_spec && vlan_mask) {
 				if (vlan_mask->tci ==
 				    rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2527,6 +2539,33 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 						vlan_spec->tci;
 				}
 			}
+			if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
+				if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+					rte_flow_error_set(error, EINVAL,
+						      RTE_FLOW_ERROR_TYPE_ITEM,
+						      item,
+						      "Invalid inner_type"
+						      " mask.");
+					return -rte_errno;
+				}
+
+				ether_type =
+					rte_be_to_cpu_16(vlan_spec->inner_type);
+
+				if (ether_type == RTE_ETHER_TYPE_IPv4 ||
+				    ether_type == RTE_ETHER_TYPE_IPv6 ||
+				    ether_type == RTE_ETHER_TYPE_ARP ||
+				    ether_type == outer_tpid) {
+					rte_flow_error_set(error, EINVAL,
+						      RTE_FLOW_ERROR_TYPE_ITEM,
+						      item,
+						      "Unsupported inner_type.");
+					return -rte_errno;
+				}
+				input_set |= I40E_INSET_LAST_ETHER_TYPE;
+				filter->input.flow.l2_flow.ether_type =
+					vlan_spec->inner_type;
+			}
 
 			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
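For illustration, not from the patch: with the inner_type handling added above, a flow director rule can key on the EtherType carried inside a VLAN tag. The mask must be exactly 0xffff, and IPv4/IPv6/ARP and the outer TPID are still rejected on this path. A sketch, with 0x894f as an arbitrary illustrative EtherType:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch only: FDIR pattern keyed on the EtherType inside a VLAN tag,
 * which the inner_type handling above starts to honour. */
static const struct rte_flow_item_vlan vlan_spec = {
	.inner_type = RTE_BE16(0x894f),	/* arbitrary non-IP EtherType */
};
static const struct rte_flow_item_vlan vlan_mask = {
	.inner_type = RTE_BE16(0xffff),	/* must be the full mask */
};
static const struct rte_flow_item vlan_l2_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &vlan_spec, .mask = &vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};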
@@ -2567,8 +2606,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
-				if (frag_off & IPV4_HDR_OFFSET_MASK ||
-				    frag_off & IPV4_HDR_MF_FLAG)
+				if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
+				    frag_off & RTE_IPV4_HDR_MF_FLAG)
 					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
@@ -2919,6 +2958,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_VF:
 			vf_spec = item->spec;
+			if (!attr->transfer) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Matching VF traffic"
+						   " without affecting it"
+						   " (transfer attribute)"
+						   " is unsupported");
+				return -rte_errno;
+			}
 			filter->input.flow_ext.is_vf = 1;
 			filter->input.flow_ext.dst_id = vf_spec->id;
 			if (filter->input.flow_ext.is_vf &&
@@ -3077,11 +3126,13 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_fdir_filter_conf *fdir_filter = &filter->fdir_filter;
 	int ret;
 
-	ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+	ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
+					   fdir_filter);
 	if (ret)
 		return ret;
 
 	ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
@@ -3097,14 +3148,29 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 
 	if (dev->data->dev_conf.fdir_conf.mode !=
 	    RTE_FDIR_MODE_PERFECT) {
-		rte_flow_error_set(error, ENOTSUP,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				   NULL,
-				   "Check the mode in fdir_conf.");
-		return -rte_errno;
+		/* Enable fdir when fdir flow is added at first time. */
+		ret = i40e_fdir_setup(pf);
+		if (ret != I40E_SUCCESS) {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "Failed to setup fdir.");
+			return -rte_errno;
+		}
+		ret = i40e_fdir_configure(dev);
+		if (ret < 0) {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "Failed to configure fdir.");
+			goto err;
+		}
+
+		dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 	}
 
 	return 0;
+err:
+	i40e_fdir_teardown(pf);
+	return -rte_errno;
 }
 
 /* Parse to get the action info of a tunnel filter
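Illustration only: the rework above means an application no longer has to pre-set dev_conf.fdir_conf.mode to RTE_FDIR_MODE_PERFECT; the PMD now runs i40e_fdir_setup()/i40e_fdir_configure() when the first flow director rule arrives. A minimal sketch of such a first rule, assuming a 19.05-era rte_flow API; port, queue, and the 192.168.0.1 match are illustrative:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch: an FDIR rule created with no prior fdir_conf setup; the
 * driver enables FDIR on demand and tears it down with the last rule. */
static struct rte_flow *
add_udp_fdir_rule(uint16_t port_id, uint16_t rx_queue,
		  struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80001),	/* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}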
@@ -3259,8 +3325,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 				/* DST address of inner MAC shouldn't be masked.
 				 * SRC address of Inner MAC should be masked.
 				 */
-				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
-				    !is_zero_ether_addr(&eth_mask->src) ||
+				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
+				    !rte_is_zero_ether_addr(&eth_mask->src) ||
 				    eth_mask->type) {
 					rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3272,12 +3338,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 				if (!vxlan_flag) {
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
-						   ETHER_ADDR_LEN);
+						   RTE_ETHER_ADDR_LEN);
 					filter_type |= ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
-						   ETHER_ADDR_LEN);
+						   RTE_ETHER_ADDR_LEN);
 					filter_type |= ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
@@ -3285,7 +3351,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			vlan_spec = item->spec;
 			vlan_mask = item->mask;
-			if (!(vlan_spec && vlan_mask)) {
+			if (!(vlan_spec && vlan_mask) ||
+			    vlan_mask->inner_type) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
@@ -3488,8 +3555,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 				/* DST address of inner MAC shouldn't be masked.
 				 * SRC address of Inner MAC should be masked.
 				 */
-				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
-				    !is_zero_ether_addr(&eth_mask->src) ||
+				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
+				    !rte_is_zero_ether_addr(&eth_mask->src) ||
 				    eth_mask->type) {
 					rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3501,12 +3568,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 				if (!nvgre_flag) {
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
-						   ETHER_ADDR_LEN);
+						   RTE_ETHER_ADDR_LEN);
 					filter_type |= ETH_TUNNEL_FILTER_OMAC;
 				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
-						   ETHER_ADDR_LEN);
+						   RTE_ETHER_ADDR_LEN);
 					filter_type |= ETH_TUNNEL_FILTER_IMAC;
 				}
 			}
@@ -3515,7 +3582,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			vlan_spec = item->spec;
 			vlan_mask = item->mask;
-			if (!(vlan_spec && vlan_mask)) {
+			if (!(vlan_spec && vlan_mask) ||
+			    vlan_mask->inner_type) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
@@ -4023,7 +4091,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
 			vlan_spec = item->spec;
 			vlan_mask = item->mask;
 
-			if (!(vlan_spec && vlan_mask)) {
+			if (!(vlan_spec && vlan_mask) ||
+			    vlan_mask->inner_type) {
 				rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ITEM,
 					   item,
@@ -4375,7 +4444,23 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 		}
 	}
 
+	if (rss_info->conf.queue_num) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act,
+				"rss only allow one valid rule");
+		return -rte_errno;
+	}
+
 	/* Parse RSS related parameters from configuration */
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "non-default RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "a nonzero RSS encapsulation level is not supported");
 	if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
 		return rte_flow_error_set
 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
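For illustration, not part of the patch: the checks above restrict the RSS action to the default hash function, encapsulation level 0, and a single active RSS rule per port. A sketch of an action configuration that passes them; the queue list and RSS types are illustrative:

#include <rte_flow.h>
#include <rte_ethdev.h>

/* Sketch only: the RSS action shape this parser now accepts. */
static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss rss_action_conf = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* anything else: ENOTSUP */
	.level = 0,				/* nonzero: ENOTSUP */
	.types = ETH_RSS_NONFRAG_IPV4_TCP,
	.queue_num = 4,
	.queue = rss_queues,
};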
@@ -4646,6 +4731,13 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_FDIR:
 		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+
+		/* If the last flow is destroyed, disable fdir. */
+		if (!ret && !TAILQ_EMPTY(&pf->fdir.fdir_list)) {
+			i40e_fdir_teardown(pf);
+			dev->data->dev_conf.fdir_conf.mode =
+				RTE_FDIR_MODE_NONE;
+		}
 		break;
 	case RTE_ETH_FILTER_HASH:
 		ret = i40e_config_rss_filter_del(dev,
@@ -4711,17 +4803,17 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	struct i40e_vsi *vsi;
 	struct i40e_pf_vf *vf;
-	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+	struct i40e_aqc_cloud_filters_element_bb cld_filter;
 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
 	struct i40e_tunnel_filter *node;
 	bool big_buffer = 0;
 	int ret = 0;
 
 	memset(&cld_filter, 0, sizeof(cld_filter));
-	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
-			(struct ether_addr *)&cld_filter.element.outer_mac);
-	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
-			(struct ether_addr *)&cld_filter.element.inner_mac);
+	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
+			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
+	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
+			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
 	cld_filter.element.inner_vlan = filter->input.inner_vlan;
 	cld_filter.element.flags = filter->input.flags;
 	cld_filter.element.tenant_id = filter->input.tenant_id;
@@ -4746,11 +4838,11 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		big_buffer = 1;
 
 	if (big_buffer)
-		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
-							      &cld_filter, 1);
+		ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
+						   &cld_filter, 1);
 	else
-		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
-						   &cld_filter.element, 1);
+		ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
+						&cld_filter.element, 1);
 	if (ret < 0)
 		return -ENOTSUP;
@@ -4838,6 +4930,8 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
 			pf->fdir.inset_flag[pctype] = 0;
 	}
 
+	i40e_fdir_teardown(pf);
+
 	return ret;
 }
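Illustration only: alongside the teardown calls above (FDIR resources are now released on destroy of the last rule and on flush), the VF-item hunk earlier makes the transfer attribute mandatory when matching VF traffic. A sketch of the attribute/item pair the parser now expects; VF id 0 is illustrative:

#include <rte_flow.h>

/* Sketch: matching traffic of a given VF now requires transfer. */
static const struct rte_flow_attr vf_attr = {
	.ingress = 1,
	.transfer = 1,	/* omitting this now yields ENOTSUP */
};
static const struct rte_flow_item_vf vf_item_spec = { .id = 0 };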