X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_flow.c;h=f163ce5643ba08304e5c6cb8834dd4b177549359;hb=adce1f86f8d25fc10e9ac32fd59fa0bedce608ad;hp=e33da2d1bb664ba219b667aad499aeed364415b3;hpb=c41aca973b0c2026910819b924a8df1dfd926bd5;p=dpdk.git diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index e33da2d1bb..f163ce5643 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -71,6 +71,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev, static int i40e_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error); +static int i40e_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error); static int i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *pattern, @@ -120,11 +122,15 @@ static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf, struct i40e_tunnel_filter *filter); +static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf); +static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf); +static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf); const struct rte_flow_ops i40e_flow_ops = { .validate = i40e_flow_validate, .create = i40e_flow_create, .destroy = i40e_flow_destroy, + .flush = i40e_flow_flush, }; union i40e_filter_t cons_filter; @@ -1217,6 +1223,7 @@ i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern, bool is_vni_masked = 0; enum rte_flow_item_type item_type; bool vxlan_flag = 0; + uint32_t tenant_id_be = 0; for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { if (item->last) { @@ -1300,16 +1307,40 @@ i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern, } break; case RTE_FLOW_ITEM_TYPE_IPV4: + filter->ip_type = RTE_TUNNEL_IPTYPE_IPV4; + /* IPv4 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 item"); + return -rte_errno; + } + break; case RTE_FLOW_ITEM_TYPE_IPV6: + filter->ip_type = RTE_TUNNEL_IPTYPE_IPV6; + /* IPv6 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 item"); + return -rte_errno; + } + break; case RTE_FLOW_ITEM_TYPE_UDP: - /* IPv4/IPv6/UDP are used to describe protocol, - * spec amd mask should be NULL. + /* UDP is used to describe protocol, + * spec and mask should be NULL. 
+			 */
 			if (item->spec || item->mask) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
-						   "Invalid IPv4 item");
+						   "Invalid UDP item");
 				return -rte_errno;
 			}
 			break;
@@ -1358,8 +1389,9 @@ i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
 				& I40E_TCI_MASK;
 			if (vxlan_spec && vxlan_mask && !is_vni_masked) {
 				/* If there's vxlan */
-				rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
-					   RTE_DIM(vxlan_spec->vni));
+				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+					   vxlan_spec->vni, 3);
+				filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
 				if (!o_eth_spec && !o_eth_mask &&
 					i_eth_spec && i_eth_mask)
 					filter->filter_type =
@@ -1396,8 +1428,9 @@ i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
 			/* If there's no inner vlan */
 			if (vxlan_spec && vxlan_mask && !is_vni_masked) {
 				/* If there's vxlan */
-				rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
-					   RTE_DIM(vxlan_spec->vni));
+				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+					   vxlan_spec->vni, 3);
+				filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
 				if (!o_eth_spec && !o_eth_mask &&
 					i_eth_spec && i_eth_mask)
 					filter->filter_type =
@@ -1720,3 +1753,124 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 
 	return ret;
 }
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret;
+
+	ret = i40e_flow_flush_fdir_filter(pf);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to flush FDIR flows.");
+		return -rte_errno;
+	}
+
+	ret = i40e_flow_flush_ethertype_filter(pf);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to flush ethertype flows.");
+		return -rte_errno;
+	}
+
+	ret = i40e_flow_flush_tunnel_filter(pf);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to flush tunnel flows.");
+		return -rte_errno;
+	}
+
+	return ret;
+}
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter;
+	struct rte_flow *flow;
+	void *temp;
+	int ret;
+
+	ret = i40e_fdir_flush(dev);
+	if (!ret) {
+		/* Delete FDIR filters in FDIR list. */
+		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+			ret = i40e_sw_fdir_filter_del(pf,
+						      &fdir_filter->fdir.input);
+			if (ret < 0)
+				return ret;
+		}
+
+		/* Delete FDIR flows in flow list. */
+		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+				TAILQ_REMOVE(&pf->flow_list, flow, node);
+				rte_free(flow);
+			}
+		}
+	}
+
+	return ret;
+}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+	struct i40e_ethertype_filter_list
+		*ethertype_list = &pf->ethertype.ethertype_list;
+	struct i40e_ethertype_filter *filter;
+	struct rte_flow *flow;
+	void *temp;
+	int ret = 0;
+
+	while ((filter = TAILQ_FIRST(ethertype_list))) {
+		ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+		if (ret)
+			return ret;
+	}
+
+	/* Delete ethertype flows in flow list. */
+	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+			TAILQ_REMOVE(&pf->flow_list, flow, node);
+			rte_free(flow);
+		}
+	}
+
+	return ret;
+}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+	struct i40e_tunnel_filter_list
+		*tunnel_list = &pf->tunnel.tunnel_list;
+	struct i40e_tunnel_filter *filter;
+	struct rte_flow *flow;
+	void *temp;
+	int ret = 0;
+
+	while ((filter = TAILQ_FIRST(tunnel_list))) {
+		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+		if (ret)
+			return ret;
+	}
+
+	/* Delete tunnel flows in flow list. */
+	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+			TAILQ_REMOVE(&pf->flow_list, flow, node);
+			rte_free(flow);
+		}
+	}
+
+	return ret;
+}
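
Usage note: the .flush callback registered in i40e_flow_ops above is not called directly by applications; it is reached through the generic rte_flow_flush() entry point of the rte_flow API. The sketch below is illustrative only and not part of this patch: the helper name, port_id value and error-handling policy are assumptions.

#include <stdio.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: remove every flow rule on a port.  On i40e this
 * dispatches to i40e_flow_flush(), which walks the FDIR, ethertype and
 * tunnel filter lists added in this patch.
 */
static int
example_flush_all_flows(uint16_t port_id)
{
	struct rte_flow_error error;
	int ret;

	memset(&error, 0, sizeof(error));

	ret = rte_flow_flush(port_id, &error);
	if (ret)
		printf("rte_flow_flush() failed: %s\n",
		       error.message ? error.message : "(no error message)");

	return ret;
}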