diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index adba168d54..6438a14487 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include

 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -167,7 +168,9 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
 	},
 	[MLX5_EXPANSION_VXLAN] = {
-		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+						 MLX5_EXPANSION_IPV4,
+						 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 	},
 	[MLX5_EXPANSION_VXLAN_GPE] = {
@@ -402,7 +405,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
 		       REG_C_3;
 	case MLX5_MTR_COLOR:
-		RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+		MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
 		return priv->mtr_color_reg;
 	case MLX5_COPY_MARK:
 		/*
@@ -447,7 +450,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		}
 		return config->flow_mreg_c[id + start_reg - REG_C_0];
 	}
-	assert(false);
+	MLX5_ASSERT(false);
 	return rte_flow_error_set(error, EINVAL,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, "invalid feature name");
@@ -606,7 +609,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 {
 	unsigned int i;

-	assert(nic_mask);
+	MLX5_ASSERT(nic_mask);
 	for (i = 0; i < size; ++i)
 		if ((nic_mask[i] | mask[i]) != nic_mask[i])
 			return rte_flow_error_set(error, ENOTSUP,
@@ -709,17 +712,19 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to device flow structure.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to device flow handle structure.
 */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;

 	for (i = 0; i != flow->rss.queue_num; ++i) {
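The hunk above is the core of this change: the Rx-queue bookkeeping no longer reads per-subflow state from struct mlx5_flow but from a separate handle attached to the parent rte_flow. A rough sketch of that handle, reconstructed only from the fields this diff touches (next, act_flags, layers, qrss_id, mtr_flow_id); the authoritative definition lives in mlx5_flow.h:

	struct mlx5_flow_handle {
		LIST_ENTRY(mlx5_flow_handle) next; /* entry in flow->dev_handles */
		uint64_t layers;      /* MLX5_FLOW_LAYER_* bits matched by the items */
		uint64_t act_flags;   /* MLX5_FLOW_ACTION_* bits of the actions */
		uint32_t qrss_id;     /* unique tag of the Q/RSS split, 0 if unused */
		uint32_t mtr_flow_id; /* unique tag of the meter split, 0 if unused */
	};

Splitting the handle out lets the driver keep only this small structure per subflow and recycle the larger struct mlx5_flow objects as intermediate resources (see mlx5_flow_alloc_intermediate() near the end of this patch).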
@@ -748,7 +753,7 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->layers) ==
+				     dev_handle->layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -770,10 +775,10 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 static void
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;

-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_set(dev, dev_flow);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		flow_drv_rxq_flags_set(dev, flow, dev_handle);
 }

 /**
@@ -782,20 +787,22 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 *
 * @param dev
 *   Pointer to Ethernet device.
- * @param[in] dev_flow
- *   Pointer to the device flow.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to the device flow handle structure.
 */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+			struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->actions &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;

-	assert(dev->data->dev_started);
+	MLX5_ASSERT(dev->data->dev_started);
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -817,7 +824,7 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 			/* Decrease the counter matching the flow.
*/ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { if ((tunnels_info[j].tunnel & - dev_flow->layers) == + dev_handle->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]--; break; @@ -840,10 +847,10 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) static void flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - flow_drv_rxq_flags_trim(dev, dev_flow); + LIST_FOREACH(dev_handle, &flow->dev_handles, next) + flow_drv_rxq_flags_trim(dev, flow, dev_handle); } /** @@ -1638,7 +1645,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, "\xff\xff\xff\xff\xff\xff\xff\xff", .vtc_flow = RTE_BE32(0xffffffff), .proto = 0xff, - .hop_limits = 0xff, }, }; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); @@ -1783,7 +1789,7 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, MLX5_FLOW_LAYER_OUTER_L4; int ret; - assert(flow_mask); + MLX5_ASSERT(flow_mask); if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1835,7 +1841,6 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - uint32_t vlan_id = 0; if (item_flags & MLX5_FLOW_LAYER_TUNNEL) @@ -1862,23 +1867,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, return ret; if (spec) { memcpy(&id.vni[1], spec->vni, 3); - vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vlan_id &= id.vlan_id; } - /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if - * only this layer is defined in the Verbs specification it is - * interpreted as wildcard and all packets will match this - * rule, if it follows a full stack layer (ex: eth / ipv4 / - * udp), all packets matching the layers before will also - * match this rule. To avoid such situation, VNI 0 is - * currently refused. - */ - if (!vlan_id) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "VXLAN vni cannot be 0"); if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -1917,7 +1907,6 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, uint32_t vlan_id; uint8_t vni[4]; } id = { .vlan_id = 0, }; - uint32_t vlan_id = 0; if (!priv->config.l3_vxlan_en) return rte_flow_error_set(error, ENOTSUP, @@ -1955,22 +1944,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, "VxLAN-GPE protocol" " not supported"); memcpy(&id.vni[1], spec->vni, 3); - vlan_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vlan_id &= id.vlan_id; } - /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this - * layer is defined in the Verbs specification it is interpreted as - * wildcard and all packets will match this rule, if it follows a full - * stack layer (ex: eth / ipv4 / udp), all packets matching the layers - * before will also match this rule. To avoid such situation, VNI 0 - * is currently refused. 
- */ - if (!vlan_id) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "VXLAN-GPE vni cannot be 0"); if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -2135,9 +2110,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, .protocol = RTE_BE16(UINT16_MAX), }; - if (!(priv->config.hca_attr.flex_parser_protocols & - MLX5_HCA_FLEX_GENEVE_ENABLED) || - !priv->config.hca_attr.tunnel_stateless_geneve_rx) + if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 Geneve is not enabled by device" @@ -2314,7 +2287,7 @@ flow_qrss_get_id(struct rte_eth_dev *dev) ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); if (ret) return 0; - assert(qrss_id); + MLX5_ASSERT(qrss_id); return qrss_id; } @@ -2340,11 +2313,11 @@ static void flow_mreg_split_qrss_release(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - if (dev_flow->qrss_id) - flow_qrss_free_id(dev, dev_flow->qrss_id); + LIST_FOREACH(dev_handle, &flow->dev_handles, next) + if (dev_handle->qrss_id) + flow_qrss_free_id(dev, dev_handle->qrss_id); } static int @@ -2360,7 +2333,8 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, } static struct mlx5_flow * -flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, +flow_null_prepare(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, struct rte_flow_error *error) @@ -2500,6 +2474,8 @@ flow_drv_validate(struct rte_eth_dev *dev, * setting backward reference to the flow should be done out of this function. * layers field is not filled either. * + * @param[in] dev + * Pointer to the dev structure. * @param[in] attr * Pointer to the flow attributes. * @param[in] items @@ -2513,7 +2489,8 @@ flow_drv_validate(struct rte_eth_dev *dev, * Pointer to device flow on success, otherwise NULL and rte_errno is set. 
*/
 static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+		 const struct rte_flow *flow,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
@@ -2522,9 +2499,9 @@ flow_drv_prepare(const struct rte_flow *flow,
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = flow->drv_type;

-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
-	return fops->prepare(attr, items, actions, error);
+	return fops->prepare(dev, attr, items, actions, error);
 }

 /**
@@ -2566,7 +2543,7 @@ flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;

-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	return fops->translate(dev, dev_flow, attr, items, actions, error);
 }
@@ -2593,7 +2570,7 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = flow->drv_type;

-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	return fops->apply(dev, flow, error);
 }
@@ -2615,7 +2592,7 @@ flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = flow->drv_type;

-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	fops->remove(dev, flow);
 }
@@ -2637,7 +2614,7 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 	enum mlx5_flow_drv_type type = flow->drv_type;

 	flow_mreg_split_qrss_release(dev, flow);
-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	fops->destroy(dev, flow);
 }
@@ -2663,26 +2640,6 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 	return 0;
 }

-/**
- * Get port id item from the item list.
- *
- * @param[in] item
- *   Pointer to the list of items.
- *
- * @return
- *   Pointer to the port id item if exist, else return NULL.
- */
-static const struct rte_flow_item *
-find_port_id_item(const struct rte_flow_item *item)
-{
-	assert(item);
-	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-		if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID)
-			return item;
-	}
-	return NULL;
-}
-
 /**
 * Get RSS action from the action list.
 *
@@ -2727,7 +2684,48 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
 }

 /**
- * Get QUEUE/RSS action from the action list.
+ * Get layer flags from the prefix flow.
+ *
+ * Some flows may be split into several subflows; the prefix subflow gets the
+ * match items and the suffix subflow gets the actions.
+ * Some actions need the user-defined match item flags to get the detail for
+ * the action.
+ * This function helps the suffix flow to get the item layer flags from the
+ * prefix subflow.
+ *
+ * @param[in] dev_flow
+ *   Pointer to the created prefix subflow.
+ *
+ * @return
+ *   The layers taken from the prefix subflow.
+ */
+static inline uint64_t
+flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
+{
+	uint64_t layers = 0;
+
+	/*
+	 * The layer bits could be cached in a local variable, but the
+	 * compiler usually does that optimization on its own.
+	 * If there is no decap action, use the layers directly.
+	 */
+	if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+		return dev_flow->handle->layers;
+	/* Convert L3 layers with decap action. */
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+	/* Convert L4 layers with decap action. */
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+	return layers;
+}
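To make the conversion above concrete, an editor's illustration (not part of the patch; the MLX5_FLOW_* bit names come from mlx5_flow.h):

	/* A prefix subflow matched eth / ipv4 / udp / vxlan / eth / ipv4 / tcp
	 * and carries a VXLAN decap action: */
	handle->act_flags |= MLX5_FLOW_ACTION_VXLAN_DECAP; /* part of the DECAP mask */
	handle->layers    |= MLX5_FLOW_LAYER_INNER_L3_IPV4 |
			     MLX5_FLOW_LAYER_INNER_L4_TCP;
	/* flow_get_prefix_layer_flags() then reports
	 *   MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_TCP
	 * to the suffix subflow, i.e. the packet as it looks after decap. */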
+
+/**
+ * Get metadata split action information.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
@@ -2736,18 +2734,38 @@ find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
 * @param[out] qrss_type
 *   Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned
 *   if no QUEUE/RSS is found.
+ * @param[out] encap_idx
+ *   Pointer to the index of the encap action if it exists, otherwise the last
+ *   action index.
 *
 * @return
 *   Total number of actions.
 */
 static int
-flow_parse_qrss_action(const struct rte_flow_action actions[],
-		       const struct rte_flow_action **qrss)
+flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
+				       const struct rte_flow_action **qrss,
+				       int *encap_idx)
 {
+	const struct rte_flow_action_raw_encap *raw_encap;
 	int actions_n = 0;
+	int raw_decap_idx = -1;

+	*encap_idx = -1;
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+			*encap_idx = actions_n;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+			raw_decap_idx = actions_n;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+			raw_encap = actions->conf;
+			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+				*encap_idx = raw_decap_idx != -1 ?
+					     raw_decap_idx : actions_n;
+			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 		case RTE_FLOW_ACTION_TYPE_RSS:
 			*qrss = actions;
@@ -2757,6 +2775,8 @@ flow_parse_qrss_action(const struct rte_flow_action actions[],
 		}
 		actions_n++;
 	}
+	if (*encap_idx == -1)
+		*encap_idx = actions_n;
 	/* Count RTE_FLOW_ACTION_TYPE_END. */
 	return actions_n + 1;
 }
@@ -2777,7 +2797,7 @@ flow_check_meter_action(const struct rte_flow_action actions[],
 			uint32_t *mtr)
 {
 	int actions_n = 0;

-	assert(mtr);
+	MLX5_ASSERT(mtr);
 	*mtr = 0;
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
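A worked example for the parser above (editor's note, not part of the patch):

	/*
	 * actions = { [0] RAW_DECAP,
	 *             [1] RAW_ENCAP (size > MLX5_ENCAPSULATION_DECISION_SIZE),
	 *             [2] RSS,
	 *             [3] END }
	 * flow_parse_metadata_split_actions_info() then yields:
	 *   *qrss        = &actions[2];
	 *   *encap_idx   = 0;  (the RAW_DECAP slot, so that a copy action can
	 *                       be placed in front of the decap/encap pair)
	 *   return value = 4   (three actions plus the END entry).
	 * Without any encap action, *encap_idx ends up equal to the action
	 * count, i.e. it points at the END entry.
	 */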
@@ -2947,13 +2967,14 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
 		return NULL;
 	cp_mreg.src = ret;
 	/* Check if already registered. */
-	assert(priv->mreg_cp_tbl);
+	MLX5_ASSERT(priv->mreg_cp_tbl);
 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
 	if (mcp_res) {
 		/* For non-default rule. */
 		if (mark_id != MLX5_DEFAULT_COPY_ID)
 			mcp_res->refcnt++;
-		assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1);
+		MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
+			    mcp_res->refcnt == 1);
 		return mcp_res;
 	}
 	/* Provide the full width of FLAG specific value. */
@@ -3021,7 +3042,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
 	mcp_res->hlist_ent.key = mark_id;
 	ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
 				&mcp_res->hlist_ent);
-	assert(!ret);
+	MLX5_ASSERT(!ret);
 	if (ret)
 		goto error;
 	return mcp_res;
@@ -3050,7 +3071,7 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
 	if (!mcp_res || !priv->mreg_cp_tbl)
 		return;
 	if (flow->copy_applied) {
-		assert(mcp_res->appcnt);
+		MLX5_ASSERT(mcp_res->appcnt);
 		flow->copy_applied = 0;
 		--mcp_res->appcnt;
 		if (!mcp_res->appcnt)
@@ -3062,7 +3083,7 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
 	 */
 	if (--mcp_res->refcnt)
 		return;
-	assert(mcp_res->flow);
+	MLX5_ASSERT(mcp_res->flow);
 	flow_list_destroy(dev, NULL, mcp_res->flow);
 	mlx5_hlist_remove(priv->mreg_cp_tbl,
 			  &mcp_res->hlist_ent);
 	rte_free(mcp_res);
@@ -3115,7 +3136,7 @@ flow_mreg_stop_copy_action(struct rte_eth_dev *dev,

 	if (!mcp_res || !flow->copy_applied)
 		return;
-	assert(mcp_res->appcnt);
+	MLX5_ASSERT(mcp_res->appcnt);
 	--mcp_res->appcnt;
 	flow->copy_applied = 0;
 	if (!mcp_res->appcnt)
@@ -3141,7 +3162,7 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
 					    MLX5_DEFAULT_COPY_ID);
 	if (!mcp_res)
 		return;
-	assert(mcp_res->flow);
+	MLX5_ASSERT(mcp_res->flow);
 	flow_list_destroy(dev, NULL, mcp_res->flow);
 	mlx5_hlist_remove(priv->mreg_cp_tbl,
 			  &mcp_res->hlist_ent);
 	rte_free(mcp_res);
@@ -3370,7 +3391,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
 	actions_rx++;
 	set_tag = (void *)actions_rx;
 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
-	assert(set_tag->id > REG_NONE);
+	MLX5_ASSERT(set_tag->id > REG_NONE);
 	set_tag->data = *flow_id;
 	tag_action->conf = set_tag;
 	/* Create Tx item list. */
@@ -3381,7 +3402,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
 	tag_item = (void *)addr;
 	tag_item->data = *flow_id;
 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
-	assert(set_tag->id > REG_NONE);
+	MLX5_ASSERT(set_tag->id > REG_NONE);
 	item->spec = tag_item;
 	addr += sizeof(struct mlx5_rte_flow_item_tag);
 	tag_item = (void *)addr;
@@ -3399,12 +3420,14 @@ flow_hairpin_split(struct rte_eth_dev *dev,
 * The last stage of splitting chain, just creates the subflow
 * without any modification.
 *
- * @param dev
+ * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
 * @param[in, out] sub_flow
 *   Pointer to return the created subflow, may be NULL.
+ * @param[in] prefix_layers
+ *   Prefix subflow layers, may be 0.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
@@ -3422,6 +3445,7 @@ static int
 flow_create_split_inner(struct rte_eth_dev *dev,
 			struct rte_flow *flow,
 			struct mlx5_flow **sub_flow,
+			uint64_t prefix_layers,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item items[],
 			const struct rte_flow_action actions[],
@@ -3429,13 +3453,19 @@ flow_create_split_inner(struct rte_eth_dev *dev,
 {
 	struct mlx5_flow *dev_flow;

-	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
 	if (!dev_flow)
 		return -rte_errno;
 	dev_flow->flow = flow;
 	dev_flow->external = external;
 	/* Subflow object was created, we must include one in the list. */
-	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+	LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+	/*
+	 * If dev_flow is one of the suffix flows, some actions in the suffix
+	 * flow may need the user-defined item layer flags.
+ */ + if (prefix_layers) + dev_flow->handle->layers = prefix_layers; if (sub_flow) *sub_flow = dev_flow; return flow_drv_translate(dev, dev_flow, attr, items, actions, error); @@ -3455,6 +3485,10 @@ flow_create_split_inner(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[out] sfx_items + * Suffix flow match items (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] actions_sfx @@ -3471,66 +3505,60 @@ flow_create_split_inner(struct rte_eth_dev *dev, */ static int flow_meter_split_prep(struct rte_eth_dev *dev, + const struct rte_flow_item items[], + struct rte_flow_item sfx_items[], const struct rte_flow_action actions[], struct rte_flow_action actions_sfx[], struct rte_flow_action actions_pre[]) { - struct rte_flow_action *tag_action; + struct rte_flow_action *tag_action = NULL; + struct rte_flow_item *tag_item; struct mlx5_rte_flow_action_set_tag *set_tag; struct rte_flow_error error; const struct rte_flow_action_raw_encap *raw_encap; const struct rte_flow_action_raw_decap *raw_decap; + struct mlx5_rte_flow_item_tag *tag_spec; + struct mlx5_rte_flow_item_tag *tag_mask; uint32_t tag_id; + bool copy_vlan = false; - /* Add the extra tag action first. */ - tag_action = actions_pre; - tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; - actions_pre++; /* Prepare the actions for prefix and suffix flow. */ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + struct rte_flow_action **action_cur = NULL; + switch (actions->type) { case RTE_FLOW_ACTION_TYPE_METER: + /* Add the extra tag action first. */ + tag_action = actions_pre; + tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG; + actions_pre++; + action_cur = &actions_pre; + break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: - memcpy(actions_pre, actions, - sizeof(struct rte_flow_action)); - actions_pre++; + action_cur = &actions_pre; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; - if (raw_encap->size > - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4))) { - memcpy(actions_sfx, actions, - sizeof(struct rte_flow_action)); - actions_sfx++; - } else { - rte_memcpy(actions_pre, actions, - sizeof(struct rte_flow_action)); - actions_pre++; - } + if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) + action_cur = &actions_pre; break; case RTE_FLOW_ACTION_TYPE_RAW_DECAP: raw_decap = actions->conf; - /* Size 0 decap means 50 bytes as vxlan decap. */ - if (raw_decap->size && (raw_decap->size < - (sizeof(struct rte_flow_item_eth) + - sizeof(struct rte_flow_item_ipv4)))) { - memcpy(actions_sfx, actions, - sizeof(struct rte_flow_action)); - actions_sfx++; - } else { - rte_memcpy(actions_pre, actions, - sizeof(struct rte_flow_action)); - actions_pre++; - } + if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) + action_cur = &actions_pre; + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + copy_vlan = true; break; default: - memcpy(actions_sfx, actions, - sizeof(struct rte_flow_action)); - actions_sfx++; break; } + if (!action_cur) + action_cur = &actions_sfx; + memcpy(*action_cur, actions, sizeof(struct rte_flow_action)); + (*action_cur)++; } /* Add end action to the actions. 
*/
 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
@@ -3544,7 +3572,44 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 	 */
 	tag_id = flow_qrss_get_id(dev);
 	set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
+	MLX5_ASSERT(tag_action);
 	tag_action->conf = set_tag;
+	/* Prepare the suffix subflow items. */
+	tag_item = sfx_items++;
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+		int item_type = items->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_PORT_ID:
+			memcpy(sfx_items, items, sizeof(*sfx_items));
+			sfx_items++;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			if (copy_vlan) {
+				memcpy(sfx_items, items, sizeof(*sfx_items));
+				/*
+				 * Convert to internal match item, it is used
+				 * for vlan push and set vid.
+				 */
+				sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+				sfx_items++;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
+	sfx_items++;
+	tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
+	tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
+	tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
+	tag_mask = tag_spec + 1;
+	tag_mask->data = 0xffffff00;
+	tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+	tag_item->spec = tag_spec;
+	tag_item->last = NULL;
+	tag_item->mask = tag_mask;
 	return tag_id;
 }
@@ -3677,6 +3742,8 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
 *   Number of actions in the list.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
+ * @param[in] encap_idx
+ *   The encap action index.
 *
 * @return
 *   0 on success, negative value otherwise
@@ -3685,7 +3752,8 @@ static int
 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
 		       struct rte_flow_action *ext_actions,
 		       const struct rte_flow_action *actions,
-		       int actions_n, struct rte_flow_error *error)
+		       int actions_n, struct rte_flow_error *error,
+		       int encap_idx)
 {
 	struct mlx5_flow_action_copy_mreg *cp_mreg =
 		(struct mlx5_flow_action_copy_mreg *)
@@ -3700,15 +3768,24 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
 	if (ret < 0)
 		return ret;
 	cp_mreg->src = ret;
-	memcpy(ext_actions, actions,
-	       sizeof(*ext_actions) * actions_n);
-	ext_actions[actions_n - 1] = (struct rte_flow_action){
-		.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
-		.conf = cp_mreg,
-	};
-	ext_actions[actions_n] = (struct rte_flow_action){
-		.type = RTE_FLOW_ACTION_TYPE_END,
-	};
+	if (encap_idx != 0)
+		memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
+	if (encap_idx == actions_n - 1) {
+		ext_actions[actions_n - 1] = (struct rte_flow_action){
+			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+			.conf = cp_mreg,
+		};
+		ext_actions[actions_n] = (struct rte_flow_action){
+			.type = RTE_FLOW_ACTION_TYPE_END,
+		};
+	} else {
+		ext_actions[encap_idx] = (struct rte_flow_action){
+			.type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+			.conf = cp_mreg,
+		};
+		memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
+		       sizeof(*ext_actions) * (actions_n - encap_idx));
+	}
 	return 0;
 }

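The effect of the splice above on a concrete list (editor's illustration; action names as in rte_flow.h):

	/*
	 * actions     = { SET_META, RAW_ENCAP, END }   actions_n = 3, encap_idx = 1
	 * ext_actions = { SET_META, COPY_MREG, RAW_ENCAP, END }
	 *
	 * The register copy is executed before the encapsulation, while the
	 * packet still carries its original headers. When no encap action is
	 * present (encap_idx == actions_n - 1), COPY_MREG is simply appended
	 * right before END.
	 */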
@@ -3726,6 +3803,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Parent flow structure pointer.
+ * @param[in] prefix_layers
+ *   Prefix flow layer flags.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
@@ -3742,6 +3821,7 @@ static int
 flow_create_split_metadata(struct rte_eth_dev *dev,
 			   struct rte_flow *flow,
+			   uint64_t prefix_layers,
 			   const struct rte_flow_attr *attr,
 			   const struct rte_flow_item items[],
 			   const struct rte_flow_action actions[],
@@ -3756,15 +3836,18 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
 	int mtr_sfx = 0;
 	size_t act_size;
 	int actions_n;
+	int encap_idx;
 	int ret;

 	/* Check whether extensive metadata feature is engaged. */
 	if (!config->dv_flow_en ||
 	    config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
 	    !mlx5_flow_ext_mreg_supported(dev))
-		return flow_create_split_inner(dev, flow, NULL, attr, items,
-					       actions, external, error);
-	actions_n = flow_parse_qrss_action(actions, &qrss);
+		return flow_create_split_inner(dev, flow, NULL, prefix_layers,
+					       attr, items, actions, external,
+					       error);
+	actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
+							   &encap_idx);
 	if (qrss) {
 		/* Exclude hairpin flows from splitting. */
 		if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
@@ -3839,17 +3922,17 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
 						  NULL, "no memory to split "
 						  "metadata flow");
 		/* Create the action list appended with copy register. */
 		ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
-					     actions_n, error);
+					     actions_n, error, encap_idx);
 		if (ret < 0)
 			goto exit;
 	}
 	/* Add the unmodified original or prefix subflow. */
-	ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
-				      ext_actions ? ext_actions : actions,
-				      external, error);
+	ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
+				      items, ext_actions ? ext_actions :
+				      actions, external, error);
 	if (ret < 0)
 		goto exit;
-	assert(dev_flow);
+	MLX5_ASSERT(dev_flow);
 	if (qrss) {
 		const struct rte_flow_attr q_attr = {
 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
@@ -3880,7 +3963,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
 			.type = RTE_FLOW_ACTION_TYPE_END,
 		},
 	};
-	uint64_t hash_fields = dev_flow->hash_fields;
+	uint64_t layers = flow_get_prefix_layer_flags(dev_flow);

 	/*
 	 * Configure the tag item only if there is no meter subflow.
@@ -3889,7 +3972,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
 	 */
 	if (qrss_id) {
 		/* Not meter subflow. */
-		assert(!mtr_sfx);
+		MLX5_ASSERT(!mtr_sfx);
 		/*
 		 * Put unique id in prefix flow due to it is destroyed
 		 * after suffix flow and id will be freed after there
@@ -3897,8 +3980,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
 		 * reallocation becomes possible (for example, for
 		 * other flows in other threads).
 		 */
-		dev_flow->qrss_id = qrss_id;
-		qrss_id = 0;
+		dev_flow->handle->qrss_id = qrss_id;
 		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 					   error);
 		if (ret < 0)
@@ -3907,14 +3989,15 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
 		}
 		dev_flow = NULL;
 		/* Add suffix subflow to execute Q/RSS. */
-		ret = flow_create_split_inner(dev, flow, &dev_flow,
+		ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
 					      &q_attr, mtr_sfx ? items :
 					      q_items, q_actions,
 					      external, error);
 		if (ret < 0)
 			goto exit;
-		assert(dev_flow);
-		dev_flow->hash_fields = hash_fields;
+		/* The qrss ID should be freed only on failure. */
+		qrss_id = 0;
+		MLX5_ASSERT(dev_flow);
 	}

 exit:
@@ -3967,7 +4050,6 @@ flow_create_split_meter(struct rte_eth_dev *dev,
 	struct rte_flow_action *sfx_actions = NULL;
 	struct rte_flow_action *pre_actions = NULL;
 	struct rte_flow_item *sfx_items = NULL;
-	const struct rte_flow_item *sfx_port_id_item;
 	struct mlx5_flow *dev_flow = NULL;
 	struct rte_flow_attr sfx_attr = *attr;
 	uint32_t mtr = 0;
@@ -3980,13 +4062,11 @@ flow_create_split_meter(struct rte_eth_dev *dev,
 	if (priv->mtr_en)
 		actions_n = flow_check_meter_action(actions, &mtr);
 	if (mtr) {
-		struct mlx5_rte_flow_item_tag *tag_spec;
-		struct mlx5_rte_flow_item_tag *tag_mask;
 		/* The five prefix actions: meter, decap, encap, tag, end. */
 		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
-			   sizeof(struct rte_flow_action_set_tag);
-		/* tag, end. */
-#define METER_SUFFIX_ITEM 3
+			   sizeof(struct mlx5_rte_flow_action_set_tag);
+		/* tag, vlan, port id, end. */
+#define METER_SUFFIX_ITEM 4
 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
 		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
@@ -3995,51 +4075,34 @@ flow_create_split_meter(struct rte_eth_dev *dev,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
 						  NULL, "no memory to split "
 						  "meter flow");
+		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
+			     act_size);
 		pre_actions = sfx_actions + actions_n;
-		mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions,
-						   pre_actions);
+		mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
+						   actions, sfx_actions,
+						   pre_actions);
 		if (!mtr_tag_id) {
 			ret = -rte_errno;
 			goto exit;
 		}
 		/* Add the prefix subflow. */
-		ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
-					      pre_actions, external, error);
+		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
+					      items, pre_actions, external,
+					      error);
 		if (ret) {
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->mtr_flow_id = mtr_tag_id;
-		/* Prepare the suffix flow match pattern. */
-		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
-			     act_size);
-		tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items +
-			    METER_SUFFIX_ITEM);
-		tag_spec->data = dev_flow->mtr_flow_id << MLX5_MTR_COLOR_BITS;
-		tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0,
-						    error);
-		tag_mask = tag_spec + 1;
-		tag_mask->data = 0xffffff00;
-		sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
-		sfx_items->spec = tag_spec;
-		sfx_items->last = NULL;
-		sfx_items->mask = tag_mask;
-		sfx_items++;
-		sfx_port_id_item = find_port_id_item(items);
-		if (sfx_port_id_item) {
-			memcpy(sfx_items, sfx_port_id_item,
-			       sizeof(*sfx_items));
-			sfx_items++;
-		}
-		sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
-		sfx_items -= sfx_port_id_item ? 2 : 1;
+		dev_flow->handle->mtr_flow_id = mtr_tag_id;
 		/* Setting the sfx group attr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
 				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
 	}
 	/* Add the prefix subflow. */
-	ret = flow_create_split_metadata(dev, flow, &sfx_attr,
+	ret = flow_create_split_metadata(dev, flow, dev_flow ?
+					 flow_get_prefix_layer_flags(dev_flow) :
+					 0, &sfx_attr,
 					 sfx_items ? sfx_items : items,
 					 sfx_actions ?
sfx_actions : actions, external, error); @@ -4096,7 +4159,7 @@ flow_create_split_outer(struct rte_eth_dev *dev, ret = flow_create_split_meter(dev, flow, attr, items, actions, external, error); - assert(ret <= 0); + MLX5_ASSERT(ret <= 0); return ret; } @@ -4153,13 +4216,16 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, } items_tx; struct rte_flow_expand_rss *buf = &expand_buffer.buf; const struct rte_flow_action *p_actions_rx = actions; - int ret; uint32_t i; uint32_t flow_size; int hairpin_flow = 0; uint32_t hairpin_id = 0; struct rte_flow_attr attr_tx = { .priority = 0 }; + int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, + error); + if (ret < 0) + return NULL; hairpin_flow = flow_check_hairpin_split(dev, attr, actions); if (hairpin_flow > 0) { if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { @@ -4171,10 +4237,6 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, &hairpin_id); p_actions_rx = actions_rx.actions; } - ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, - error); - if (ret < 0) - goto error_before_flow; flow_size = sizeof(struct rte_flow); rss = flow_get_rss_action(p_actions_rx); if (rss) @@ -4190,8 +4252,8 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, flow->drv_type = flow_get_drv_type(dev, attr); if (hairpin_id != 0) flow->hairpin_flow_id = hairpin_id; - assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && - flow->drv_type < MLX5_FLOW_TYPE_MAX); + MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && + flow->drv_type < MLX5_FLOW_TYPE_MAX); flow->rss.queue = (void *)(flow + 1); if (rss) { /* @@ -4202,7 +4264,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; } - LIST_INIT(&flow->dev_flows); + LIST_INIT(&flow->dev_handles); if (rss && rss->types) { unsigned int graph_root; @@ -4211,12 +4273,14 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, items, rss->types, mlx5_support_expansion, graph_root); - assert(ret > 0 && + MLX5_ASSERT(ret > 0 && (unsigned int)ret < sizeof(expand_buffer.buffer)); } else { buf->entries = 1; buf->entry[0].pattern = (void *)(uintptr_t)items; } + /* Reset device flow index to 0. */ + priv->flow_idx = 0; for (i = 0; i < buf->entries; ++i) { /* * The splitter may create multiple dev_flows, @@ -4235,13 +4299,13 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, attr_tx.group = MLX5_HAIRPIN_TX_TABLE; attr_tx.ingress = 0; attr_tx.egress = 1; - dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items, + dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items, actions_hairpin_tx.actions, error); if (!dev_flow) goto error; dev_flow->flow = flow; dev_flow->external = 0; - LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next); ret = flow_drv_translate(dev, dev_flow, &attr_tx, items_tx.items, actions_hairpin_tx.actions, error); @@ -4264,7 +4328,11 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, if (ret) goto error; } - if (dev->data->dev_started) { + /* + * If the flow is external (from application) OR device is started, then + * the flow will be applied immediately. 
+ */ + if (external || dev->data->dev_started) { ret = flow_drv_apply(dev, flow, error); if (ret < 0) goto error; @@ -4279,13 +4347,13 @@ error_before_flow: hairpin_id); return NULL; error: - assert(flow); + MLX5_ASSERT(flow); flow_mreg_del_copy_action(dev, flow); ret = rte_errno; /* Save rte_errno before cleanup. */ if (flow->hairpin_flow_id) mlx5_flow_id_release(priv->sh->flow_id_pool, flow->hairpin_flow_id); - assert(flow); + MLX5_ASSERT(flow); flow_drv_destroy(dev, flow); rte_free(flow); rte_errno = ret; /* Restore rte_errno. */ @@ -4356,6 +4424,17 @@ mlx5_flow_create(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; + /* + * If the device is not started yet, it is not allowed to created a + * flow from application. PMD default flows and traffic control flows + * are not affected. + */ + if (unlikely(!dev->data->dev_started)) { + rte_errno = ENODEV; + DRV_LOG(DEBUG, "port %u is not started when " + "inserting a flow", dev->data->port_id); + return NULL; + } return flow_list_create(dev, &priv->flows, attr, items, actions, true, error); } @@ -4401,15 +4480,25 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. + * @param active + * If flushing is called avtively. */ void -mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list, + bool active) { + uint32_t num_flushed = 0; + while (!TAILQ_EMPTY(list)) { struct rte_flow *flow; flow = TAILQ_FIRST(list); flow_list_destroy(dev, list, flow); + num_flushed++; + } + if (active) { + DRV_LOG(INFO, "port %u: %u flows flushed before stopping", + dev->data->port_id, num_flushed); } } @@ -4474,6 +4563,66 @@ error: return -rte_errno; } +/** + * Stop all default actions for flows. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_stop_default(struct rte_eth_dev *dev) +{ + flow_mreg_del_default_copy_action(dev); +} + +/** + * Start all default actions for flows. + * + * @param dev + * Pointer to Ethernet device. + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_start_default(struct rte_eth_dev *dev) +{ + struct rte_flow_error error; + + /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ + return flow_mreg_add_default_copy_action(dev, &error); +} + +/** + * Allocate intermediate resources for flow creation. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!priv->inter_flows) + priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS, + sizeof(struct mlx5_flow), 0); +} + +/** + * Free intermediate resources for flows. + * + * @param dev + * Pointer to Ethernet device. 
+ */ +void +mlx5_flow_free_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + rte_free(priv->inter_flows); + priv->inter_flows = NULL; +} + /** * Verify the flow list is empty * @@ -4689,7 +4838,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_flush(dev, &priv->flows); + mlx5_flow_list_flush(dev, &priv->flows, false); return 0; } @@ -4737,7 +4886,7 @@ flow_drv_query(struct rte_eth_dev *dev, const struct mlx5_flow_driver_ops *fops; enum mlx5_flow_drv_type ftype = flow->drv_type; - assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); + MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); fops = flow_get_drv_ops(ftype); return fops->query(dev, flow, actions, data, error); @@ -5002,7 +5151,7 @@ flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = NULL; - assert(fdir_flow); + MLX5_ASSERT(fdir_flow); TAILQ_FOREACH(flow, &priv->flows, next) { if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { DRV_LOG(DEBUG, "port %u found FDIR flow %p", @@ -5051,7 +5200,7 @@ flow_fdir_filter_add(struct rte_eth_dev *dev, NULL); if (!flow) goto error; - assert(!flow->fdir); + MLX5_ASSERT(!flow->fdir); flow->fdir = fdir_flow; DRV_LOG(DEBUG, "port %u created FDIR flow %p", dev->data->port_id, (void *)flow); @@ -5131,7 +5280,7 @@ flow_fdir_filter_flush(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_flush(dev, &priv->flows); + mlx5_flow_list_flush(dev, &priv->flows, false); } /**