X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=5127e40a0b2dc00a4ce307355cd9dae589a28529;hb=b7ed955a20eee5979eaecc9fab500a176e2741c5;hp=4c0b7ed67f8d360fcc29639d666df7222e270ff2;hpb=4bb14c83df952bdb54a735873d4a5347b38e1117;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 4c0b7ed67f..5127e40a0b 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -19,7 +19,6 @@
 
 #include <rte_common.h>
 #include <rte_ether.h>
-#include <rte_eth_ctrl.h>
 #include <rte_ethdev_driver.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
@@ -29,12 +28,17 @@
 #include "mlx5.h"
 #include "mlx5_defs.h"
-#include "mlx5_prm.h"
 #include "mlx5_glue.h"
 #include "mlx5_flow.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 
+#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
+#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
+#endif
+
 union flow_dv_attr {
 	struct {
 		uint32_t valid:1;
@@ -125,6 +129,45 @@ struct field_modify_info modify_tcp[] = {
 	{0, 0, 0},
 };
 
+/**
+ * Acquire the synchronizing object to protect multithreaded access
+ * to shared dv context. Lock occurs only if context is actually
+ * shared, i.e. we have a multiport IB device and representors are
+ * created.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ */
+static void
+flow_d_shared_lock(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+
+	if (sh->dv_refcnt > 1) {
+		int ret;
+
+		ret = pthread_mutex_lock(&sh->dv_mutex);
+		assert(!ret);
+		(void)ret;
+	}
+}
+
+static void
+flow_d_shared_unlock(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+
+	if (sh->dv_refcnt > 1) {
+		int ret;
+
+		ret = pthread_mutex_unlock(&sh->dv_mutex);
+		assert(!ret);
+		(void)ret;
+	}
+}
+
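The lock is armed only when several ports share one Infiniband context (sh->dv_refcnt > 1); on a single-port setup both helpers degenerate to no-ops. Every cache-mutating driver op is wrapped in this pair by the flow_d_translate()/flow_d_apply()/flow_d_remove()/flow_d_destroy() thunks at the end of this patch. A minimal sketch of the pattern (flow_d_example() and do_dv_work() are placeholders, not part of the patch):

	static int
	flow_d_example(struct rte_eth_dev *dev)
	{
		int ret;

		flow_d_shared_lock(dev);   /* no-op unless the DV context is shared */
		ret = do_dv_work(dev);     /* touch sh->matchers, sh->tags, ... */
		flow_d_shared_unlock(dev);
		return ret;
	}
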
 /**
  * Convert modify-header action to DV specification.
  *
@@ -568,6 +611,36 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Validate count action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_count(struct rte_eth_dev *dev,
+			      struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!priv->config.devx)
+		goto notsup_err;
+#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
+	return 0;
+#endif
+notsup_err:
+	return rte_flow_error_set
+		      (error, ENOTSUP,
+		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+		       NULL,
+		       "count action not supported");
+}
+
 /**
  * Validate the L2 encap action.
  *
@@ -774,13 +847,23 @@ flow_dv_encap_decap_resource_register
 	 struct mlx5_flow *dev_flow,
 	 struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+	struct rte_flow *flow = dev_flow->flow;
+	struct mlx5dv_dr_ns *ns;
+
+	resource->flags = flow->group ? 0 : 1;
+	if (flow->ingress)
+		ns = sh->rx_ns;
+	else
+		ns = sh->tx_ns;
 
 	/* Lookup a matching resource from cache. */
-	LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
+	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
 		if (resource->reformat_type == cache_resource->reformat_type &&
 		    resource->ft_type == cache_resource->ft_type &&
+		    resource->flags == cache_resource->flags &&
 		    resource->size == cache_resource->size &&
 		    !memcmp((const void *)resource->buf,
 			    (const void *)cache_resource->buf,
@@ -802,10 +885,10 @@ flow_dv_encap_decap_resource_register
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_packet_reformat
-			(priv->ctx, cache_resource->size,
-			 (cache_resource->size ? cache_resource->buf : NULL),
-			 cache_resource->reformat_type,
-			 cache_resource->ft_type);
+			(sh->ctx, cache_resource->reformat_type,
+			 cache_resource->ft_type, ns, cache_resource->flags,
+			 cache_resource->size,
+			 (cache_resource->size ? cache_resource->buf : NULL));
 	if (!cache_resource->verbs_action) {
 		rte_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
@@ -814,7 +897,7 @@ flow_dv_encap_decap_resource_register
 	}
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
-	LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
+	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
 	dev_flow->dv.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -822,6 +905,69 @@ flow_dv_encap_decap_resource_register
 	return 0;
 }
 
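flow_dv_encap_decap_resource_register() above and the jump, tag, and modify-header registries below all follow the same refcounted-cache shape. A condensed sketch (generic names, assuming a hypothetical key_matches() predicate; not literal code from this patch):

	LIST_FOREACH(res, &sh->some_cache, next)
		if (key_matches(res, key)) {            /* cache hit */
			rte_atomic32_inc(&res->refcnt); /* share the object */
			return res;
		}
	res = rte_calloc(__func__, 1, sizeof(*res), 0); /* cache miss */
	res->obj = create_hw_object(sh->ctx);           /* glue call, varies */
	rte_atomic32_init(&res->refcnt);
	rte_atomic32_inc(&res->refcnt);                 /* first user */
	LIST_INSERT_HEAD(&sh->some_cache, res, next);   /* publish */
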
+/**
+ * Find existing table jump resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ *   Pointer to jump table resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_jump_tbl_resource_register
+			(struct rte_eth_dev *dev,
+			 struct mlx5_flow_dv_jump_tbl_resource *resource,
+			 struct mlx5_flow *dev_flow,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
+
+	/* Lookup a matching resource from cache. */
+	LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
+		if (resource->tbl == cache_resource->tbl) {
+			DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
+				(void *)cache_resource,
+				rte_atomic32_read(&cache_resource->refcnt));
+			rte_atomic32_inc(&cache_resource->refcnt);
+			dev_flow->dv.jump = cache_resource;
+			return 0;
+		}
+	}
+	/* Register new jump table resource. */
+	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+	if (!cache_resource)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "cannot allocate resource memory");
+	*cache_resource = *resource;
+	cache_resource->action =
+		mlx5_glue->dr_create_flow_action_dest_flow_tbl
+			(resource->tbl->obj);
+	if (!cache_resource->action) {
+		rte_free(cache_resource);
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "cannot create action");
+	}
+	rte_atomic32_init(&cache_resource->refcnt);
+	rte_atomic32_inc(&cache_resource->refcnt);
+	LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
+	dev_flow->dv.jump = cache_resource;
+	DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
+		(void *)cache_resource,
+		rte_atomic32_read(&cache_resource->refcnt));
+	return 0;
+}
+
 /**
  * Get the size of specific rte_flow_item_type
  *
@@ -1384,6 +1530,37 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
 	return ret;
 }
 
+/**
+ * Validate jump action.
+ *
+ * @param[in] action
+ *   Pointer to the jump action.
+ * @param[in] group
+ *   The group of the current flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_jump(const struct rte_flow_action *action,
+			     uint32_t group,
+			     struct rte_flow_error *error)
+{
+	if (!action->conf)
+		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
+	if (group >=
+	    ((const struct rte_flow_action_jump *)action->conf)->group)
+		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be higher than"
					  " the current flow group");
+	return 0;
+}
+
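The validation above requires the jump target to be strictly greater than the flow's own group, which keeps the table graph loop-free. For example:

	/* attr->group == 0, jump to group 1  ->  OK     (0 < 1)         */
	/* attr->group == 1, jump to group 1  ->  EINVAL (not higher)    */
	/* attr->group == 2, jump to group 1  ->  EINVAL (backward jump) */
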
 /**
  * Find existing modify-header resource or create and register a new one.
  *
@@ -1406,11 +1583,16 @@ flow_dv_modify_hdr_resource_register
 	 struct mlx5_flow *dev_flow,
 	 struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+	struct mlx5dv_dr_ns *ns =
+		resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
+		sh->tx_ns : sh->rx_ns;
 
 	/* Lookup a matching resource from cache. */
-	LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
+	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
 		if (resource->ft_type == cache_resource->ft_type &&
 		    resource->actions_num == cache_resource->actions_num &&
 		    !memcmp((const void *)resource->actions,
@@ -1434,11 +1616,11 @@ flow_dv_modify_hdr_resource_register
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_modify_header
-					(priv->ctx,
+					(sh->ctx, cache_resource->ft_type,
+					 ns, 0,
 					 cache_resource->actions_num *
 					 sizeof(cache_resource->actions[0]),
-					 (uint64_t *)cache_resource->actions,
-					 cache_resource->ft_type);
+					 (uint64_t *)cache_resource->actions);
 	if (!cache_resource->verbs_action) {
 		rte_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
@@ -1447,7 +1629,7 @@ flow_dv_modify_hdr_resource_register
 	}
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
-	LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
+	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
 	dev_flow->dv.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -1455,6 +1637,92 @@ flow_dv_modify_hdr_resource_register
 	return 0;
 }
 
+/**
+ * Get or create a flow counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] shared
+ *   Indicate if this counter is shared with other flows.
+ * @param[in] id
+ *   Counter identifier.
+ *
+ * @return
+ *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_counter *cnt = NULL;
+	struct mlx5_devx_counter_set *dcs = NULL;
+	int ret;
+
+	if (!priv->config.devx) {
+		ret = -ENOTSUP;
+		goto error_exit;
+	}
+	if (shared) {
+		LIST_FOREACH(cnt, &priv->flow_counters, next) {
+			if (cnt->shared && cnt->id == id) {
+				cnt->ref_cnt++;
+				return cnt;
+			}
+		}
+	}
+	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+	dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
+	if (!dcs || !cnt) {
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
+	if (ret)
+		goto error_exit;
+	struct mlx5_flow_counter tmpl = {
+		.shared = shared,
+		.ref_cnt = 1,
+		.id = id,
+		.dcs = dcs,
+	};
+	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
+	if (!tmpl.action) {
+		ret = -errno;
+		goto error_exit;
+	}
+	*cnt = tmpl;
+	LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+	return cnt;
+error_exit:
+	rte_free(cnt);
+	rte_free(dcs);
+	rte_errno = -ret;
+	return NULL;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] counter
+ *   Pointer to the counter handler.
+ */
+static void
+flow_dv_counter_release(struct mlx5_flow_counter *counter)
+{
+	int ret;
+
+	if (!counter)
+		return;
+	if (--counter->ref_cnt == 0) {
+		ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
+		if (ret)
+			DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
+		LIST_REMOVE(counter, next);
+		rte_free(counter->dcs);
+		rte_free(counter);
+	}
+}
+
 /**
  * Verify the @p attributes will be correctly understood by the NIC and store
  * them in the @p flow if everything is correct.
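flow_dv_query_count() near the end of this patch reads these DevX counters and reports deltas relative to the values cached at the last reset. From the application side a query could look as follows (a sketch; port_id and flow are assumed to exist, stdio/inttypes includes elided):

	struct rte_flow_query_count qc = { .reset = 1 };
	struct rte_flow_action count_query[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_query(port_id, flow, count_query, &qc, &err) == 0)
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       qc.hits, qc.bytes);
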
@@ -1474,14 +1742,16 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, const struct rte_flow_attr *attributes, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; uint32_t priority_max = priv->config.flow_prio - 1; +#ifndef HAVE_MLX5DV_DR if (attributes->group) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, "groups is not supported"); +#endif if (attributes->priority != MLX5_FLOW_PRIO_RSVD && attributes->priority >= priority_max) return rte_flow_error_set(error, ENOTSUP, @@ -1516,7 +1786,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, * Pointer to the error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_ernno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -1528,7 +1798,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, uint64_t action_flags = 0; uint64_t item_flags = 0; uint64_t last_item = 0; - int tunnel = 0; uint8_t next_protocol = 0xff; int actions_n = 0; @@ -1538,7 +1807,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, if (ret < 0) return ret; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); switch (items->type) { case RTE_FLOW_ITEM_TYPE_VOID: break; @@ -1560,7 +1829,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ITEM_TYPE_IPV4: ret = mlx5_flow_validate_item_ipv4(items, item_flags, - error); + NULL, error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : @@ -1581,7 +1850,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ITEM_TYPE_IPV6: ret = mlx5_flow_validate_item_ipv6(items, item_flags, - error); + NULL, error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : @@ -1710,14 +1979,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_RSS: ret = mlx5_flow_validate_action_rss(actions, action_flags, dev, - attr, error); + attr, item_flags, + error); if (ret < 0) return ret; action_flags |= MLX5_FLOW_ACTION_RSS; ++actions_n; break; case RTE_FLOW_ACTION_TYPE_COUNT: - ret = mlx5_flow_validate_action_count(dev, attr, error); + ret = flow_dv_validate_action_count(dev, error); if (ret < 0) return ret; action_flags |= MLX5_FLOW_ACTION_COUNT; @@ -1847,6 +2117,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, MLX5_FLOW_ACTION_SET_TTL : MLX5_FLOW_ACTION_DEC_TTL; break; + case RTE_FLOW_ACTION_TYPE_JUMP: + ret = flow_dv_validate_action_jump(actions, + attr->group, error); + if (ret) + return ret; + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_JUMP; + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, @@ -1876,7 +2154,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * * @return * Pointer to mlx5_flow object on success, - * otherwise NULL and rte_ernno is set. + * otherwise NULL and rte_errno is set. */ static struct mlx5_flow * flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused, @@ -2054,11 +2332,13 @@ flow_dv_translate_item_vlan(void *matcher, void *key, * Flow pattern to translate. 
 * @param[in] inner
 *   Item is inner pattern.
+ * @param[in] group
+ *   The group to insert the rule.
  */
 static void
 flow_dv_translate_item_ipv4(void *matcher, void *key,
 			    const struct rte_flow_item *item,
-			    int inner)
+			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
 	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
@@ -2085,7 +2365,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
 						 outer_headers);
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	if (group == 0)
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	else
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
 	if (!ipv4_v)
 		return;
@@ -2127,11 +2410,13 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
  *   Flow pattern to translate.
  * @param[in] inner
  *   Item is inner pattern.
+ * @param[in] group
+ *   The group to insert the rule.
  */
 static void
 flow_dv_translate_item_ipv6(void *matcher, void *key,
 			    const struct rte_flow_item *item,
-			    int inner)
+			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
 	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
@@ -2168,7 +2453,10 @@ flow_dv_translate_item_ipv6(void *matcher, void *key,
 						 outer_headers);
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	if (group == 0)
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	else
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
 	if (!ipv6_v)
 		return;
@@ -2608,10 +2896,91 @@ flow_dv_matcher_enable(uint32_t *match_criteria)
 	match_criteria_enable |=
 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
 		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
-
+#ifdef HAVE_MLX5DV_DR
+	match_criteria_enable |=
+		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
+		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
+#endif
 	return match_criteria_enable;
 }
 
+/**
+ * Get a flow table.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] table_id
+ *   Table id to use.
+ * @param[in] egress
+ *   Direction of the table.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Returns the table resource based on the index, NULL in case of failure.
+ */
+static struct mlx5_flow_tbl_resource *
+flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
+			 uint32_t table_id, uint8_t egress,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_flow_tbl_resource *tbl;
+
+#ifdef HAVE_MLX5DV_DR
+	if (egress) {
+		tbl = &sh->tx_tbl[table_id];
+		if (!tbl->obj)
+			tbl->obj = mlx5_glue->dr_create_flow_tbl
+				(sh->tx_ns, table_id);
+	} else {
+		tbl = &sh->rx_tbl[table_id];
+		if (!tbl->obj)
+			tbl->obj = mlx5_glue->dr_create_flow_tbl
+				(sh->rx_ns, table_id);
+	}
+	if (!tbl->obj) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "cannot create table");
+		return NULL;
+	}
+	rte_atomic32_inc(&tbl->refcnt);
+	return tbl;
+#else
+	(void)error;
+	(void)tbl;
+	if (egress)
+		return &sh->tx_tbl[table_id];
+	else
+		return &sh->rx_tbl[table_id];
+#endif
+}
+
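flow_dv_matcher_register() and the JUMP translation below both derive the table index passed here from the rte_flow group, so matchers and jump actions that target the same group share one table object:

	/* Both call sites in this patch use the same mapping: */
	tbl = flow_dv_tbl_resource_get(dev, group * MLX5_GROUP_FACTOR,
				       attr->egress, error);
	/* MLX5_GROUP_FACTOR is a PMD constant defined outside this diff. */
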
+/**
+ * Release a flow table.
+ *
+ * @param[in] tbl
+ *   Table resource to be released.
+ *
+ * @return
+ *   Returns 0 if the table was released, else 1.
+ */
+static int
+flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
+{
+	if (!tbl)
+		return 0;
+	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+		tbl->obj = NULL;
+		return 0;
+	}
+	return 1;
+}
+
 /**
  * Register the flow matcher.
  *
@@ -2633,18 +3002,21 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 			 struct mlx5_flow *dev_flow,
 			 struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_matcher *cache_matcher;
 	struct mlx5dv_flow_matcher_attr dv_attr = {
 		.type = IBV_FLOW_ATTR_NORMAL,
 		.match_mask = (void *)&matcher->mask,
 	};
+	struct mlx5_flow_tbl_resource *tbl = NULL;
 
 	/* Lookup from cache. */
-	LIST_FOREACH(cache_matcher, &priv->matchers, next) {
+	LIST_FOREACH(cache_matcher, &sh->matchers, next) {
 		if (matcher->crc == cache_matcher->crc &&
 		    matcher->priority == cache_matcher->priority &&
 		    matcher->egress == cache_matcher->egress &&
+		    matcher->group == cache_matcher->group &&
 		    !memcmp((const void *)matcher->mask.buf,
 			    (const void *)cache_matcher->mask.buf,
 			    cache_matcher->mask.size)) {
@@ -2665,6 +3037,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot allocate matcher memory");
+	tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
+				       matcher->egress, error);
+	if (!tbl) {
+		rte_free(cache_matcher);
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "cannot create table");
+	}
 	*cache_matcher = *matcher;
 	dv_attr.match_criteria_enable =
 		flow_dv_matcher_enable(cache_matcher->mask.buf);
@@ -2672,23 +3052,142 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 	if (matcher->egress)
 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
 	cache_matcher->matcher_object =
-		mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
+		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
 	if (!cache_matcher->matcher_object) {
 		rte_free(cache_matcher);
+#ifdef HAVE_MLX5DV_DR
+		flow_dv_tbl_resource_release(tbl);
+#endif
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create matcher");
 	}
 	rte_atomic32_inc(&cache_matcher->refcnt);
-	LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
+	LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
 	dev_flow->dv.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
 		cache_matcher->priority,
 		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
 		rte_atomic32_read(&cache_matcher->refcnt));
+	rte_atomic32_inc(&tbl->refcnt);
 	return 0;
 }
 
+/**
+ * Add source vport match to the specified matcher.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] port
+ *   Source vport value to match.
+ * @param[in] mask
+ *   Mask to apply.
+ */
+static void
+flow_dv_translate_source_vport(void *matcher, void *key,
+			       int16_t port, uint16_t mask)
+{
+	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+
+	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
+	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
+}
+
+/**
+ * Find existing tag resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ *   Pointer to tag resource.
+ * @param[in, out] dev_flow
+ *   Pointer to the dev_flow.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_tag_resource_register
+			(struct rte_eth_dev *dev,
+			 struct mlx5_flow_dv_tag_resource *resource,
+			 struct mlx5_flow *dev_flow,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_flow_dv_tag_resource *cache_resource;
+
+	/* Lookup a matching resource from cache. */
+	LIST_FOREACH(cache_resource, &sh->tags, next) {
+		if (resource->tag == cache_resource->tag) {
+			DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
+				(void *)cache_resource,
+				rte_atomic32_read(&cache_resource->refcnt));
+			rte_atomic32_inc(&cache_resource->refcnt);
+			dev_flow->flow->tag_resource = cache_resource;
+			return 0;
+		}
+	}
+	/* Register new resource. */
+	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+	if (!cache_resource)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "cannot allocate resource memory");
+	*cache_resource = *resource;
+	cache_resource->action = mlx5_glue->dv_create_flow_action_tag
+		(resource->tag);
+	if (!cache_resource->action) {
+		rte_free(cache_resource);
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "cannot create action");
+	}
+	rte_atomic32_init(&cache_resource->refcnt);
+	rte_atomic32_inc(&cache_resource->refcnt);
+	LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
+	dev_flow->flow->tag_resource = cache_resource;
+	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
+		(void *)cache_resource,
+		rte_atomic32_read(&cache_resource->refcnt));
+	return 0;
+}
+
+/**
+ * Release the tag.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param tag
+ *   Pointer to the tag resource.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_tag_release(struct rte_eth_dev *dev,
+		    struct mlx5_flow_dv_tag_resource *tag)
+{
+	assert(tag);
+	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
+		dev->data->port_id, (void *)tag,
+		rte_atomic32_read(&tag->refcnt));
+	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
+		claim_zero(mlx5_glue->destroy_flow_action(tag->action));
+		LIST_REMOVE(tag, next);
+		DRV_LOG(DEBUG, "port %u tag %p: removed",
+			dev->data->port_id, (void *)tag);
+		rte_free(tag);
+		return 0;
+	}
+	return 1;
+}
+
 /**
  * Fill the flow with DV spec.
  *
@@ -2706,7 +3205,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
  *   Pointer to the error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 flow_dv_translate(struct rte_eth_dev *dev,
@@ -2716,7 +3215,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		  const struct rte_flow_action actions[],
 		  struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
@@ -2734,6 +3233,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		MLX5DV_FLOW_TABLE_TYPE_NIC_RX
 	};
 	union flow_dv_attr flow_attr = { .attr = 0 };
+	struct mlx5_flow_dv_tag_resource tag_resource;
 
 	if (priority == MLX5_FLOW_PRIO_RSVD)
 		priority = priv->config.flow_prio - 1;
@@ -2741,32 +3241,39 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		const struct rte_flow_action_queue *queue;
 		const struct rte_flow_action_rss *rss;
 		const struct rte_flow_action *action = actions;
+		const struct rte_flow_action_count *count = action->conf;
 		const uint8_t *rss_key;
+		const struct rte_flow_action_jump *jump_data;
+		struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
+		struct mlx5_flow_tbl_resource *tbl;
 
 		switch (actions->type) {
 		case RTE_FLOW_ACTION_TYPE_VOID:
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
-			dev_flow->dv.actions[actions_n].type =
-				MLX5DV_FLOW_ACTION_TAG;
-			dev_flow->dv.actions[actions_n].tag_value =
+			tag_resource.tag =
 				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			actions_n++;
+			if (!flow->tag_resource)
+				if (flow_dv_tag_resource_register
+				    (dev, &tag_resource, dev_flow, error))
+					return -rte_errno;
+			dev_flow->dv.actions[actions_n++] =
+				flow->tag_resource->action;
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
-			dev_flow->dv.actions[actions_n].type =
-				MLX5DV_FLOW_ACTION_TAG;
-			dev_flow->dv.actions[actions_n].tag_value =
-				mlx5_flow_mark_set
-				(((const struct rte_flow_action_mark *)
-				  (actions->conf))->id);
-			actions_n++;
+			tag_resource.tag = mlx5_flow_mark_set
+			      (((const struct rte_flow_action_mark *)
+			       (actions->conf))->id);
+			if (!flow->tag_resource)
+				if (flow_dv_tag_resource_register
+				    (dev, &tag_resource, dev_flow, error))
+					return -rte_errno;
+			dev_flow->dv.actions[actions_n++] =
+				flow->tag_resource->action;
 			action_flags |= MLX5_FLOW_ACTION_MARK;
 			break;
 		case RTE_FLOW_ACTION_TYPE_DROP:
-			dev_flow->dv.actions[actions_n].type =
-				MLX5DV_FLOW_ACTION_DROP;
 			action_flags |= MLX5_FLOW_ACTION_DROP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
@@ -2789,16 +3296,40 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			flow->rss.level = rss->level;
 			action_flags |= MLX5_FLOW_ACTION_RSS;
 			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			if (!priv->config.devx) {
+				rte_errno = ENOTSUP;
+				goto cnt_err;
+			}
+			flow->counter = flow_dv_counter_new(dev, count->shared,
+							    count->id);
+			if (flow->counter == NULL)
+				goto cnt_err;
+			dev_flow->dv.actions[actions_n++] =
+				flow->counter->action;
+			action_flags |= MLX5_FLOW_ACTION_COUNT;
+			break;
+cnt_err:
+			if (rte_errno == ENOTSUP)
+				return rte_flow_error_set
+					      (error, ENOTSUP,
+					       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					       NULL,
+					       "count action not supported");
+			else
+				return rte_flow_error_set
+						(error, rte_errno,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 action,
+						 "cannot create counter"
+						 " object.");
 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
 			if (flow_dv_create_action_l2_encap(dev, actions,
 							   dev_flow, error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n].type =
-				MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
-			dev_flow->dv.actions[actions_n].action =
+			dev_flow->dv.actions[actions_n++] =
 				dev_flow->dv.encap_decap->verbs_action;
-			actions_n++;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
 					MLX5_FLOW_ACTION_VXLAN_ENCAP :
 					MLX5_FLOW_ACTION_NVGRE_ENCAP;
 			break;
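With the tag-resource cache, FLAG and MARK no longer emit a per-flow tag value: the first flow marking a given id creates the mlx5dv action, later flows only bump its refcnt. Application-side shape (a sketch; the queue conf is assumed):

	struct rte_flow_action_mark mark = { .id = 0xcafe };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* A second rule with .id = 0xcafe reuses the cached tag action. */
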
@@ -2809,11 +3340,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			if (flow_dv_create_action_l2_decap(dev, dev_flow,
 							   error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n].type =
-				MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
-			dev_flow->dv.actions[actions_n].action =
+			dev_flow->dv.actions[actions_n++] =
 				dev_flow->dv.encap_decap->verbs_action;
-			actions_n++;
 			action_flags |= actions->type ==
 					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
 					MLX5_FLOW_ACTION_VXLAN_DECAP :
@@ -2825,9 +3353,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 				if (flow_dv_create_action_raw_encap
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n].type =
-					MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
-				dev_flow->dv.actions[actions_n].action =
+				dev_flow->dv.actions[actions_n++] =
 					dev_flow->dv.encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
@@ -2835,12 +3361,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
 								   dev_flow,
 								   error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n].type =
-					MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
-				dev_flow->dv.actions[actions_n].action =
+				dev_flow->dv.actions[actions_n++] =
 					dev_flow->dv.encap_decap->verbs_action;
 			}
-			actions_n++;
 			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
@@ -2855,15 +3378,37 @@ flow_dv_translate(struct rte_eth_dev *dev,
 								   dev_flow,
 								   error))
 					return -rte_errno;
-				dev_flow->dv.actions[actions_n].type =
-					MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
-				dev_flow->dv.actions[actions_n].action =
+				dev_flow->dv.actions[actions_n++] =
 					dev_flow->dv.encap_decap->verbs_action;
-				actions_n++;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
 			break;
+		case RTE_FLOW_ACTION_TYPE_JUMP:
+			jump_data = action->conf;
+			tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
+						       MLX5_GROUP_FACTOR,
+						       attr->egress, error);
+			if (!tbl)
+				return rte_flow_error_set
+						(error, errno,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 NULL,
+						 "cannot create jump action.");
+			jump_tbl_resource.tbl = tbl;
+			if (flow_dv_jump_tbl_resource_register
+			    (dev, &jump_tbl_resource, dev_flow, error)) {
+				flow_dv_tbl_resource_release(tbl);
+				return rte_flow_error_set
+						(error, errno,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 NULL,
+						 "cannot create jump action.");
+			}
+			dev_flow->dv.actions[actions_n++] =
+				dev_flow->dv.jump->action;
+			action_flags |= MLX5_FLOW_ACTION_JUMP;
+			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
 			if (flow_dv_convert_action_modify_mac(&res, actions,
@@ -2928,11 +3473,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 						  dev_flow,
 						  error))
 				return -rte_errno;
-			dev_flow->dv.actions[actions_n].type =
-				MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
-			dev_flow->dv.actions[actions_n].action =
+			dev_flow->dv.actions[actions_n++] =
 				dev_flow->dv.modify_hdr->verbs_action;
-			actions_n++;
 		}
 		break;
 	default:
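The RAW_DECAP/RAW_ENCAP cases above cooperate: a decap immediately followed by an encap is folded into one packet-reformat action, otherwise each is emitted on its own. A hypothetical application-side pairing (encap_buf/encap_len are assumed to hold a pre-built header stack):

	struct rte_flow_action_raw_decap decap = { .size = 14 }; /* strip L2 */
	struct rte_flow_action_raw_encap encap = {
		.data = encap_buf,
		.size = encap_len,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
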
+ */ + flow_dv_translate_source_vport(matcher.mask.buf, + dev_flow->dv.value.buf, + priv->vport_id, + 0xffff); + } for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); void *match_mask = matcher.mask.buf; @@ -2965,7 +3520,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_IPV4: flow_dv_translate_item_ipv4(match_mask, match_value, - items, tunnel); + items, tunnel, attr->group); matcher.priority = MLX5_PRIORITY_MAP_L3; dev_flow->dv.hash_fields |= mlx5_flow_hashfields_adjust @@ -2977,7 +3532,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_IPV6: flow_dv_translate_item_ipv6(match_mask, match_value, - items, tunnel); + items, tunnel, attr->group); matcher.priority = MLX5_PRIORITY_MAP_L3; dev_flow->dv.hash_fields |= mlx5_flow_hashfields_adjust @@ -3055,6 +3610,7 @@ flow_dv_translate(struct rte_eth_dev *dev, matcher.priority = mlx5_flow_adjust_priority(dev, priority, matcher.priority); matcher.egress = attr->egress; + matcher.group = attr->group; if (flow_dv_matcher_register(dev, &matcher, dev_flow, error)) return -rte_errno; return 0; @@ -3094,9 +3650,9 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get drop hash queue"); goto error; } - dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP; - dv->actions[n].qp = dv->hrxq->qp; - n++; + dv->actions[n++] = + mlx5_glue->dv_create_flow_action_dest_ibv_qp + (dv->hrxq->qp); } else if (flow->actions & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) { struct mlx5_hrxq *hrxq; @@ -3121,9 +3677,9 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, goto error; } dv->hrxq = hrxq; - dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP; - dv->actions[n].qp = hrxq->qp; - n++; + dv->actions[n++] = + mlx5_glue->dv_create_flow_action_dest_ibv_qp + (dv->hrxq->qp); } dv->flow = mlx5_glue->dv_create_flow(dv->matcher->matcher_object, @@ -3170,6 +3726,9 @@ flow_dv_matcher_release(struct rte_eth_dev *dev, struct mlx5_flow *flow) { struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_tbl_resource *tbl; assert(matcher->matcher_object); DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", @@ -3179,6 +3738,11 @@ flow_dv_matcher_release(struct rte_eth_dev *dev, claim_zero(mlx5_glue->dv_destroy_flow_matcher (matcher->matcher_object)); LIST_REMOVE(matcher, next); + if (matcher->egress) + tbl = &sh->tx_tbl[matcher->group]; + else + tbl = &sh->rx_tbl[matcher->group]; + flow_dv_tbl_resource_release(tbl); rte_free(matcher); DRV_LOG(DEBUG, "port %u matcher %p: removed", dev->data->port_id, (void *)matcher); @@ -3218,6 +3782,38 @@ flow_dv_encap_decap_resource_release(struct mlx5_flow *flow) return 1; } +/** + * Release an jump to table action resource. + * + * @param flow + * Pointer to mlx5_flow. + * + * @return + * 1 while a reference on it exists, 0 when freed. 
+ */ +static int +flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow) +{ + struct mlx5_flow_dv_jump_tbl_resource *cache_resource = + flow->dv.jump; + + assert(cache_resource->action); + DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->action)); + LIST_REMOVE(cache_resource, next); + flow_dv_tbl_resource_release(cache_resource->tbl); + rte_free(cache_resource); + DRV_LOG(DEBUG, "jump table resource %p: removed", + (void *)cache_resource); + return 0; + } + return 1; +} + /** * Release a modify-header resource. * @@ -3268,7 +3864,7 @@ flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) LIST_FOREACH(dev_flow, &flow->dev_flows, next) { dv = &dev_flow->dv; if (dv->flow) { - claim_zero(mlx5_glue->destroy_flow(dv->flow)); + claim_zero(mlx5_glue->dv_destroy_flow(dv->flow)); dv->flow = NULL; } if (dv->hrxq) { @@ -3279,8 +3875,6 @@ flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) dv->hrxq = NULL; } } - if (flow->counter) - flow->counter = NULL; } /** @@ -3299,6 +3893,14 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) if (!flow) return; flow_dv_remove(dev, flow); + if (flow->counter) { + flow_dv_counter_release(flow->counter); + flow->counter = NULL; + } + if (flow->tag_resource) { + flow_dv_tag_release(dev, flow->tag_resource); + flow->tag_resource = NULL; + } while (!LIST_EMPTY(&flow->dev_flows)) { dev_flow = LIST_FIRST(&flow->dev_flows); LIST_REMOVE(dev_flow, next); @@ -3308,10 +3910,68 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) flow_dv_encap_decap_resource_release(dev_flow); if (dev_flow->dv.modify_hdr) flow_dv_modify_hdr_resource_release(dev_flow); + if (dev_flow->dv.jump) + flow_dv_jump_tbl_resource_release(dev_flow); rte_free(dev_flow); } } +/** + * Query a dv flow rule for its statistics via devx. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the sub flow. + * @param[out] data + * data retrieved by the query. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + void *data, struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_query_count *qc = data; + uint64_t pkts = 0; + uint64_t bytes = 0; + int err; + + if (!priv->config.devx) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not supported"); + if (flow->counter) { + err = mlx5_devx_cmd_flow_counter_query + (flow->counter->dcs, + qc->reset, &pkts, &bytes); + if (err) + return rte_flow_error_set + (error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot read counters"); + qc->hits_set = 1; + qc->bytes_set = 1; + qc->hits = pkts - flow->counter->hits; + qc->bytes = bytes - flow->counter->bytes; + if (qc->reset) { + flow->counter->hits = pkts; + flow->counter->bytes = bytes; + } + return 0; + } + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not available"); +} + /** * Query a flow. 
* @@ -3319,26 +3979,95 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) * @see rte_flow_ops */ static int -flow_dv_query(struct rte_eth_dev *dev __rte_unused, +flow_dv_query(struct rte_eth_dev *dev, struct rte_flow *flow __rte_unused, const struct rte_flow_action *actions __rte_unused, void *data __rte_unused, struct rte_flow_error *error __rte_unused) { - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "flow query with DV is not supported"); + int ret = -EINVAL; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_dv_query_count(dev, flow, data, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + return ret; +} + +/* + * Mutex-protected thunk to flow_dv_translate(). + */ +static int +flow_d_translate(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + + flow_d_shared_lock(dev); + ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error); + flow_d_shared_unlock(dev); + return ret; } +/* + * Mutex-protected thunk to flow_dv_apply(). + */ +static int +flow_d_apply(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + + flow_d_shared_lock(dev); + ret = flow_dv_apply(dev, flow, error); + flow_d_shared_unlock(dev); + return ret; +} + +/* + * Mutex-protected thunk to flow_dv_remove(). + */ +static void +flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + flow_d_shared_lock(dev); + flow_dv_remove(dev, flow); + flow_d_shared_unlock(dev); +} + +/* + * Mutex-protected thunk to flow_dv_destroy(). + */ +static void +flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + flow_d_shared_lock(dev); + flow_dv_destroy(dev, flow); + flow_d_shared_unlock(dev); +} const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { .validate = flow_dv_validate, .prepare = flow_dv_prepare, - .translate = flow_dv_translate, - .apply = flow_dv_apply, - .remove = flow_dv_remove, - .destroy = flow_dv_destroy, + .translate = flow_d_translate, + .apply = flow_d_apply, + .remove = flow_d_remove, + .destroy = flow_d_destroy, .query = flow_dv_query, };
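
Taken together, the patch lets an application combine the new actions in one rule; an end-to-end sketch (port_id and pattern are assumed to exist, error handling trimmed):

	struct rte_flow_error err;
	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
	struct rte_flow_action_count count = { .shared = 0, .id = 0 };
	struct rte_flow_action_jump jump = { .group = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);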