X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_verbs.c;h=6b86437abcb964c49f8afd0312eb2c396575bf13;hb=bf89db4409bb43659ee49626f76869a31bb55150;hp=f1489f119551e29c590a206f13389154a8b4e8bc;hpb=c3d3b14099bc4e422bb6642ddc6fc3b384291e4b;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index f1489f1195..6b86437abc 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -64,7 +64,7 @@ flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev, MLX5_ASSERT(pool); if (ppool) *ppool = pool; - return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL]; + return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); } /** @@ -83,7 +83,7 @@ flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev, */ static int flow_verbs_counter_create(struct rte_eth_dev *dev, - struct mlx5_flow_counter *counter) + struct mlx5_flow_counter_ext *counter) { #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) struct mlx5_priv *priv = dev->data->dev_private; @@ -145,14 +145,15 @@ flow_verbs_counter_create(struct rte_eth_dev *dev, * Counter identifier. * * @return - * A pointer to the counter, NULL otherwise and rte_errno is set. + * Index to the counter, 0 otherwise and rte_errno is set. */ -static struct mlx5_flow_counter * +static uint32_t flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0); struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter_ext *cnt_ext = NULL; struct mlx5_flow_counter *cnt = NULL; uint32_t n_valid = rte_atomic16_read(&cont->n_valid); uint32_t pool_idx; @@ -163,12 +164,10 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) { pool = cont->pools[pool_idx]; for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { - cnt = &pool->counters_raw[i]; - if (cnt->shared && cnt->id == id) { - cnt->ref_cnt++; - return (struct mlx5_flow_counter *) - (uintptr_t) - MLX5_MAKE_CNT_IDX(pool_idx, i); + cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i); + if (cnt_ext->shared && cnt_ext->id == id) { + cnt_ext->ref_cnt++; + return MLX5_MAKE_CNT_IDX(pool_idx, i); } } } @@ -177,7 +176,7 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) pool = cont->pools[pool_idx]; if (!pool) continue; - cnt = TAILQ_FIRST(&pool->counters); + cnt = TAILQ_FIRST(&pool->counters[0]); if (cnt) break; } @@ -191,7 +190,7 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) (n_valid + MLX5_CNT_CONTAINER_RESIZE); pools = rte_zmalloc(__func__, size, 0); if (!pools) - return NULL; + return 0; if (n_valid) { memcpy(pools, cont->pools, sizeof(struct mlx5_flow_counter_pool *) * @@ -202,35 +201,38 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) cont->n += MLX5_CNT_CONTAINER_RESIZE; } /* Allocate memory for new pool*/ - size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL; + size = sizeof(*pool) + (sizeof(*cnt_ext) + sizeof(*cnt)) * + MLX5_COUNTERS_PER_POOL; pool = rte_calloc(__func__, 1, size, 0); if (!pool) - return NULL; + return 0; + pool->type |= CNT_POOL_TYPE_EXT; for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { - cnt = &pool->counters_raw[i]; - TAILQ_INSERT_HEAD(&pool->counters, cnt, next); + cnt = MLX5_POOL_GET_CNT(pool, i); + TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next); } - cnt = &pool->counters_raw[0]; + cnt = 
MLX5_POOL_GET_CNT(pool, 0); cont->pools[n_valid] = pool; pool_idx = n_valid; rte_atomic16_add(&cont->n_valid, 1); TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); } - cnt->id = id; - cnt->shared = shared; - cnt->ref_cnt = 1; + i = MLX5_CNT_ARRAY_IDX(pool, cnt); + cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i); + cnt_ext->id = id; + cnt_ext->shared = shared; + cnt_ext->ref_cnt = 1; cnt->hits = 0; cnt->bytes = 0; /* Create counter with Verbs. */ - ret = flow_verbs_counter_create(dev, cnt); + ret = flow_verbs_counter_create(dev, cnt_ext); if (!ret) { - TAILQ_REMOVE(&pool->counters, cnt, next); - return (struct mlx5_flow_counter *)(uintptr_t) - MLX5_MAKE_CNT_IDX(pool_idx, (cnt - pool->counters_raw)); + TAILQ_REMOVE(&pool->counters[0], cnt, next); + return MLX5_MAKE_CNT_IDX(pool_idx, i); } /* Some error occurred in Verbs library. */ rte_errno = -ret; - return NULL; + return 0; } /** @@ -239,26 +241,27 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) * @param[in] dev * Pointer to the Ethernet device structure. * @param[in] counter - * Pointer to the counter handler. + * Index to the counter handler. */ static void -flow_verbs_counter_release(struct rte_eth_dev *dev, - struct mlx5_flow_counter *counter) +flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter) { struct mlx5_flow_counter_pool *pool; struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_ext *cnt_ext; - cnt = flow_verbs_counter_get_by_idx(dev, (uintptr_t)(void *)counter, + cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool); - if (--counter->ref_cnt == 0) { + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + if (--cnt_ext->ref_cnt == 0) { #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) - claim_zero(mlx5_glue->destroy_counter_set(cnt->cs)); - cnt->cs = NULL; + claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs)); + cnt_ext->cs = NULL; #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - claim_zero(mlx5_glue->destroy_counters(cnt->cs)); - cnt->cs = NULL; + claim_zero(mlx5_glue->destroy_counters(cnt_ext->cs)); + cnt_ext->cs = NULL; #endif - TAILQ_INSERT_HEAD(&pool->counters, cnt, next); + TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next); } } @@ -275,15 +278,17 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, { #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - if (flow->counter && flow->counter->cs) { + if (flow->counter) { + struct mlx5_flow_counter_pool *pool; struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx - (dev, (uintptr_t)(void *) - flow->counter, NULL); + (dev, flow->counter, &pool); + struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT + (pool, cnt); struct rte_flow_query_count *qc = data; uint64_t counters[2] = {0, 0}; #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) struct ibv_query_counter_set_attr query_cs_attr = { - .cs = cnt->cs, + .cs = cnt_ext->cs, .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, }; struct ibv_counter_set_data query_out = { @@ -294,7 +299,7 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, &query_out); #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) int err = mlx5_glue->query_counters - (cnt->cs, counters, + (cnt_ext->cs, counters, RTE_DIM(counters), IBV_READ_COUNTERS_ATTR_PREFER_CACHED); #endif @@ -674,6 +679,28 @@ flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow, udp.val.src_port &= udp.mask.src_port; udp.val.dst_port &= udp.mask.dst_port; } + item++; + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) + item++; + if (!(udp.val.dst_port & udp.mask.dst_port)) { + 
switch ((item)->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN); + udp.mask.dst_port = 0xffff; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE); + udp.mask.dst_port = 0xffff; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS); + udp.mask.dst_port = 0xffff; + break; + default: + break; + } + } + flow_verbs_spec_add(&dev_flow->verbs, &udp, size); } @@ -937,21 +964,19 @@ flow_verbs_translate_action_drop * the input is valid and that there is space to insert the requested action * into the flow. * - * @param[in] dev_flow - * Pointer to mlx5_flow. + * @param[in] rss_desc + * Pointer to mlx5_flow_rss_desc. * @param[in] action * Action configuration. */ static void -flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow, +flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc, const struct rte_flow_action *action) { const struct rte_flow_action_queue *queue = action->conf; - struct rte_flow *flow = dev_flow->flow; - if (flow->rss.queue) - (*flow->rss.queue)[0] = queue->index; - flow->rss.queue_num = 1; + rss_desc->queue[0] = queue->index; + rss_desc->queue_num = 1; } /** @@ -959,28 +984,23 @@ flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow, * the input is valid and that there is space to insert the requested action * into the flow. * + * @param[in] rss_desc + * Pointer to mlx5_flow_rss_desc. * @param[in] action * Action configuration. - * @param[in, out] action_flags - * Pointer to the detected actions. - * @param[in] dev_flow - * Pointer to mlx5_flow. */ static void -flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow, +flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc, const struct rte_flow_action *action) { const struct rte_flow_action_rss *rss = action->conf; const uint8_t *rss_key; - struct rte_flow *flow = dev_flow->flow; - if (flow->rss.queue) - memcpy((*flow->rss.queue), rss->queue, - rss->queue_num * sizeof(uint16_t)); - flow->rss.queue_num = rss->queue_num; + memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t)); + rss_desc->queue_num = rss->queue_num; /* NULL RSS key indicates default RSS key. */ rss_key = !rss->key ? rss_hash_default_key : rss->key; - memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN); + memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); /* * rss->level and rss.types should be set in advance when expanding * items for RSS. 
@@ -1062,9 +1082,11 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, { const struct rte_flow_action_count *count = action->conf; struct rte_flow *flow = dev_flow->flow; - struct mlx5_flow_counter *cnt = NULL; #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter *cnt = NULL; + struct mlx5_flow_counter_ext *cnt_ext; unsigned int size = sizeof(struct ibv_flow_spec_counter_action); struct ibv_flow_spec_counter_action counter = { .type = IBV_FLOW_SPEC_ACTION_COUNT, @@ -1082,13 +1104,15 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, "cannot get counter" " context."); } - cnt = flow_verbs_counter_get_by_idx(dev, (uintptr_t)(void *) - flow->counter, NULL); #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) - counter.counter_set_handle = cnt->cs->handle; + cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool); + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + counter.counter_set_handle = cnt_ext->cs->handle; flow_verbs_spec_add(&dev_flow->verbs, &counter, size); #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - counter.counters = cnt->cs; + cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool); + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + counter.counters = cnt_ext->cs; flow_verbs_spec_add(&dev_flow->verbs, &counter, size); #endif return 0; @@ -1107,6 +1131,8 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, * Pointer to the list of actions. * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] hairpin + * Number of hairpin TX actions, 0 means classic flow. * @param[out] error * Pointer to the error structure. * @@ -1119,6 +1145,7 @@ flow_verbs_validate(struct rte_eth_dev *dev, const struct rte_flow_item items[], const struct rte_flow_action actions[], bool external __rte_unused, + int hairpin __rte_unused, struct rte_flow_error *error) { int ret; @@ -1497,6 +1524,7 @@ flow_verbs_prepare(struct rte_eth_dev *dev, struct rte_flow_error *error) { size_t size = 0; + uint32_t handle_idx = 0; struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; @@ -1516,7 +1544,8 @@ flow_verbs_prepare(struct rte_eth_dev *dev, "not free temporary device flow"); return NULL; } - dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0); + dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + &handle_idx); if (!dev_handle) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -1526,10 +1555,12 @@ flow_verbs_prepare(struct rte_eth_dev *dev, /* No multi-thread supporting. */ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; dev_flow->handle = dev_handle; + dev_flow->handle_idx = handle_idx; /* Memcpy is used, only size needs to be cleared to 0. */ dev_flow->verbs.size = 0; dev_flow->verbs.attr.num_of_specs = 0; dev_flow->ingress = attr->ingress; + dev_flow->hash_fields = 0; /* Need to set transfer attribute: not supported in Verbs mode. 
*/ return dev_flow; } @@ -1566,6 +1597,9 @@ flow_verbs_translate(struct rte_eth_dev *dev, uint64_t priority = attr->priority; uint32_t subpriority = 0; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) + priv->rss_desc) + [!!priv->flow_nested_idx]; if (priority == MLX5_FLOW_PRIO_RSVD) priority = priv->config.flow_prio - 1; @@ -1578,22 +1612,27 @@ flow_verbs_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_FLAG: flow_verbs_translate_action_flag(dev_flow, actions); action_flags |= MLX5_FLOW_ACTION_FLAG; + dev_flow->handle->mark = 1; break; case RTE_FLOW_ACTION_TYPE_MARK: flow_verbs_translate_action_mark(dev_flow, actions); action_flags |= MLX5_FLOW_ACTION_MARK; + dev_flow->handle->mark = 1; break; case RTE_FLOW_ACTION_TYPE_DROP: flow_verbs_translate_action_drop(dev_flow, actions); action_flags |= MLX5_FLOW_ACTION_DROP; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; break; case RTE_FLOW_ACTION_TYPE_QUEUE: - flow_verbs_translate_action_queue(dev_flow, actions); + flow_verbs_translate_action_queue(rss_desc, actions); action_flags |= MLX5_FLOW_ACTION_QUEUE; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; break; case RTE_FLOW_ACTION_TYPE_RSS: - flow_verbs_translate_action_rss(dev_flow, actions); + flow_verbs_translate_action_rss(rss_desc, actions); action_flags |= MLX5_FLOW_ACTION_RSS; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; break; case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_verbs_translate_action_count(dev_flow, @@ -1610,7 +1649,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, "action not supported"); } } - dev_flow->handle->act_flags = action_flags; + dev_flow->act_flags = action_flags; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); @@ -1639,7 +1678,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, subpriority = MLX5_PRIORITY_MAP_L3; dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, + (rss_desc, tunnel, MLX5_IPV4_LAYER_TYPES, MLX5_IPV4_IBV_RX_HASH); item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : @@ -1651,7 +1690,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, subpriority = MLX5_PRIORITY_MAP_L3; dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, + (rss_desc, tunnel, MLX5_IPV6_LAYER_TYPES, MLX5_IPV6_IBV_RX_HASH); item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : @@ -1663,7 +1702,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, subpriority = MLX5_PRIORITY_MAP_L4; dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, ETH_RSS_TCP, + (rss_desc, tunnel, ETH_RSS_TCP, (IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)); item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : @@ -1675,7 +1714,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, subpriority = MLX5_PRIORITY_MAP_L4; dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, ETH_RSS_UDP, + (rss_desc, tunnel, ETH_RSS_UDP, (IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)); item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : @@ -1716,7 +1755,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, /* Other members of attr will be ignored. 
*/ dev_flow->verbs.attr.priority = mlx5_flow_adjust_priority(dev, priority, subpriority); - dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port; + dev_flow->verbs.attr.port = (uint8_t)priv->dev_port; return 0; } @@ -1731,21 +1770,28 @@ flow_verbs_translate(struct rte_eth_dev *dev, static void flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow) { + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *handle; + uint32_t handle_idx; if (!flow) return; - LIST_FOREACH(handle, &flow->dev_handles, next) { + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, handle, next) { if (handle->ib_flow) { claim_zero(mlx5_glue->destroy_flow(handle->ib_flow)); handle->ib_flow = NULL; } - if (handle->hrxq) { - if (handle->act_flags & MLX5_FLOW_ACTION_DROP) + /* hrxq is union, don't touch it only the flag is set. */ + if (handle->rix_hrxq) { + if (handle->fate_action == MLX5_FLOW_FATE_DROP) { mlx5_hrxq_drop_release(dev); - else - mlx5_hrxq_release(dev, handle->hrxq); - handle->hrxq = NULL; + handle->rix_hrxq = 0; + } else if (handle->fate_action == + MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; + } } if (handle->vf_vlan.tag && handle->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); @@ -1763,19 +1809,26 @@ flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow) static void flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *handle; if (!flow) return; flow_verbs_remove(dev, flow); - while (!LIST_EMPTY(&flow->dev_handles)) { - handle = LIST_FIRST(&flow->dev_handles); - LIST_REMOVE(handle, next); - rte_free(handle); + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; + + handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); + if (!handle) + return; + flow->dev_handles = handle->next.next; + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); } if (flow->counter) { flow_verbs_counter_release(dev, flow->counter); - flow->counter = NULL; + flow->counter = 0; } } @@ -1799,15 +1852,17 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *handle; struct mlx5_flow *dev_flow; + struct mlx5_hrxq *hrxq; + uint32_t dev_handles; int err; int idx; - for (idx = priv->flow_idx - 1; idx >= 0; idx--) { + for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; handle = dev_flow->handle; - if (handle->act_flags & MLX5_FLOW_ACTION_DROP) { - handle->hrxq = mlx5_hrxq_drop_new(dev); - if (!handle->hrxq) { + if (handle->fate_action == MLX5_FLOW_FATE_DROP) { + hrxq = mlx5_hrxq_drop_new(dev); + if (!hrxq) { rte_flow_error_set (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -1815,22 +1870,27 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, goto error; } } else { - struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx; + struct mlx5_flow_rss_desc *rss_desc = + &((struct mlx5_flow_rss_desc *)priv->rss_desc) + [!!priv->flow_nested_idx]; - MLX5_ASSERT(flow->rss.queue); - hrxq = mlx5_hrxq_get(dev, flow->rss.key, + MLX5_ASSERT(rss_desc->queue_num); + hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN, dev_flow->hash_fields, - (*flow->rss.queue), - flow->rss.queue_num); - if (!hrxq) - hrxq = mlx5_hrxq_new(dev, flow->rss.key, + rss_desc->queue, + rss_desc->queue_num); + if 
(!hrxq_idx) + hrxq_idx = mlx5_hrxq_new(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN, dev_flow->hash_fields, - (*flow->rss.queue), - flow->rss.queue_num, + rss_desc->queue, + rss_desc->queue_num, !!(handle->layers & MLX5_FLOW_LAYER_TUNNEL)); + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + hrxq_idx); if (!hrxq) { rte_flow_error_set (error, rte_errno, @@ -1838,9 +1898,10 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get hash queue"); goto error; } - handle->hrxq = hrxq; + handle->rix_hrxq = hrxq_idx; } - handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp, + MLX5_ASSERT(hrxq); + handle->ib_flow = mlx5_glue->create_flow(hrxq->qp, &dev_flow->verbs.attr); if (!handle->ib_flow) { rte_flow_error_set(error, errno, @@ -1863,13 +1924,18 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, return 0; error: err = rte_errno; /* Save rte_errno before cleanup. */ - LIST_FOREACH(handle, &flow->dev_handles, next) { - if (handle->hrxq) { - if (handle->act_flags & MLX5_FLOW_ACTION_DROP) + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + dev_handles, handle, next) { + /* hrxq is union, don't touch it only the flag is set. */ + if (handle->rix_hrxq) { + if (handle->fate_action == MLX5_FLOW_FATE_DROP) { mlx5_hrxq_drop_release(dev); - else - mlx5_hrxq_release(dev, handle->hrxq); - handle->hrxq = NULL; + handle->rix_hrxq = 0; + } else if (handle->fate_action == + MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; + } } if (handle->vf_vlan.tag && handle->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
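
Editor's note: the hunks above replace the old "counter pointer cast to an index" trick with a plain uint32_t counter index: flow_verbs_counter_new() now returns MLX5_MAKE_CNT_IDX(pool_idx, i) (0 on failure, matching the updated "Index to the counter, 0 otherwise" doc comment), and flow_verbs_counter_get_by_idx() turns that index back into a pool plus an in-pool slot before calling MLX5_POOL_GET_CNT(). The standalone sketch below mirrors that encode/decode round trip so the scheme can be checked in isolation. It is a minimal illustration, not driver code: my_make_cnt_idx(), my_split_cnt_idx() and MY_COUNTERS_PER_POOL are assumed stand-ins modelled on the diff, not the macros defined in mlx5.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed pool size; the real constant is MLX5_COUNTERS_PER_POOL in mlx5.h. */
#define MY_COUNTERS_PER_POOL 512

/*
 * Encode (pool index, in-pool slot) into one composite index. The +1 keeps 0
 * free so that "counter == 0" can mean "no counter attached", which is what
 * lets the patch change failure returns from NULL to 0 and reset
 * flow->counter to 0 on release.
 */
static inline uint32_t
my_make_cnt_idx(uint32_t pool_idx, uint32_t slot)
{
	return pool_idx * MY_COUNTERS_PER_POOL + slot + 1;
}

/*
 * Decode a composite index back into its pool index and in-pool slot; the
 * modulo step is visible in the first hunk of flow_verbs_counter_get_by_idx()
 * above, the pool lookup by division happens just before it.
 */
static inline void
my_split_cnt_idx(uint32_t idx, uint32_t *pool_idx, uint32_t *slot)
{
	idx--;
	*pool_idx = idx / MY_COUNTERS_PER_POOL;
	*slot = idx % MY_COUNTERS_PER_POOL;
}

int
main(void)
{
	uint32_t pool_idx, slot;
	uint32_t idx = my_make_cnt_idx(3, 42);

	assert(idx != 0);	/* 0 stays reserved for "no counter". */
	my_split_cnt_idx(idx, &pool_idx, &slot);
	assert(pool_idx == 3 && slot == 42);
	printf("idx=%u -> pool=%u slot=%u\n", idx, pool_idx, slot);
	return 0;
}

The same 1-based convention is why the release and query paths in this patch only touch a counter when flow->counter is non-zero, and why the new handle list (flow->dev_handles walked with SILIST_FOREACH) can likewise use index 0 as its end-of-list marker.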