X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=5bb252e6970e228aadbf9fafc961f31a245796a0;hb=eb10fe7fb150d3a9c1ef01134403f85890d5e06e;hp=c7702c51d30f03d1ab2ac659cbe2ae16f62c6f73;hpb=ffd5b302baabdc96fcd6deb58d8cd3ec39020c5c;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index c7702c51d3..5bb252e697 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -285,7 +285,7 @@ static void
 flow_dv_shared_lock(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 
 	if (sh->dv_refcnt > 1) {
 		int ret;
@@ -300,7 +300,7 @@ static void
 flow_dv_shared_unlock(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 
 	if (sh->dv_refcnt > 1) {
 		int ret;
@@ -1639,18 +1639,6 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
 	return 0;
 }
 
-/*
- * GTP flags are contained in 1 byte of the format:
- * -------------------------------------------
- * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
- * |-----------------------------------------|
- * | value | Version | PT | Res | E | S | PN |
- * -------------------------------------------
- *
- * Matching is supported only for GTP flags E, S, PN.
- */
-#define MLX5_GTP_FLAGS_MASK	0x07
-
 /**
  * Validate VLAN item.
  *
@@ -1724,6 +1712,18 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item,
 	return 0;
 }
 
+/*
+ * GTP flags are contained in 1 byte of the format:
+ * -------------------------------------------
+ * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
+ * |-----------------------------------------|
+ * | value | Version | PT | Res | E | S | PN |
+ * -------------------------------------------
+ *
+ * Matching is supported only for GTP flags E, S, PN.
+ */
+#define MLX5_GTP_FLAGS_MASK	0x07
+
 /**
  * Validate GTP item.
  *
@@ -1881,6 +1881,9 @@ flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
 	const struct rte_flow_item_vlan *vlan_m = items->mask;
 	const struct rte_flow_item_vlan *vlan_v = items->spec;
 
+	/* If VLAN item in pattern doesn't contain data, return here. */
+	if (!vlan_v)
+		return;
 	if (!vlan_m)
 		vlan_m = &nic_mask;
 	/* Only full match values are accepted */
@@ -2557,7 +2560,7 @@ flow_dv_encap_decap_resource_register
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 	struct mlx5dv_dr_domain *domain;
 	uint32_t idx = 0;
@@ -2694,7 +2697,7 @@ flow_dv_port_id_action_resource_register
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource;
 	uint32_t idx = 0;
 
@@ -2722,7 +2725,7 @@ flow_dv_port_id_action_resource_register
 	*cache_resource = *resource;
 	/*
 	 * Depending on rdma_core version the glue routine calls
-	 * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+	 * either mlx5dv_dr_action_create_dest_ib_port(domain, dev_port)
 	 * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
 	 */
 	cache_resource->action =
@@ -2769,7 +2772,7 @@ flow_dv_push_vlan_action_resource_register
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
 	struct mlx5dv_dr_domain *domain;
 	uint32_t idx = 0;
@@ -3943,7 +3946,7 @@ flow_dv_modify_hdr_resource_register
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
 	struct mlx5dv_dr_domain *ns;
 	uint32_t actions_len;
@@ -4038,7 +4041,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
 		idx -= MLX5_CNT_BATCH_OFFSET;
 		batch = 1;
 	}
-	cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0, age);
+	cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
 	MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
 	pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
 	MLX5_ASSERT(pool);
@@ -4101,7 +4104,7 @@ static struct mlx5_counter_stats_mem_mng *
 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_devx_mkey_attr mkey_attr;
 	struct mlx5_counter_stats_mem_mng *mem_mng;
 	volatile struct flow_counter_stats *raw_data;
@@ -4127,14 +4130,15 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
 	}
 	mkey_attr.addr = (uintptr_t)mem;
 	mkey_attr.size = size;
-	mkey_attr.umem_id = mem_mng->umem->umem_id;
+	mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
 	mkey_attr.pd = sh->pdn;
 	mkey_attr.log_entity_size = 0;
 	mkey_attr.pg_access = 0;
 	mkey_attr.klm_array = NULL;
 	mkey_attr.klm_num = 0;
 	if (priv->config.hca_attr.relaxed_ordering_write &&
-	    priv->config.hca_attr.relaxed_ordering_read)
+	    priv->config.hca_attr.relaxed_ordering_read &&
+	    !haswell_broadwell_cpu)
 		mkey_attr.relaxed_ordering = 1;
 	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
 	if (!mem_mng->dm) {
@@ -4164,69 +4168,55 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
  *   Whether the pool is for Aging counter.
  *
  * @return
- *   The new container pointer on success, otherwise NULL and rte_errno is set.
+ *   0 on success, otherwise negative errno value and rte_errno is set.
  */
-static struct mlx5_pools_container *
+static int
 flow_dv_container_resize(struct rte_eth_dev *dev,
 				uint32_t batch, uint32_t age)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_pools_container *cont =
-			MLX5_CNT_CONTAINER(priv->sh, batch, 0, age);
-	struct mlx5_pools_container *new_cont =
-			MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0, age);
+	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
+							       age);
 	struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
+	void *old_pools = cont->pools;
 	uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
 	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
-	int i;
+	void *pools = rte_calloc(__func__, 1, mem_size, 0);
 
-	/* Fallback mode has no background thread. Skip the check. */
-	if (!priv->counter_fallback &&
-	    cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1, age)) {
-		/* The last resize still hasn't detected by the host thread. */
-		rte_errno = EAGAIN;
-		return NULL;
-	}
-	new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
-	if (!new_cont->pools) {
+	if (!pools) {
 		rte_errno = ENOMEM;
-		return NULL;
+		return -ENOMEM;
 	}
-	if (cont->n)
-		memcpy(new_cont->pools, cont->pools, cont->n *
-		       sizeof(struct mlx5_flow_counter_pool *));
+	if (old_pools)
+		memcpy(pools, old_pools, cont->n *
+		       sizeof(struct mlx5_flow_counter_pool *));
 	/*
 	 * Fallback mode query the counter directly, no background query
 	 * resources are needed.
 	 */
 	if (!priv->counter_fallback) {
+		int i;
+
 		mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
-			MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
+			MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
 		if (!mem_mng) {
-			rte_free(new_cont->pools);
-			return NULL;
+			rte_free(pools);
+			return -ENOMEM;
 		}
 		for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
 			LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
 					 mem_mng->raws +
					 MLX5_CNT_CONTAINER_RESIZE +
					 i, next);
-	} else {
-		/*
-		 * Release the old container pools directly as no background
-		 * thread helps that.
-		 */
-		rte_free(cont->pools);
 	}
-	new_cont->n = resize;
-	rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
-	TAILQ_INIT(&new_cont->pool_list);
-	TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
-	new_cont->init_mem_mng = mem_mng;
-	rte_cio_wmb();
-	/* Flip the master container. */
-	priv->sh->cmng.mhi[batch][age] ^= (uint8_t)1;
-	return new_cont;
+	rte_spinlock_lock(&cont->resize_sl);
+	cont->n = resize;
+	cont->mem_mng = mem_mng;
+	cont->pools = pools;
+	rte_spinlock_unlock(&cont->resize_sl);
+	if (old_pools)
+		rte_free(old_pools);
+	return 0;
 }
 
 /**
@@ -4298,22 +4288,19 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
  * @return
  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 		    uint32_t batch, uint32_t age)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool;
 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
-							       0, age);
+							       age);
 	int16_t n_valid = rte_atomic16_read(&cont->n_valid);
 	uint32_t size = sizeof(*pool);
 
-	if (cont->n == n_valid) {
-		cont = flow_dv_container_resize(dev, batch, age);
-		if (!cont)
-			return NULL;
-	}
+	if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
+		return NULL;
 	size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
 	size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
 	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
@@ -4324,8 +4311,8 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	}
 	pool->min_dcs = dcs;
 	if (!priv->counter_fallback)
-		pool->raw = cont->init_mem_mng->raws + n_valid %
-				MLX5_CNT_CONTAINER_RESIZE;
+		pool->raw = cont->mem_mng->raws + n_valid %
+				MLX5_CNT_CONTAINER_RESIZE;
 	pool->raw_hw = NULL;
 	pool->type = 0;
 	pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT);
@@ -4353,7 +4340,7 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	/* Pool initialization must be updated before host thread access. */
 	rte_cio_wmb();
 	rte_atomic16_add(&cont->n_valid, 1);
-	return cont;
+	return pool;
 }
 
 /**
@@ -4377,7 +4364,7 @@ flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
 	struct mlx5_flow_counter_pool *other;
 	struct mlx5_pools_container *cont;
 
-	cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0, (age ^ 0x1));
+	cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
 	other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
 	if (!other)
 		return;
@@ -4402,10 +4389,10 @@ flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
  *   Whether the pool is for counter that was allocated for aging.
  *
  * @return
- *   The counter container pointer and @p cnt_free is set on success,
+ *   The counter pool pointer and @p cnt_free is set on success,
  *   NULL otherwise and rte_errno is set.
  */
-static struct mlx5_pools_container *
+static struct mlx5_flow_counter_pool *
 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 			     struct mlx5_flow_counter **cnt_free,
 			     uint32_t batch, uint32_t age)
@@ -4417,7 +4404,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 	struct mlx5_flow_counter *cnt;
 	uint32_t i;
 
-	cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0, age);
+	cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
 	if (!batch) {
 		/* bulk_bitmap must be 0 for single counter allocation. */
 		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
@@ -4425,12 +4412,11 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 			return NULL;
 		pool = flow_dv_find_pool_by_id(cont, dcs->id);
 		if (!pool) {
-			cont = flow_dv_pool_create(dev, dcs, batch, age);
-			if (!cont) {
+			pool = flow_dv_pool_create(dev, dcs, batch, age);
+			if (!pool) {
 				mlx5_devx_cmd_destroy(dcs);
 				return NULL;
 			}
-			pool = TAILQ_FIRST(&cont->pool_list);
 		} else if (dcs->id < pool->min_dcs->id) {
 			rte_atomic64_set(&pool->a64_dcs,
 					 (int64_t)(uintptr_t)dcs);
@@ -4442,7 +4428,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 		MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
 		*cnt_free = cnt;
-		return cont;
+		return pool;
 	}
 	/* bulk_bitmap is in 128 counters units. */
 	if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
@@ -4451,18 +4437,17 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 		rte_errno = ENODATA;
 		return NULL;
 	}
-	cont = flow_dv_pool_create(dev, dcs, batch, age);
-	if (!cont) {
+	pool = flow_dv_pool_create(dev, dcs, batch, age);
+	if (!pool) {
 		mlx5_devx_cmd_destroy(dcs);
 		return NULL;
 	}
-	pool = TAILQ_FIRST(&cont->pool_list);
 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
 		cnt = MLX5_POOL_GET_CNT(pool, i);
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 	}
 	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
-	return cont;
+	return pool;
 }
 
 /**
@@ -4482,15 +4467,15 @@ static struct mlx5_flow_counter_ext *
 flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
 			      struct mlx5_flow_counter_pool **ppool)
 {
-	static struct mlx5_flow_counter_ext *cnt;
+	struct mlx5_flow_counter_ext *cnt;
 	struct mlx5_flow_counter_pool *pool;
-	uint32_t i;
+	uint32_t i, j;
 	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
 
 	for (i = 0; i < n_valid; i++) {
 		pool = cont->pools[i];
-		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
-			cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
+		for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
+			cnt = MLX5_GET_POOL_CNT_EXT(pool, j);
 			if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
 				if (ppool)
 					*ppool = cont->pools[i];
@@ -4536,7 +4521,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	 */
 	uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
-							       0, age);
+							       age);
 	uint32_t cnt_idx;
 
 	if (!priv->config.devx) {
@@ -4575,10 +4560,9 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 			cnt_free = NULL;
 		}
 	if (!cnt_free) {
-		cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch, age);
-		if (!cont)
+		pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch, age);
+		if (!pool)
 			return 0;
-		pool = TAILQ_FIRST(&cont->pool_list);
 	}
 	if (!batch)
 		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
@@ -7222,7 +7206,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_tbl_resource *tbl;
 	union mlx5_flow_tbl_key table_key = {
 		{
@@ -7307,7 +7291,7 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
 			     struct mlx5_flow_tbl_resource *tbl)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
 
@@ -7352,7 +7336,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_matcher *cache_matcher;
 	struct mlx5dv_flow_matcher_attr dv_attr = {
 		.type = IBV_FLOW_ATTR_NORMAL,
@@ -7451,7 +7435,7 @@ flow_dv_tag_resource_register
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_tag_resource *cache_resource;
 	struct mlx5_hlist_entry *entry;
 
@@ -7515,7 +7499,7 @@ flow_dv_tag_release(struct rte_eth_dev *dev,
 		    uint32_t tag_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_tag_resource *tag;
 
 	tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
@@ -7573,7 +7557,7 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
 	 * This parameter is transferred to
 	 * mlx5dv_dr_action_create_dest_ib_port().
	 */
-	*dst_port_id = priv->ibv_port;
+	*dst_port_id = priv->dev_port;
 #else
 	/*
	 * Legacy mode, no LAG configurations is supported.
@@ -7848,12 +7832,11 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			if (flow_dv_translate_action_port_id(dev, action,
 							     &port_id, error))
 				return -rte_errno;
-			memset(&port_id_resource, 0, sizeof(port_id_resource));
 			port_id_resource.port_id = port_id;
+			MLX5_ASSERT(!handle->rix_port_id_action);
 			if (flow_dv_port_id_action_resource_register
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
-			MLX5_ASSERT(!handle->rix_port_id_action);
 			dev_flow->dv.actions[actions_n++] =
 					dev_flow->dv.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
@@ -9164,7 +9147,7 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
 			   uint32_t color_reg_c_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_match_params mask = {
 		.size = sizeof(mask.buf),
 	};