X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=62edc4fa850746a83c371537f95f4feec264deec;hb=3186a3a49c3a33502ba6189a80b8317c0a064830;hp=71d9f88a950c270c878be960f625cb84c14b51c5;hpb=4fd5e14848871a682840642fdd6ad776d0017080;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 71d9f88a95..62edc4fa85 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -426,6 +426,8 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 		unsigned int off_b;
 		uint32_t mask;
 		uint32_t data;
+		bool next_field = true;
+		bool next_dcopy = true;
 
 		if (i >= MLX5_MAX_MODIFY_NUM)
 			return rte_flow_error_set(error, EINVAL,
@@ -443,15 +445,13 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 		size_b = sizeof(uint32_t) * CHAR_BIT - off_b -
 			 __builtin_clz(mask);
 		MLX5_ASSERT(size_b);
-		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
 		actions[i] = (struct mlx5_modification_cmd) {
 			.action_type = type,
 			.field = field->id,
 			.offset = off_b,
-			.length = size_b,
+			.length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
+				0 : size_b,
 		};
-		/* Convert entire record to expected big-endian format. */
-		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
 		if (type == MLX5_MODIFICATION_TYPE_COPY) {
 			MLX5_ASSERT(dcopy);
 			actions[i].dst_field = dcopy->id;
@@ -459,7 +459,27 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
 			/* Convert entire record to big-endian format. */
 			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
-			++dcopy;
+			/*
+			 * Destination field overflow. Copy leftovers of
+			 * a source field to the next destination field.
+			 */
+			if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
+				actions[i].length = dcopy->size * CHAR_BIT;
+				field->offset += dcopy->size;
+				next_field = false;
+			}
+			/*
+			 * Not enough bits in a source field to fill a
+			 * destination field. Switch to the next source.
+			 */
+			if (dcopy->size > field->size &&
+			    (size_b == field->size * CHAR_BIT)) {
+				actions[i].length = field->size * CHAR_BIT;
+				dcopy->offset += field->size * CHAR_BIT;
+				next_dcopy = false;
+			}
+			if (next_dcopy)
+				++dcopy;
 		} else {
 			MLX5_ASSERT(item->spec);
 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
@@ -468,8 +488,11 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			data = (data & mask) >> off_b;
 			actions[i].data1 = rte_cpu_to_be_32(data);
 		}
+		/* Convert entire record to expected big-endian format. */
+		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+		if (next_field)
+			++field;
 		++i;
-		++field;
 	} while (field->size);
 	if (resource->actions_num == i)
 		return rte_flow_error_set(error, EINVAL,
@@ -1239,8 +1262,8 @@ flow_dv_convert_action_set_meta
 			 const struct rte_flow_action_set_meta *conf,
 			 struct rte_flow_error *error)
{
-	uint32_t data = conf->data;
-	uint32_t mask = conf->mask;
+	uint32_t mask = rte_cpu_to_be_32(conf->mask);
+	uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
 	struct rte_flow_item item = {
 		.spec = &data,
 		.mask = &mask,
@@ -1253,25 +1276,14 @@ flow_dv_convert_action_set_meta
 	if (reg < 0)
 		return reg;
 	MLX5_ASSERT(reg != REG_NON);
-	/*
-	 * In datapath code there is no endianness
-	 * coversions for perfromance reasons, all
-	 * pattern conversions are done in rte_flow.
-	 */
 	if (reg == REG_C_0) {
 		struct mlx5_priv *priv = dev->data->dev_private;
 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
-		uint32_t shl_c0;
+		uint32_t shl_c0 = rte_bsf32(msk_c0);
 
-		MLX5_ASSERT(msk_c0);
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
-		shl_c0 = rte_bsf32(msk_c0);
-#else
-		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
-#endif
-		mask <<= shl_c0;
-		data <<= shl_c0;
-		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
+		mask = rte_cpu_to_be_32(mask) & msk_c0;
+		mask = rte_cpu_to_be_32(mask << shl_c0);
 	}
 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
 	/* The routine expects parameters in memory as big-endian ones. */
@@ -1356,7 +1368,8 @@ flow_dv_convert_action_modify_ipv6_dscp
 }
 
 static int
-mlx5_flow_item_field_width(enum rte_flow_field_id field)
+mlx5_flow_item_field_width(struct mlx5_dev_config *config,
+			   enum rte_flow_field_id field)
 {
 	switch (field) {
 	case RTE_FLOW_FIELD_START:
@@ -1404,7 +1417,12 @@ mlx5_flow_item_field_width(enum rte_flow_field_id field)
 	case RTE_FLOW_FIELD_MARK:
 		return 24;
 	case RTE_FLOW_FIELD_META:
-		return 32;
+		if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
+			return 16;
+		else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
+			return 32;
+		else
+			return 0;
 	case RTE_FLOW_FIELD_POINTER:
 	case RTE_FLOW_FIELD_VALUE:
 		return 64;
@@ -1424,7 +1442,10 @@ mlx5_flow_field_id_to_modify_info
 		 const struct rte_flow_attr *attr,
 		 struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
 	uint32_t idx = 0;
+	uint32_t off = 0;
 	uint64_t val = 0;
 	switch (data->field) {
 	case RTE_FLOW_FIELD_START:
@@ -1432,61 +1453,63 @@ mlx5_flow_field_id_to_modify_info
 		MLX5_ASSERT(false);
 		break;
 	case RTE_FLOW_FIELD_MAC_DST:
+		off = data->offset > 16 ? data->offset - 16 : 0;
 		if (mask) {
-			if (data->offset < 32) {
-				info[idx] = (struct field_modify_info){4, 0,
-						MLX5_MODI_OUT_DMAC_47_16};
-				if (width < 32) {
-					mask[idx] =
-						rte_cpu_to_be_32(0xffffffff >>
-								 (32 - width));
+			if (data->offset < 16) {
+				info[idx] = (struct field_modify_info){2, 0,
+						MLX5_MODI_OUT_DMAC_15_0};
+				if (width < 16) {
+					mask[idx] = rte_cpu_to_be_16(0xffff >>
+								 (16 - width));
 					width = 0;
 				} else {
-					mask[idx] = RTE_BE32(0xffffffff);
-					width -= 32;
+					mask[idx] = RTE_BE16(0xffff);
+					width -= 16;
 				}
 				if (!width)
 					break;
 				++idx;
 			}
-			info[idx] = (struct field_modify_info){2, 4 * idx,
-						MLX5_MODI_OUT_DMAC_15_0};
-			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
-		} else {
-			if (data->offset < 32)
-				info[idx++] = (struct field_modify_info){4, 0,
+			info[idx] = (struct field_modify_info){4, 4 * idx,
 						MLX5_MODI_OUT_DMAC_47_16};
-			info[idx] = (struct field_modify_info){2, 0,
+			mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+						      (32 - width)) << off);
+		} else {
+			if (data->offset < 16)
+				info[idx++] = (struct field_modify_info){2, 0,
 						MLX5_MODI_OUT_DMAC_15_0};
+			info[idx] = (struct field_modify_info){4, off,
+						MLX5_MODI_OUT_DMAC_47_16};
 		}
 		break;
 	case RTE_FLOW_FIELD_MAC_SRC:
+		off = data->offset > 16 ? data->offset - 16 : 0;
 		if (mask) {
-			if (data->offset < 32) {
-				info[idx] = (struct field_modify_info){4, 0,
-						MLX5_MODI_OUT_SMAC_47_16};
-				if (width < 32) {
-					mask[idx] =
-						rte_cpu_to_be_32(0xffffffff >>
-								 (32 - width));
+			if (data->offset < 16) {
+				info[idx] = (struct field_modify_info){2, 0,
+						MLX5_MODI_OUT_SMAC_15_0};
+				if (width < 16) {
+					mask[idx] = rte_cpu_to_be_16(0xffff >>
+								 (16 - width));
 					width = 0;
 				} else {
-					mask[idx] = RTE_BE32(0xffffffff);
-					width -= 32;
+					mask[idx] = RTE_BE16(0xffff);
+					width -= 16;
 				}
 				if (!width)
 					break;
 				++idx;
 			}
-			info[idx] = (struct field_modify_info){2, 4 * idx,
-						MLX5_MODI_OUT_SMAC_15_0};
-			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
-		} else {
-			if (data->offset < 32)
-				info[idx++] = (struct field_modify_info){4, 0,
+			info[idx] = (struct field_modify_info){4, 4 * idx,
 						MLX5_MODI_OUT_SMAC_47_16};
-			info[idx] = (struct field_modify_info){2, 0,
+			mask[idx] = rte_cpu_to_be_32((0xffffffff >>
+						      (32 - width)) << off);
+		} else {
+			if (data->offset < 16)
+				info[idx++] = (struct field_modify_info){2, 0,
 						MLX5_MODI_OUT_SMAC_15_0};
+			info[idx] = (struct field_modify_info){4, off,
+						MLX5_MODI_OUT_SMAC_47_16};
 		}
 		break;
 	case RTE_FLOW_FIELD_VLAN_TYPE:
@@ -1777,17 +1800,28 @@ mlx5_flow_field_id_to_modify_info
 		break;
 	case RTE_FLOW_FIELD_META:
 		{
+			unsigned int xmeta = config->dv_xmeta_en;
 			int reg = flow_dv_get_metadata_reg(dev, attr, error);
 			if (reg < 0)
 				return;
 			MLX5_ASSERT(reg != REG_NON);
 			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
-			info[idx] = (struct field_modify_info){4, 0,
-						reg_to_field[reg]};
-			if (mask)
-				mask[idx] =
-					rte_cpu_to_be_32(0xffffffff >>
-							 (32 - width));
+			if (xmeta == MLX5_XMETA_MODE_META16) {
+				info[idx] = (struct field_modify_info){2, 0,
+							reg_to_field[reg]};
+				if (mask)
+					mask[idx] = rte_cpu_to_be_16(0xffff >>
+								(16 - width));
+			} else if (xmeta == MLX5_XMETA_MODE_META32) {
+				info[idx] = (struct field_modify_info){4, 0,
+							reg_to_field[reg]};
+				if (mask)
+					mask[idx] =
+						rte_cpu_to_be_32(0xffffffff >>
+								 (32 - width));
+			} else {
+				MLX5_ASSERT(false);
+			}
 		}
 		break;
 	case RTE_FLOW_FIELD_POINTER:
@@ -1799,7 +1833,12 @@ mlx5_flow_field_id_to_modify_info
 			val = data->value;
 		for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
 			if (mask[idx]) {
-				if (dst_width > 16) {
+				if (dst_width == 48) {
+					/* Special case for MAC addresses. */
+					value[idx] = rte_cpu_to_be_16(val);
+					val >>= 16;
+					dst_width -= 16;
+				} else if (dst_width > 16) {
 					value[idx] = rte_cpu_to_be_32(val);
 					val >>= 32;
 				} else if (dst_width > 8) {
@@ -1845,6 +1884,8 @@ flow_dv_convert_action_modify_field
 			 const struct rte_flow_attr *attr,
 			 struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
 	const struct rte_flow_action_modify_field *conf =
 		(const struct rte_flow_action_modify_field *)(action->conf);
 	struct rte_flow_item item;
@@ -1855,7 +1896,8 @@ flow_dv_convert_action_modify_field
 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
 	uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
 	uint32_t type;
-	uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
+	uint32_t dst_width = mlx5_flow_item_field_width(config,
+							conf->dst.field);
 
 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
 	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
@@ -4710,10 +4752,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 	struct mlx5_dev_config *config = &priv->config;
 	const struct rte_flow_action_modify_field *action_modify_field =
 		action->conf;
-	uint32_t dst_width =
-		mlx5_flow_item_field_width(action_modify_field->dst.field);
-	uint32_t src_width =
-		mlx5_flow_item_field_width(action_modify_field->src.field);
+	uint32_t dst_width = mlx5_flow_item_field_width(config,
+						action_modify_field->dst.field);
+	uint32_t src_width = mlx5_flow_item_field_width(config,
+						action_modify_field->src.field);
 
 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
 	if (ret)
@@ -4766,8 +4808,10 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 				"inner header fields modification"
 				" is not supported");
 	}
-	if (action_modify_field->dst.field ==
-	    action_modify_field->src.field)
+	if ((action_modify_field->dst.field ==
+	     action_modify_field->src.field) &&
+	    (action_modify_field->dst.level ==
+	     action_modify_field->src.level))
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, action,
 				"source and destination fields"
@@ -6086,28 +6130,33 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
 		return;
 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
 	MLX5_ASSERT(pool);
-	/*
-	 * If the counter action is shared by ID, the l3t_clear_entry function
-	 * reduces its references counter. If after the reduction the action is
-	 * still referenced, the function returns here and does not release it.
-	 */
-	if (IS_LEGACY_SHARED_CNT(counter) &&
-	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
-		return;
-	/*
-	 * If the counter action is shared by indirect action API, the atomic
-	 * function reduces its references counter. If after the reduction the
-	 * action is still referenced, the function returns here and does not
-	 * release it.
-	 * When the counter action is not shared neither by ID nor by indirect
-	 * action API, shared info is 1 before the reduction, so this condition
-	 * is failed and function doesn't return here.
-	 */
-	if (!IS_LEGACY_SHARED_CNT(counter) &&
-	    __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED))
-		return;
-	if (pool->is_aged)
+	if (pool->is_aged) {
 		flow_dv_counter_remove_from_age(dev, counter, cnt);
+	} else {
+		/*
+		 * If the counter action is shared by ID, the l3t_clear_entry
+		 * function reduces its references counter. If after the
+		 * reduction the action is still referenced, the function
+		 * returns here and does not release it.
+		 */
+		if (IS_LEGACY_SHARED_CNT(counter) &&
+		    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+					 cnt->shared_info.id))
+			return;
+		/*
+		 * If the counter action is shared by indirect action API,
+		 * the atomic function reduces its references counter.
+		 * If after the reduction the action is still referenced, the
+		 * function returns here and does not release it.
+		 * When the counter action is shared neither by ID nor by
+		 * indirect action API, shared info is 1 before the reduction,
+		 * so this condition fails and the function doesn't return here.
+		 */
+		if (!IS_LEGACY_SHARED_CNT(counter) &&
+		    __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
+				       __ATOMIC_RELAXED))
+			return;
+	}
 	cnt->pool = pool;
 	/*
 	 * Put the counter back to list to be updated in none fallback mode.
@@ -9195,27 +9244,14 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev,
 	if (reg < 0)
 		return;
 	MLX5_ASSERT(reg != REG_NON);
-	/*
-	 * In datapath code there is no endianness
-	 * coversions for perfromance reasons, all
-	 * pattern conversions are done in rte_flow.
-	 */
-	value = rte_cpu_to_be_32(value);
-	mask = rte_cpu_to_be_32(mask);
 	if (reg == REG_C_0) {
 		struct mlx5_priv *priv = dev->data->dev_private;
 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
 		uint32_t shl_c0 = rte_bsf32(msk_c0);
 
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-		uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
-		value >>= shr_c0;
-		mask >>= shr_c0;
-#endif
-		value <<= shl_c0;
+		mask &= msk_c0;
 		mask <<= shl_c0;
-		MLX5_ASSERT(msk_c0);
-		MLX5_ASSERT(!(~msk_c0 & mask));
+		value <<= shl_c0;
 	}
 	flow_dv_match_meta_reg(matcher, key, reg, value, mask);
 }
@@ -14707,12 +14743,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 			MLX5_ASSERT(dev_flow.dv.tag_resource);
 			act_cnt->rix_mark = dev_flow.handle->dvh.rix_tag;
-			if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
-				dev_flow.handle->rix_hrxq =
-			mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
-				flow_drv_rxq_flags_set(dev,
-					dev_flow.handle);
-			}
 			action_flags |= MLX5_FLOW_ACTION_MARK;
 			break;
 		}
@@ -14760,12 +14790,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 					"set tag action");
 			act_cnt->modify_hdr = dev_flow.handle->dvh.modify_hdr;
-			if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
-				dev_flow.handle->rix_hrxq =
-			mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
-				flow_drv_rxq_flags_set(dev,
-					dev_flow.handle);
-			}
 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
 			break;
 		}
@@ -14809,41 +14833,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 		}
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 		{
-			struct mlx5_hrxq *hrxq;
-			uint32_t hrxq_idx;
-			struct mlx5_flow_rss_desc rss_desc;
-			struct mlx5_flow_meter_sub_policy *sub_policy =
-				mtr_policy->sub_policys[domain][0];
-
 			if (i >= MLX5_MTR_RTE_COLORS)
 				return -rte_mtr_error_set(error,
					ENOTSUP,
 					RTE_MTR_ERROR_TYPE_METER_POLICY,
 					NULL, "cannot create policy "
 					"fate queue for this color");
-			memset(&rss_desc, 0,
-				sizeof(struct mlx5_flow_rss_desc));
-			rss_desc.queue_num = 1;
-			rss_desc.const_q = act->conf;
-			hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
-						    &rss_desc, &hrxq_idx);
-			if (!hrxq)
-				return -rte_mtr_error_set(error,
-					ENOTSUP,
-					RTE_MTR_ERROR_TYPE_METER_POLICY,
-					NULL,
-					"cannot create policy fate queue");
-			sub_policy->rix_hrxq[i] = hrxq_idx;
+			act_cnt->queue =
+				((const struct rte_flow_action_queue *)
+					(act->conf))->index;
 			act_cnt->fate_action = MLX5_FLOW_FATE_QUEUE;
 			dev_flow.handle->fate_action = MLX5_FLOW_FATE_QUEUE;
-			if (action_flags & MLX5_FLOW_ACTION_MARK ||
-			    action_flags & MLX5_FLOW_ACTION_SET_TAG) {
-				dev_flow.handle->rix_hrxq = hrxq_idx;
-				flow_drv_rxq_flags_set(dev,
-					dev_flow.handle);
-			}
+			mtr_policy->is_queue = 1;
 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
 			break;
 		}
@@ -16057,6 +16060,73 @@ rss_sub_policy_error:
 	return NULL;
 }
 
+
+/**
+ * Destroy the sub policy table with RX queue.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to meter policy table.
+ */
+static void
+flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
+		struct mlx5_flow_meter_policy *mtr_policy)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+	uint32_t i, j;
+	uint16_t sub_policy_num, new_policy_num;
+
+	rte_spinlock_lock(&mtr_policy->sl);
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+		switch (mtr_policy->act_cnt[i].fate_action) {
+		case MLX5_FLOW_FATE_SHARED_RSS:
+			sub_policy_num = (mtr_policy->sub_policy_num >>
+				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+				MLX5_MTR_SUB_POLICY_NUM_MASK;
+			new_policy_num = sub_policy_num;
+			for (j = 0; j < sub_policy_num; j++) {
+				sub_policy =
+					mtr_policy->sub_policys[domain][j];
+				if (sub_policy) {
+					__flow_dv_destroy_sub_policy_rules(dev,
+						sub_policy);
+					if (sub_policy !=
+					    mtr_policy->sub_policys[domain][0]) {
+						mtr_policy->sub_policys[domain][j] =
+							NULL;
+						mlx5_ipool_free
+						(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+							sub_policy->idx);
+						new_policy_num--;
+					}
+				}
+			}
+			if (new_policy_num != sub_policy_num) {
+				mtr_policy->sub_policy_num &=
+					~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+					(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+				mtr_policy->sub_policy_num |=
+					(new_policy_num &
+					MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+					(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+			}
+			break;
+		case MLX5_FLOW_FATE_QUEUE:
+			sub_policy = mtr_policy->sub_policys[domain][0];
+			__flow_dv_destroy_sub_policy_rules(dev,
+							   sub_policy);
+			break;
+		default:
+			/* Other actions without queue; do nothing. */
+			break;
+		}
+	}
+	rte_spinlock_unlock(&mtr_policy->sl);
+}
+
 /**
  * Validate the batch counter support in root table.
  *
@@ -16666,6 +16736,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.create_def_policy = flow_dv_create_def_policy,
 	.destroy_def_policy = flow_dv_destroy_def_policy,
 	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
+	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
 	.counter_alloc = flow_dv_counter_allocate,
 	.counter_free = flow_dv_counter_free,
 	.counter_query = flow_dv_counter_query,