static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);
+static int
+flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
+ uint32_t encap_decap_idx);
+
+static int
+flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
+ uint32_t port_id);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- if (sh->dv_refcnt > 1) {
+ if (sh->refcnt > 1) {
int ret;
ret = pthread_mutex_lock(&sh->dv_mutex);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- if (sh->dv_refcnt > 1) {
+ if (sh->refcnt > 1) {
int ret;
ret = pthread_mutex_unlock(&sh->dv_mutex);
}
static enum mlx5_modification_field reg_to_field[] = {
- [REG_NONE] = MLX5_MODI_OUT_NONE,
+ [REG_NON] = MLX5_MODI_OUT_NONE,
[REG_A] = MLX5_MODI_META_DATA_REG_A,
[REG_B] = MLX5_MODI_META_DATA_REG_B,
[REG_C_0] = MLX5_MODI_META_REG_C_0,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
- MLX5_ASSERT(conf->id != REG_NONE);
+ MLX5_ASSERT(conf->id != REG_NON);
MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
actions[i] = (struct mlx5_modification_cmd) {
.action_type = MLX5_MODIFICATION_TYPE_SET,
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
if (ret < 0)
return ret;
- MLX5_ASSERT(ret != REG_NONE);
+ MLX5_ASSERT(ret != REG_NON);
MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
reg_type = reg_to_field[ret];
MLX5_ASSERT(reg_type > 0);
.mask = &mask,
};
struct field_modify_info reg_c_x[] = {
- {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
- {0, 0, 0},
+ [1] = {0, 0, 0},
};
int reg;
mask = rte_cpu_to_be_32(mask) & msk_c0;
mask = rte_cpu_to_be_32(mask << shl_c0);
}
- reg_c_x[0].id = reg_to_field[reg];
+ reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_mark),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
return 0;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_meta),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
return ret;
}
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_tag),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
if (mask->index != 0xff)
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
if (ret < 0)
return ret;
- MLX5_ASSERT(ret != REG_NONE);
+ MLX5_ASSERT(ret != REG_NON);
return 0;
}
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_port_id_mask,
sizeof(struct rte_flow_item_port_id),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret)
return ret;
if (!spec)
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_vlan),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret)
return ret;
if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
RTE_FLOW_ERROR_TYPE_ITEM, item,
"Match is supported for GTP"
" flags only");
- return mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_gtp),
- error);
+ return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_gtp),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+}
+
+/**
+ * Validate IPV4 item.
+ * Use the existing validation function mlx5_flow_validate_item_ipv4(), and
+ * add specific validation of the fragment_offset field.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
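+ * @param[in] last_item
+ * Previous validated item in the pattern items.
+ * @param[in] ether_type
+ * Type in the ethernet layer header (including dot1q).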
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint64_t last_item,
+ uint16_t ether_type,
+ struct rte_flow_error *error)
+{
+ int ret;
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *last = item->last;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ rte_be16_t fragment_offset_spec = 0;
+ rte_be16_t fragment_offset_last = 0;
+ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .fragment_offset = RTE_BE16(0xffff),
+ .next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ },
+ };
+
+ ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
+ ether_type, &nic_ipv4_mask,
+ MLX5_ITEM_RANGE_ACCEPTED, error);
+ if (ret < 0)
+ return ret;
+ if (spec && mask)
+ fragment_offset_spec = spec->hdr.fragment_offset &
+ mask->hdr.fragment_offset;
+ if (!fragment_offset_spec)
+ return 0;
+ /*
+ * spec and mask are valid, enforce using full mask to make sure the
+ * complete value is used correctly.
+ */
+ if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+ != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ item, "must use full mask for"
+ " fragment_offset");
+ /*
+ * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
+ * indicating this is 1st fragment of fragmented packet.
+ * This is not yet supported in MLX5, return appropriate error message.
+ */
+ if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "match on first fragment not "
+ "supported");
+ if (fragment_offset_spec && !last)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "specified value not supported");
+ /* spec and last are valid, validate the specified range. */
+ fragment_offset_last = last->hdr.fragment_offset &
+ mask->hdr.fragment_offset;
+ /*
+ * Match on fragment_offset spec 0x2001 and last 0x3fff
+ * means MF is 1 and frag-offset is > 0.
+ * This packet is fragment 2nd and onward, excluding last.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
+ fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on following "
+ "fragments not supported");
+ /*
+ * Match on fragment_offset spec 0x0001 and last 0x1fff
+ * means MF is 0 and frag-offset is > 0.
+ * This packet is last fragment of fragmented packet.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (fragment_offset_spec == RTE_BE16(1) &&
+ fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on last "
+ "fragment not supported");
+ /*
+ * Match on fragment_offset spec 0x0001 and last 0x3fff
+ * means MF and/or frag-offset is not 0.
+ * This is a fragmented packet.
+ * Other range values are invalid and rejected.
+ */
+ if (!(fragment_offset_spec == RTE_BE16(1) &&
+ fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
+ "specified range not supported");
+ return 0;
+}
+
+/**
+ * Validate IPV6 fragment extension item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
+ const struct rte_flow_item_ipv6_frag_ext *last = item->last;
+ const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
+ rte_be16_t frag_data_spec = 0;
+ rte_be16_t frag_data_last = 0;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret = 0;
+ struct rte_flow_item_ipv6_frag_ext nic_mask = {
+ .hdr = {
+ .next_header = 0xff,
+ .frag_data = RTE_BE16(0xffff),
+ },
+ };
+
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "ipv6 fragment extension item cannot "
+ "follow L4 item.");
+ if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+ (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "ipv6 fragment extension item must "
+ "follow ipv6 item");
+ if (spec && mask)
+ frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
+ if (!frag_data_spec)
+ return 0;
+ /*
+ * spec and mask are valid, enforce using full mask to make sure the
+ * complete value is used correctly.
+ */
+ if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
+ RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ item, "must use full mask for"
+ " frag_data");
+ /*
+ * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
+ * This is 1st fragment of fragmented packet.
+ */
+ if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "match on first fragment not "
+ "supported");
+ if (frag_data_spec && !last)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "specified value not supported");
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6_frag_ext),
+ MLX5_ITEM_RANGE_ACCEPTED, error);
+ if (ret)
+ return ret;
+ /* spec and last are valid, validate the specified range. */
+ frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
+ /*
+ * Match on frag_data spec 0x0009 and last 0xfff9
+ * means M is 1 and frag-offset is > 0.
+ * This packet is fragment 2nd and onward, excluding last.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
+ RTE_IPV6_EHDR_MF_MASK) &&
+ frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on following "
+ "fragments not supported");
+ /*
+ * Match on frag_data spec 0x0008 and last 0xfff8
+ * means M is 0 and frag-offset is > 0.
+ * This packet is last fragment of fragmented packet.
+ * This is not yet supported in MLX5, return appropriate
+ * error message.
+ */
+ if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
+ frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ last, "match on last "
+ "fragment not supported");
+ /* Other range values are invalid and rejected. */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
+ "specified range not supported");
}
/**
return 0;
}
+/**
+ * Match encap_decap resource.
+ *
+ * @param entry
+ * Pointer to existing resource entry object.
+ * @param ctx
+ * Pointer to new encap_decap resource.
+ *
+ * @return
+ * 0 on matching, -1 otherwise.
+ */
+static int
+flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
+{
+ struct mlx5_flow_dv_encap_decap_resource *resource;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+
+ resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx;
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_encap_decap_resource,
+ entry);
+ if (resource->entry.key == cache_resource->entry.key &&
+ resource->reformat_type == cache_resource->reformat_type &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->flags == cache_resource->flags &&
+ resource->size == cache_resource->size &&
+ !memcmp((const void *)resource->buf,
+ (const void *)cache_resource->buf,
+ resource->size))
+ return 0;
+ return -1;
+}
+
/**
* Find existing encap/decap resource or create and register a new one.
*
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
- uint32_t idx = 0;
+ struct mlx5_hlist_entry *entry;
+ union mlx5_flow_encap_decap_key encap_decap_key = {
+ {
+ .ft_type = resource->ft_type,
+ .refmt_type = resource->reformat_type,
+ .buf_size = resource->size,
+ .table_level = !!dev_flow->dv.group,
+ .cksum = 0,
+ }
+ };
int ret;
resource->flags = dev_flow->dv.group ? 0 : 1;
domain = sh->rx_domain;
else
domain = sh->tx_domain;
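+ /* Complete the hash key with a checksum of the raw encap/decap buffer. */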
+ encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
+ resource->size, 0);
+ resource->entry.key = encap_decap_key.v64;
/* Lookup a matching resource from cache. */
- ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
- cache_resource, next) {
- if (resource->reformat_type == cache_resource->reformat_type &&
- resource->ft_type == cache_resource->ft_type &&
- resource->flags == cache_resource->flags &&
- resource->size == cache_resource->size &&
- !memcmp((const void *)resource->buf,
- (const void *)cache_resource->buf,
- resource->size)) {
- DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.rix_encap_decap = idx;
- dev_flow->dv.encap_decap = cache_resource;
- return 0;
- }
+ entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key,
+ flow_dv_encap_decap_resource_match,
+ (void *)resource);
+ if (entry) {
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_encap_decap_resource, entry);
+ DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
+ dev_flow->dv.encap_decap = cache_resource;
+ return 0;
}
/* Register new encap/decap resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
+ cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap;
ret = mlx5_flow_os_create_flow_action_packet_reformat
(sh->ctx, domain, cache_resource,
&cache_resource->action);
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
- ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
- dev_flow->handle->dvh.rix_encap_decap, cache_resource,
- next);
+ if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
+ flow_dv_encap_decap_resource_match,
+ (void *)cache_resource)) {
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ cache_resource->idx);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "action exist");
+ }
dev_flow->dv.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
(dev, &res, dev_flow, error);
}
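+/* Whether E-Switch (FDB) mirroring was detected during sample validation. */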
+static int fdb_mirror;
+
/**
* Validate the modify-header actions.
*
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have encap action before"
" modify action");
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't support sample action before"
+ " modify action for E-Switch"
+ " mirroring");
return 0;
}
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"jump with meter not support");
+ if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "E-Switch mirroring can't support"
+ " Sample action and jump action in"
+ " same flow now");
if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_age *age = action->conf;
- if (!priv->config.devx || priv->counter_fallback)
+ if (!priv->config.devx || priv->sh->cmng.counter_fallback)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"configuration cannot be null");
- if (age->timeout >= UINT16_MAX / 2 / 10)
- return rte_flow_error_set(error, ENOTSUP,
+ if (!(age->timeout))
+ return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "Max age time: 3275 seconds");
+ "invalid timeout value 0");
if (action_flags & MLX5_FLOW_ACTION_AGE)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "Duplicate age ctions set");
+ "duplicate age actions set");
return 0;
}
}
/**
- * Find existing modify-header resource or create and register a new one.
+ * Match modify-header resource.
*
- * @param dev[in, out]
- * Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to modify-header resource.
- * @parm[in, out] dev_flow
- * Pointer to the dev_flow.
- * @param[out] error
- * pointer to error structure.
+ * @param entry
+ * Pointer to existing resource entry object.
+ * @param ctx
+ * Pointer to new modify-header resource.
*
* @return
- * 0 on success otherwise -errno and errno is set.
+ * 0 on matching, -1 otherwise.
*/
static int
-flow_dv_modify_hdr_resource_register
- (struct rte_eth_dev *dev,
- struct mlx5_flow_dv_modify_hdr_resource *resource,
- struct mlx5_flow *dev_flow,
- struct rte_flow_error *error)
+flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_modify_hdr_resource *resource;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
- struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
- int ret;
- resource->flags = dev_flow->dv.group ? 0 :
- MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
- if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
- resource->flags))
- return rte_flow_error_set(error, EOVERFLOW,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "too many modify header items");
- if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
- ns = sh->fdb_domain;
- else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
- ns = sh->tx_domain;
- else
- ns = sh->rx_domain;
- /* Lookup a matching resource from cache. */
+ resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx;
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_modify_hdr_resource,
+ entry);
actions_len = resource->actions_num * sizeof(resource->actions[0]);
- LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
- if (resource->ft_type == cache_resource->ft_type &&
- resource->actions_num == cache_resource->actions_num &&
- resource->flags == cache_resource->flags &&
- !memcmp((const void *)resource->actions,
- (const void *)cache_resource->actions,
- actions_len)) {
- DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.modify_hdr = cache_resource;
- return 0;
- }
- }
- /* Register new modify-header resource. */
- cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*cache_resource) + actions_len, 0,
- SOCKET_ID_ANY);
- if (!cache_resource)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- *cache_resource = *resource;
- rte_memcpy(cache_resource->actions, resource->actions, actions_len);
- ret = mlx5_flow_os_create_flow_action_modify_header
- (sh->ctx, ns, cache_resource,
- actions_len, &cache_resource->action);
- if (ret) {
- mlx5_free(cache_resource);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create action");
- }
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->handle->dvh.modify_hdr = cache_resource;
- DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- return 0;
+ if (resource->entry.key == cache_resource->entry.key &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->actions_num == cache_resource->actions_num &&
+ resource->flags == cache_resource->flags &&
+ !memcmp((const void *)resource->actions,
+ (const void *)cache_resource->actions,
+ actions_len))
+ return 0;
+ return -1;
}
/**
- * Get DV flow counter by index.
+ * Validate the sample action.
*
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the sample action.
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] idx
- * mlx5 flow counter index in the container.
- * @param[out] ppool
- * mlx5 flow counter pool in the container,
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * Pointer to the counter, NULL otherwise.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_flow_counter *
-flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
- uint32_t idx,
- struct mlx5_flow_counter_pool **ppool)
+static int
+flow_dv_validate_action_sample(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont;
- struct mlx5_flow_counter_pool *pool;
- uint32_t batch = 0, age = 0;
-
- idx--;
- age = MLX_CNT_IS_AGE(idx);
- idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
- if (idx >= MLX5_CNT_BATCH_OFFSET) {
- idx -= MLX5_CNT_BATCH_OFFSET;
- batch = 1;
- }
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
- MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
- pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
- MLX5_ASSERT(pool);
- if (ppool)
- *ppool = pool;
- return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
-}
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ const struct rte_flow_action_sample *sample = action->conf;
+ const struct rte_flow_action *act;
+ uint64_t sub_action_flags = 0;
+ uint16_t queue_index = 0xFFFF;
+ int actions_n = 0;
+ int ret;
+
+ fdb_mirror = 0;
-/**
- * Check the devx counter belongs to the pool.
- *
- * @param[in] pool
- * Pointer to the counter pool.
- * @param[in] id
- * The counter devx ID.
- *
+ if (!sample)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "configuration cannot be NULL");
+ if (sample->ratio == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "ratio value starts from 1");
+ if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "sample action not supported");
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Multiple sample actions not "
+ "supported");
+ if (action_flags & MLX5_FLOW_ACTION_METER)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, meter should "
+ "be after sample action");
+ if (action_flags & MLX5_FLOW_ACTION_JUMP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, jump should "
+ "be after sample action");
+ act = sample->actions;
+ for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "too many actions");
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(act,
+ sub_action_flags,
+ dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ queue_index = ((const struct rte_flow_action_queue *)
+ (act->conf))->index;
+ sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = flow_dv_validate_action_mark(dev, act,
+ sub_action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+ sub_action_flags |= MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ else
+ sub_action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_dv_validate_action_count(dev, error);
+ if (ret < 0)
+ return ret;
+ sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ ret = flow_dv_validate_action_port_id(dev,
+ sub_action_flags,
+ act,
+ attr,
+ error);
+ if (ret)
+ return ret;
+ sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev, NULL, act->conf, attr, &sub_action_flags,
+ &actions_n, error);
+ if (ret < 0)
+ return ret;
+ ++actions_n;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Doesn't support optional "
+ "action");
+ }
+ }
+ if (attr->ingress && !attr->transfer) {
+ if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Ingress must has a dest "
+ "QUEUE for Sample");
+ } else if (attr->egress && !attr->transfer) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Sample Only support Ingress "
+ "or E-Switch");
+ } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
+ MLX5_ASSERT(attr->transfer);
+ if (sample->ratio > 1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "E-Switch doesn't support "
+ "any optional action "
+ "for sampling");
+ fdb_mirror = 1;
+ if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
+ if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "E-Switch must has a dest "
+ "port for mirroring");
+ }
+ /* Continue validation for Xcap actions.*/
+ if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
+ MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap and decap "
+ "combination aren't "
+ "supported");
+ if (!attr->transfer && attr->ingress && (sub_action_flags &
+ MLX5_FLOW_ACTION_ENCAP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ }
+ return 0;
+}
+
+/**
+ * Find existing modify-header resource or create and register a new one.
+ *
+ * @param dev[in, out]
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ * Pointer to modify-header resource.
+ * @parm[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_modify_hdr_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+ struct mlx5dv_dr_domain *ns;
+ uint32_t actions_len;
+ struct mlx5_hlist_entry *entry;
+ union mlx5_flow_modify_hdr_key hdr_mod_key = {
+ {
+ .ft_type = resource->ft_type,
+ .actions_num = resource->actions_num,
+ .group = dev_flow->dv.group,
+ .cksum = 0,
+ }
+ };
+ int ret;
+
+ resource->flags = dev_flow->dv.group ? 0 :
+ MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
+ resource->flags))
+ return rte_flow_error_set(error, EOVERFLOW,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "too many modify header items");
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ ns = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ ns = sh->tx_domain;
+ else
+ ns = sh->rx_domain;
+ /* Lookup a matching resource from cache. */
+ actions_len = resource->actions_num * sizeof(resource->actions[0]);
+ hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0);
+ resource->entry.key = hdr_mod_key.v64;
+ entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key,
+ flow_dv_modify_hdr_resource_match,
+ (void *)resource);
+ if (entry) {
+ cache_resource = container_of(entry,
+ struct mlx5_flow_dv_modify_hdr_resource,
+ entry);
+ DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
+ return 0;
+ }
+ /* Register new modify-header resource. */
+ cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*cache_resource) + actions_len, 0,
+ SOCKET_ID_ANY);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ rte_memcpy(cache_resource->actions, resource->actions, actions_len);
+ ret = mlx5_flow_os_create_flow_action_modify_header
+ (sh->ctx, ns, cache_resource,
+ actions_len, &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry,
+ flow_dv_modify_hdr_resource_match,
+ (void *)cache_resource)) {
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
+ mlx5_free(cache_resource);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "action exist");
+ }
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
+ DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+
+/**
+ * Get DV flow counter by index.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] idx
+ * mlx5 flow counter index in the container.
+ * @param[out] ppool
+ * mlx5 flow counter pool in the container,
+ *
+ * @return
+ * Pointer to the counter, NULL otherwise.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
+ uint32_t idx,
+ struct mlx5_flow_counter_pool **ppool)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ struct mlx5_flow_counter_pool *pool;
+
+ /* Decrease to original index and clear shared bit. */
+ idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
+ MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
+ pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
+ MLX5_ASSERT(pool);
+ if (ppool)
+ *ppool = pool;
+ return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
+}
+
+/**
+ * Check the devx counter belongs to the pool.
+ *
+ * @param[in] pool
+ * Pointer to the counter pool.
+ * @param[in] id
+ * The counter devx ID.
+ *
* @return
* True if counter belongs to the pool, false otherwise.
*/
/**
* Get a pool by devx counter ID.
*
- * @param[in] cont
- * Pointer to the counter container.
+ * @param[in] cmng
+ * Pointer to the counter management.
* @param[in] id
* The counter devx ID.
*
* The counter pool pointer if exists, NULL otherwise,
*/
static struct mlx5_flow_counter_pool *
-flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
+flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
{
uint32_t i;
+ struct mlx5_flow_counter_pool *pool = NULL;
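+ /* Serialize with pool creation and container resize. */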
+ rte_spinlock_lock(&cmng->pool_update_sl);
/* Check last used pool. */
- if (cont->last_pool_idx != POOL_IDX_INVALID &&
- flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
- return cont->pools[cont->last_pool_idx];
+ if (cmng->last_pool_idx != POOL_IDX_INVALID &&
+ flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
+ pool = cmng->pools[cmng->last_pool_idx];
+ goto out;
+ }
/* ID out of range means no suitable pool in the container. */
- if (id > cont->max_id || id < cont->min_id)
- return NULL;
+ if (id > cmng->max_id || id < cmng->min_id)
+ goto out;
/*
* Find the pool from the end of the container, since mostly counter
* ID is sequence increasing, and the last pool should be the needed
* one.
*/
- i = rte_atomic16_read(&cont->n_valid);
+ i = cmng->n_valid;
while (i--) {
- struct mlx5_flow_counter_pool *pool = cont->pools[i];
-
- if (flow_dv_is_counter_in_pool(pool, id))
- return pool;
- }
- return NULL;
-}
-
-/**
- * Allocate a new memory for the counter values wrapped by all the needed
- * management.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] raws_n
- * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
- *
- * @return
- * The new memory management pointer on success, otherwise NULL and rte_errno
- * is set.
- */
-static struct mlx5_counter_stats_mem_mng *
-flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_devx_mkey_attr mkey_attr;
- struct mlx5_counter_stats_mem_mng *mem_mng;
- volatile struct flow_counter_stats *raw_data;
- int size = (sizeof(struct flow_counter_stats) *
- MLX5_COUNTERS_PER_POOL +
- sizeof(struct mlx5_counter_stats_raw)) * raws_n +
- sizeof(struct mlx5_counter_stats_mem_mng);
- size_t pgsize = rte_mem_page_size();
- if (pgsize == (size_t)-1) {
- DRV_LOG(ERR, "Failed to get mem page size");
- rte_errno = ENOMEM;
- return NULL;
- }
- uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize,
- SOCKET_ID_ANY);
- int i;
+ struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
- if (!mem) {
- rte_errno = ENOMEM;
- return NULL;
- }
- mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
- size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mem_mng->umem) {
- rte_errno = errno;
- mlx5_free(mem);
- return NULL;
- }
- mkey_attr.addr = (uintptr_t)mem;
- mkey_attr.size = size;
- mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->pdn;
- mkey_attr.log_entity_size = 0;
- mkey_attr.pg_access = 0;
- mkey_attr.klm_array = NULL;
- mkey_attr.klm_num = 0;
- if (priv->config.hca_attr.relaxed_ordering_write &&
- priv->config.hca_attr.relaxed_ordering_read &&
- !haswell_broadwell_cpu)
- mkey_attr.relaxed_ordering = 1;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
- if (!mem_mng->dm) {
- mlx5_glue->devx_umem_dereg(mem_mng->umem);
- rte_errno = errno;
- mlx5_free(mem);
- return NULL;
- }
- mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
- raw_data = (volatile struct flow_counter_stats *)mem;
- for (i = 0; i < raws_n; ++i) {
- mem_mng->raws[i].mem_mng = mem_mng;
- mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
+ if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
+ pool = pool_tmp;
+ break;
+ }
}
- LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
- return mem_mng;
+out:
+ rte_spinlock_unlock(&cmng->pool_update_sl);
+ return pool;
}
/**
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
- * @param[in] age
- * Whether the pool is for Aging counter.
*
* @return
* 0 on success, otherwise negative errno value and rte_errno is set.
*/
static int
-flow_dv_container_resize(struct rte_eth_dev *dev,
- uint32_t batch, uint32_t age)
+flow_dv_container_resize(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- age);
- struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
- void *old_pools = cont->pools;
- uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ void *old_pools = cmng->pools;
+ uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
return -ENOMEM;
}
if (old_pools)
- memcpy(pools, old_pools, cont->n *
+ memcpy(pools, old_pools, cmng->n *
sizeof(struct mlx5_flow_counter_pool *));
- /*
- * Fallback mode query the counter directly, no background query
- * resources are needed.
- */
- if (!priv->counter_fallback) {
- int i;
-
- mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
- MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
- if (!mem_mng) {
- mlx5_free(pools);
- return -ENOMEM;
- }
- for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
- LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
- mem_mng->raws +
- MLX5_CNT_CONTAINER_RESIZE +
- i, next);
- }
- rte_spinlock_lock(&cont->resize_sl);
- cont->n = resize;
- cont->mem_mng = mem_mng;
- cont->pools = pools;
- rte_spinlock_unlock(&cont->resize_sl);
+ cmng->n = resize;
+ cmng->pools = pools;
if (old_pools)
mlx5_free(old_pools);
return 0;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
int offset;
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
MLX5_ASSERT(pool);
- if (counter < MLX5_CNT_BATCH_OFFSET) {
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- if (priv->counter_fallback)
- return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
+ if (priv->sh->cmng.counter_fallback)
+ return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
0, pkts, bytes, 0, NULL, NULL, 0);
- }
-
rte_spinlock_lock(&pool->sl);
- /*
- * The single counters allocation may allocate smaller ID than the
- * current allocated in parallel to the host reading.
- * In this case the new counter values must be reported as 0.
- */
- if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
+ if (!pool->raw) {
*pkts = 0;
*bytes = 0;
} else {
* Pointer to the Ethernet device structure.
* @param[out] dcs
* The devX counter handle.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
* @param[in] age
* Whether the pool is for counter that was allocated for aging.
* @param[in/out] cont_cur
*/
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
- uint32_t batch, uint32_t age)
+ uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- age);
- int16_t n_valid = rte_atomic16_read(&cont->n_valid);
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ bool fallback = priv->sh->cmng.counter_fallback;
uint32_t size = sizeof(*pool);
- if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
- return NULL;
- size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
- size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
- size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
+ size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
+ size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
}
- pool->min_dcs = dcs;
- if (!priv->counter_fallback)
- pool->raw = cont->mem_mng->raws + n_valid %
- MLX5_CNT_CONTAINER_RESIZE;
- pool->raw_hw = NULL;
- pool->type = 0;
- pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT);
- pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE);
+ pool->raw = NULL;
+ pool->is_aged = !!age;
pool->query_gen = 0;
+ pool->min_dcs = dcs;
rte_spinlock_init(&pool->sl);
+ rte_spinlock_init(&pool->csl);
TAILQ_INIT(&pool->counters[0]);
TAILQ_INIT(&pool->counters[1]);
- TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
- pool->index = n_valid;
- cont->pools[n_valid] = pool;
- if (!batch) {
+ pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
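+ /* Register the new pool in the container under the update lock. */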
+ rte_spinlock_lock(&cmng->pool_update_sl);
+ pool->index = cmng->n_valid;
+ if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
+ mlx5_free(pool);
+ rte_spinlock_unlock(&cmng->pool_update_sl);
+ return NULL;
+ }
+ cmng->pools[pool->index] = pool;
+ cmng->n_valid++;
+ if (unlikely(fallback)) {
int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
- if (base < cont->min_id)
- cont->min_id = base;
- if (base > cont->max_id)
- cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
- cont->last_pool_idx = pool->index;
+ if (base < cmng->min_id)
+ cmng->min_id = base;
+ if (base > cmng->max_id)
+ cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+ cmng->last_pool_idx = pool->index;
}
- /* Pool initialization must be updated before host thread access. */
- rte_cio_wmb();
- rte_atomic16_add(&cont->n_valid, 1);
+ rte_spinlock_unlock(&cmng->pool_update_sl);
return pool;
}
-/**
- * Update the minimum dcs-id for aged or no-aged counter pool.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] pool
- * Current counter pool.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
- * @param[in] age
- * Whether the counter is for aging.
- */
-static void
-flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
- struct mlx5_flow_counter_pool *pool,
- uint32_t batch, uint32_t age)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_pool *other;
- struct mlx5_pools_container *cont;
-
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
- other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
- if (!other)
- return;
- if (pool->min_dcs->id < other->min_dcs->id) {
- rte_atomic64_set(&other->a64_dcs,
- rte_atomic64_read(&pool->a64_dcs));
- } else {
- rte_atomic64_set(&pool->a64_dcs,
- rte_atomic64_read(&other->a64_dcs));
- }
-}
/**
* Prepare a new counter and/or a new counter pool.
*
* Pointer to the Ethernet device structure.
* @param[out] cnt_free
* Where to put the pointer of a new counter.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
* @param[in] age
* Whether the pool is for counter that was allocated for aging.
*
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
struct mlx5_flow_counter **cnt_free,
- uint32_t batch, uint32_t age)
+ uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
struct mlx5_flow_counter_pool *pool;
struct mlx5_counters tmp_tq;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
+ enum mlx5_counter_type cnt_type =
+ age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
+ bool fallback = priv->sh->cmng.counter_fallback;
uint32_t i;
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
- if (!batch) {
+ if (fallback) {
/* bulk_bitmap must be 0 for single counter allocation. */
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
if (!dcs)
return NULL;
- pool = flow_dv_find_pool_by_id(cont, dcs->id);
+ pool = flow_dv_find_pool_by_id(cmng, dcs->id);
if (!pool) {
- pool = flow_dv_pool_create(dev, dcs, batch, age);
+ pool = flow_dv_pool_create(dev, dcs, age);
if (!pool) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
- } else if (dcs->id < pool->min_dcs->id) {
- rte_atomic64_set(&pool->a64_dcs,
- (int64_t)(uintptr_t)dcs);
}
- flow_dv_counter_update_min_dcs(dev,
- pool, batch, age);
i = dcs->id % MLX5_COUNTERS_PER_POOL;
cnt = MLX5_POOL_GET_CNT(pool, i);
cnt->pool = pool;
- MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
+ cnt->dcs_when_free = dcs;
*cnt_free = cnt;
return pool;
}
- /* bulk_bitmap is in 128 counters units. */
- if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
if (!dcs) {
rte_errno = ENODATA;
return NULL;
}
- pool = flow_dv_pool_create(dev, dcs, batch, age);
+ pool = flow_dv_pool_create(dev, dcs, age);
if (!pool) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
cnt->pool = pool;
TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
}
- rte_spinlock_lock(&cont->csl);
- TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
- rte_spinlock_unlock(&cont->csl);
+ rte_spinlock_lock(&cmng->csl[cnt_type]);
+ TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
+ rte_spinlock_unlock(&cmng->csl[cnt_type]);
*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
(*cnt_free)->pool = pool;
return pool;
}
-/**
- * Search for existed shared counter.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] id
- * The shared counter ID to search.
- * @param[out] ppool
- * mlx5 flow counter pool in the container,
- *
- * @return
- * NULL if not existed, otherwise pointer to the shared extend counter.
- */
-static struct mlx5_flow_counter_ext *
-flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
- struct mlx5_flow_counter_pool **ppool)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- union mlx5_l3t_data data;
- uint32_t cnt_idx;
-
- if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
- return NULL;
- cnt_idx = data.dword;
- /*
- * Shared counters don't have age info. The counter extend is after
- * the counter datat structure.
- */
- return (struct mlx5_flow_counter_ext *)
- ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
-}
-
/**
* Allocate a flow counter.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] shared
- * Indicate if this counter is shared with other flows.
- * @param[in] id
- * Counter identifier.
- * @param[in] group
- * Counter flow group.
* @param[in] age
* Whether the counter was allocated for aging.
*
* Index to flow counter on success, 0 otherwise and rte_errno is set.
*/
static uint32_t
-flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
- uint16_t group, uint32_t age)
+flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt_free = NULL;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
- /*
- * Currently group 0 flow counter cannot be assigned to a flow if it is
- * not the first one in the batch counter allocation, so it is better
- * to allocate counters one by one for these flows in a separate
- * container.
- * A counter can be shared between different groups so need to take
- * shared counters from the single container.
- */
- uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
- struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
- age);
+ bool fallback = priv->sh->cmng.counter_fallback;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ enum mlx5_counter_type cnt_type =
+ age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
uint32_t cnt_idx;
if (!priv->config.devx) {
rte_errno = ENOTSUP;
return 0;
}
- if (shared) {
- cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
- if (cnt_ext) {
- if (cnt_ext->ref_cnt + 1 == 0) {
- rte_errno = E2BIG;
- return 0;
- }
- cnt_ext->ref_cnt++;
- cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
- (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
- + 1;
- return cnt_idx;
- }
- }
/* Get free counters from container. */
- rte_spinlock_lock(&cont->csl);
- cnt_free = TAILQ_FIRST(&cont->counters);
+ rte_spinlock_lock(&cmng->csl[cnt_type]);
+ cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
if (cnt_free)
- TAILQ_REMOVE(&cont->counters, cnt_free, next);
- rte_spinlock_unlock(&cont->csl);
- if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
- batch, age))
+ TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
+ rte_spinlock_unlock(&cmng->csl[cnt_type]);
+ if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
goto err;
pool = cnt_free->pool;
- if (!batch)
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
+ if (fallback)
+ cnt_free->dcs_when_active = cnt_free->dcs_when_free;
/* Create a DV counter action only in the first time usage. */
if (!cnt_free->action) {
uint16_t offset;
struct mlx5_devx_obj *dcs;
int ret;
- if (batch) {
+ if (!fallback) {
offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
dcs = pool->min_dcs;
} else {
offset = 0;
- dcs = cnt_ext->dcs;
+ dcs = cnt_free->dcs_when_free;
}
ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
&cnt_free->action);
}
cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
MLX5_CNT_ARRAY_IDX(pool, cnt_free));
- cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
- cnt_idx += age * MLX5_CNT_AGE_OFFSET;
/* Update the counter reset values. */
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
goto err;
- if (cnt_ext) {
- cnt_ext->shared = shared;
- cnt_ext->ref_cnt = 1;
- cnt_ext->id = id;
- if (shared) {
- union mlx5_l3t_data data;
-
- data.dword = cnt_idx;
- if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
- return 0;
- }
- }
- if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
+ if (!fallback && !priv->sh->cmng.query_thread_on)
/* Start the asynchronous batch query by the host thread. */
mlx5_set_query_alarm(priv->sh);
return cnt_idx;
err:
if (cnt_free) {
cnt_free->pool = pool;
- rte_spinlock_lock(&cont->csl);
- TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
- rte_spinlock_unlock(&cont->csl);
+ if (fallback)
+ cnt_free->dcs_when_free = cnt_free->dcs_when_active;
+ rte_spinlock_lock(&cmng->csl[cnt_type]);
+ TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
+ rte_spinlock_unlock(&cmng->csl[cnt_type]);
}
return 0;
}
+/**
+ * Allocate a shared flow counter.
+ *
+ * @param[in] ctx
+ * Pointer to the shared counter configuration.
+ * @param[out] data
+ * Pointer to save the allocated counter index.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int32_t
+flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
+{
+ struct mlx5_shared_counter_conf *conf = ctx;
+ struct rte_eth_dev *dev = conf->dev;
+ struct mlx5_flow_counter *cnt;
+
+ data->dword = flow_dv_counter_alloc(dev, 0);
+ data->dword |= MLX5_CNT_SHARED_OFFSET;
+ cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
+ cnt->shared_info.id = conf->id;
+ return 0;
+}
+
+/**
+ * Get a shared flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * Index to flow counter on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_shared_counter_conf conf = {
+ .dev = dev,
+ .id = id,
+ };
+ union mlx5_l3t_data data = {
+ .dword = 0,
+ };
+
+ mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
+ flow_dv_counter_alloc_shared_cb, &conf);
+ return data.dword;
+}
+
/**
* Get age param from counter index.
*
struct mlx5_age_info *age_info;
struct mlx5_age_param *age_param;
struct mlx5_priv *priv = dev->data->dev_private;
+ uint16_t expected = AGE_CANDIDATE;
age_info = GET_PORT_AGE_INFO(priv);
age_param = flow_dv_counter_idx_get_age(dev, counter);
- if (rte_atomic16_cmpset((volatile uint16_t *)
- &age_param->state,
- AGE_CANDIDATE, AGE_FREE)
- != AGE_CANDIDATE) {
+ if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+ AGE_FREE, false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
/**
* We need the lock even it is age timeout,
* since counter may still in process.
rte_spinlock_lock(&age_info->aged_sl);
TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
rte_spinlock_unlock(&age_info->aged_sl);
+ __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
}
- rte_atomic16_set(&age_param->state, AGE_FREE);
}
+
/**
* Release a flow counter.
*
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
- struct mlx5_flow_counter_ext *cnt_ext = NULL;
+ enum mlx5_counter_type cnt_type;
if (!counter)
return;
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
MLX5_ASSERT(pool);
- if (counter < MLX5_CNT_BATCH_OFFSET) {
- cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- if (cnt_ext) {
- if (--cnt_ext->ref_cnt)
- return;
- if (cnt_ext->shared)
- mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
- cnt_ext->id);
- }
- }
- if (IS_AGE_POOL(pool))
+ if (IS_SHARED_CNT(counter) &&
+ mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
+ return;
+ if (pool->is_aged)
flow_dv_counter_remove_from_age(dev, counter, cnt);
cnt->pool = pool;
/*
* function both operate with the different list.
*
*/
- if (!priv->counter_fallback)
+ if (!priv->sh->cmng.counter_fallback) {
+ rte_spinlock_lock(&pool->csl);
TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
- else
- TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
- (priv->sh, 0, 0))->counters),
+ rte_spinlock_unlock(&pool->csl);
+ } else {
+ cnt->dcs_when_free = cnt->dcs_when_active;
+ cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
+ MLX5_COUNTER_TYPE_ORIGIN;
+ rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
+ TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
cnt, next);
+ rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
+ }
}
/**
.dst_port = RTE_BE16(UINT16_MAX),
}
};
- const struct rte_flow_item_ipv4 nic_ipv4_mask = {
- .hdr = {
- .src_addr = RTE_BE32(0xffffffff),
- .dst_addr = RTE_BE32(0xffffffff),
- .type_of_service = 0xff,
- .next_proto_id = 0xff,
- .time_to_live = 0xff,
- },
- };
const struct rte_flow_item_ipv6 nic_ipv6_mask = {
.hdr = {
.src_addr =
.proto = 0xff,
.hop_limits = 0xff,
},
+ .has_frag_ext = 1,
};
const struct rte_flow_item_ecpri nic_ecpri_mask = {
.hdr = {
case RTE_FLOW_ITEM_TYPE_IPV4:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
- ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- last_item,
- ether_type,
- &nic_ipv4_mask,
- error);
+ ret = flow_dv_validate_item_ipv4(items, item_flags,
+ last_item, ether_type,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
next_protocol = 0xff;
}
break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ ret = flow_dv_validate_item_ipv6_frag_ext(items,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->spec)->hdr.next_header;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
(items, item_flags,
action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
rw_act_num += MLX5_ACT_NUM_SET_DSCP;
break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ ret = flow_dv_validate_action_sample(action_flags,
+ actions, dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ ++actions_n;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
ipv4_m->hdr.time_to_live);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
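+ /* Set the "frag" match bit according to fragment_offset spec and mask. */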
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
+ !!(ipv4_m->hdr.fragment_offset));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
}
/**
ipv6_m->hdr.hop_limits);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
+ !!(ipv6_m->has_frag_ext));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
+}
+
+/**
+ * Add IPV6 fragment extension item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
+ const struct rte_flow_item_ipv6_frag_ext nic_mask = {
+ .hdr = {
+ .next_header = 0xff,
+ .frag_data = RTE_BE16(0xffff),
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ /* IPv6 fragment extension item exists, so packet is IP fragment. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
+ if (!ipv6_frag_ext_v)
+ return;
+ if (!ipv6_frag_ext_m)
+ ipv6_frag_ext_m = &nic_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv6_frag_ext_m->hdr.next_header);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv6_frag_ext_v->hdr.next_header &
+ ipv6_frag_ext_m->hdr.next_header);
}
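/*
 * Illustrative sketch, not part of this patch: matching IPv6 fragments with
 * the IPV6_FRAG_EXT item handled by the function above. The next-header
 * value (UDP) and the port/queue numbers are placeholder assumptions.
 */
#include <netinet/in.h>
#include <rte_flow.h>

static struct rte_flow *
match_ipv6_udp_fragments(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	/* Fragment header present and carrying UDP as next header. */
	static const struct rte_flow_item_ipv6_frag_ext frag_spec = {
		.hdr.next_header = IPPROTO_UDP,
	};
	static const struct rte_flow_item_ipv6_frag_ext frag_mask = {
		.hdr.next_header = 0xff,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
		  .spec = &frag_spec, .mask = &frag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}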
/**
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- const char *tni_flow_id_m = (const char *)nvgre_m->tni;
- const char *tni_flow_id_v = (const char *)nvgre_v->tni;
+ const char *tni_flow_id_m;
+ const char *tni_flow_id_v;
char *gre_key_m;
char *gre_key_v;
int size;
return;
if (!nvgre_m)
nvgre_m = &rte_flow_item_nvgre_mask;
+ tni_flow_id_m = (const char *)nvgre_m->tni;
+ tni_flow_id_v = (const char *)nvgre_v->tni;
size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
return;
if (!icmp6_m)
icmp6_m = &rte_flow_item_icmp6_mask;
- /*
- * Force flow only to match the non-fragmented IPv6 ICMPv6 packets.
- * If only the protocol is specified, no need to match the frag.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
icmp6_v->type & icmp6_m->type);
{
const struct rte_flow_item_icmp *icmp_m = item->mask;
const struct rte_flow_item_icmp *icmp_v = item->spec;
+ uint32_t icmp_header_data_m = 0;
+ uint32_t icmp_header_data_v = 0;
void *headers_m;
void *headers_v;
void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
return;
if (!icmp_m)
icmp_m = &rte_flow_item_icmp_mask;
- /*
- * Force flow only to match the non-fragmented IPv4 ICMP packets.
- * If only the protocol is specified, no need to match the frag.
- */
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
icmp_m->hdr.icmp_type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
icmp_m->hdr.icmp_code);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+ icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
+ icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
+ if (icmp_header_data_m) {
+ icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
+ icmp_header_data_v |=
+ rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
+ icmp_header_data_m);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
+ icmp_header_data_v & icmp_header_data_m);
+ }
}
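/*
 * Illustrative sketch, not part of this patch: matching the ICMP identifier
 * through the public API; the code above packs identifier and sequence
 * number into the misc3 icmp_header_data field. The identifier value and
 * port/queue numbers are placeholder assumptions.
 */
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
match_icmp_echo_ident(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_icmp icmp_spec = {
		.hdr.icmp_type = 8,			/* echo request */
		.hdr.icmp_ident = RTE_BE16(0x1234),
	};
	static const struct rte_flow_item_icmp icmp_mask = {
		.hdr.icmp_type = 0xff,
		.hdr.icmp_ident = RTE_BE16(0xffff),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_ICMP,
		  .spec = &icmp_spec, .mask = &icmp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}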
/**
uint32_t counter;
struct mlx5_age_param *age_param;
- counter = flow_dv_counter_alloc(dev,
- count ? count->shared : 0,
- count ? count->id : 0,
- dev_flow->dv.group, !!age);
+ if (count && count->shared)
+ counter = flow_dv_counter_get_shared(dev, count->id);
+ else
+ counter = flow_dv_counter_alloc(dev, !!age);
if (!counter || age == NULL)
return counter;
age_param = flow_dv_counter_idx_get_age(dev, counter);
- /*
- * The counter age accuracy may have a bit delay. Have 3/4
- * second bias on the timeount in order to let it age in time.
- */
age_param->context = age->context ? age->context :
(void *)(uintptr_t)(dev_flow->flow_idx);
- /*
- * The counter age accuracy may have a bit delay. Have 3/4
- * second bias on the timeount in order to let it age in time.
- */
- age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
- /* Set expire time in unit of 0.1 sec. */
+ age_param->timeout = age->timeout;
age_param->port_id = dev->data->port_id;
- age_param->expire = age_param->timeout +
- rte_rdtsc() / (rte_get_tsc_hz() / 10);
- rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
+ __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
return counter;
}
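/*
 * Illustrative sketch, not part of this patch: requesting aging for a rule
 * with the public AGE action, which supplies the timeout and context stored
 * by the code above. Timeout, context pointer and port/queue numbers are
 * placeholder assumptions.
 */
#include <rte_flow.h>

static struct rte_flow *
create_aged_flow(uint16_t port_id, void *app_ctx, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_age age = {
		.timeout = 30,		/* seconds without traffic */
		.context = app_ctx,	/* reported by rte_flow_get_aged_flows() */
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}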
/**
/**
* Set the hash fields according to the @p flow information.
*
- * @param[in] dev_flow
- * Pointer to the mlx5_flow.
- * @param[in] rss_desc
- * Pointer to the mlx5_flow_rss_desc.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to the mlx5_flow_rss_desc.
+ */
+static void
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc)
+{
+ uint64_t items = dev_flow->handle->layers;
+ int rss_inner = 0;
+ uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
+
+ dev_flow->hash_fields = 0;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss_desc->level >= 2) {
+ dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ rss_inner = 1;
+ }
+#endif
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+ if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+ else
+ dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+ if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+ else
+ dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ }
+ }
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+ if (rss_types & ETH_RSS_UDP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_UDP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_UDP;
+ else
+ dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+ if (rss_types & ETH_RSS_TCP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_TCP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_TCP;
+ else
+ dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ }
+ }
+}
+
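/*
 * Illustrative sketch, not part of this patch: an RSS action whose type
 * flags map to the hash fields selected above (source-address-only IPv4
 * hashing combined with full UDP port hashing). The queue list and the
 * default RSS key are placeholder assumptions.
 */
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_rss_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,	/* hash on the outermost headers */
		.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY | ETH_RSS_UDP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}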
+/**
+ * Get an existing Rx hash queue or create a new one.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to the mlx5_flow_rss_desc.
+ * @param[out] hrxq_idx
+ * Hash Rx queue index.
+ *
+ * @return
+ * The pointer to the hash Rx queue, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_hrxq *
+flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint32_t *hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_handle *dh = dev_flow->handle;
+ struct mlx5_hrxq *hrxq;
+
+ MLX5_ASSERT(rss_desc->queue_num);
+ *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num);
+ if (!*hrxq_idx) {
+ *hrxq_idx = mlx5_hrxq_new
+ (dev, rss_desc->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num,
+ !!(dh->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!*hrxq_idx)
+ return NULL;
+ }
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ *hrxq_idx);
+ return hrxq;
+}
+
+/**
+ * Find existing sample resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[in] resource
+ * Pointer to sample resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[in, out] sample_dv_actions
+ * Pointer to sample actions list.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_sample_resource_register(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct mlx5_flow_dv_sample_resource *resource,
+ struct mlx5_flow *dev_flow,
+ void **sample_dv_actions,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+ struct mlx5dv_dr_flow_sampler_attr sampler_attr;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_tbl_resource *tbl;
+ uint32_t idx = 0;
+ const uint32_t next_ft_step = 1;
+ uint32_t next_ft_id = resource->ft_id + next_ft_step;
+
+ /* Lookup a matching resource from cache. */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
+ idx, cache_resource, next) {
+ if (resource->ratio == cache_resource->ratio &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->ft_id == cache_resource->ft_id &&
+ resource->set_action == cache_resource->set_action &&
+ !memcmp((void *)&resource->sample_act,
+ (void *)&cache_resource->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list))) {
+ DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ dev_flow->handle->dvh.rix_sample = idx;
+ dev_flow->dv.sample_res = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new sample resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
+ &dev_flow->handle->dvh.rix_sample);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ /* Create normal path table level */
+ tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
+ attr->egress, attr->transfer, error);
+ if (!tbl) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "fail to create normal path table "
+ "for sample");
+ goto error;
+ }
+ cache_resource->normal_path_tbl = tbl;
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ cache_resource->default_miss =
+ mlx5_glue->dr_create_flow_action_default_miss();
+ if (!cache_resource->default_miss) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create default miss "
+ "action");
+ goto error;
+ }
+ sample_dv_actions[resource->sample_act.actions_num++] =
+ cache_resource->default_miss;
+ }
+ /* Create a DR sample action */
+ sampler_attr.sample_ratio = cache_resource->ratio;
+ sampler_attr.default_next_table = tbl->obj;
+ sampler_attr.num_sample_actions = resource->sample_act.actions_num;
+ sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
+ &sample_dv_actions[0];
+ sampler_attr.action = cache_resource->set_action;
+ cache_resource->verbs_action =
+ mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
+ if (!cache_resource->verbs_action) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create sample action");
+ goto error;
+ }
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
+ dev_flow->handle->dvh.rix_sample, cache_resource,
+ next);
+ dev_flow->dv.sample_res = cache_resource;
+ DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ return 0;
+error:
+ if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ if (cache_resource->default_miss)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->default_miss));
+ } else {
+ if (cache_resource->sample_idx.rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ cache_resource->sample_idx.rix_hrxq))
+ cache_resource->sample_idx.rix_hrxq = 0;
+ if (cache_resource->sample_idx.rix_tag &&
+ !flow_dv_tag_release(dev,
+ cache_resource->sample_idx.rix_tag))
+ cache_resource->sample_idx.rix_tag = 0;
+ if (cache_resource->sample_idx.cnt) {
+ flow_dv_counter_release(dev,
+ cache_resource->sample_idx.cnt);
+ cache_resource->sample_idx.cnt = 0;
+ }
+ }
+ if (cache_resource->normal_path_tbl)
+ flow_dv_tbl_resource_release(dev,
+ cache_resource->normal_path_tbl);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
+ dev_flow->handle->dvh.rix_sample);
+ dev_flow->handle->dvh.rix_sample = 0;
+ return -rte_errno;
+}
+
+/**
+ * Find existing destination array resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[in] resource
+ * Pointer to destination array resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct mlx5_flow_dv_dest_array_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
+ struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0;
+
+ /* Lookup a matching resource from cache. */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ sh->dest_array_list,
+ idx, cache_resource, next) {
+ if (resource->num_of_dest == cache_resource->num_of_dest &&
+ resource->ft_type == cache_resource->ft_type &&
+ !memcmp((void *)cache_resource->sample_act,
+ (void *)resource->sample_act,
+ (resource->num_of_dest *
+ sizeof(struct mlx5_flow_sub_actions_list)))) {
+ DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
+ dev_flow->handle->dvh.rix_dest_array = idx;
+ dev_flow->dv.dest_array_res = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new destination array resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &dev_flow->handle->dvh.rix_dest_array);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ if (attr->transfer)
+ domain = sh->fdb_domain;
+ else if (attr->ingress)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5dv_dr_action_dest_attr),
+ 0, SOCKET_ID_ANY);
+ if (!dest_attr[idx]) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate resource memory");
+ goto error;
+ }
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
+ sample_act = &resource->sample_act[idx];
+ if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
+ dest_attr[idx]->dest = sample_act->dr_queue_action;
+ } else if (sample_act->action_flags ==
+ (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
+ dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
+ dest_attr[idx]->dest_reformat = &dest_reformat[idx];
+ dest_attr[idx]->dest_reformat->reformat =
+ sample_act->dr_encap_action;
+ dest_attr[idx]->dest_reformat->dest =
+ sample_act->dr_port_id_action;
+ } else if (sample_act->action_flags ==
+ MLX5_FLOW_ACTION_PORT_ID) {
+ dest_attr[idx]->dest = sample_act->dr_port_id_action;
+ }
+ }
+ /* Create a dest array action. */
+ cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
+ (domain,
+ cache_resource->num_of_dest,
+ dest_attr);
+ if (!cache_resource->action) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot create destination array action");
+ goto error;
+ }
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &sh->dest_array_list,
+ dev_flow->handle->dvh.rix_dest_array, cache_resource,
+ next);
+ dev_flow->dv.dest_array_res = cache_resource;
+ DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ for (idx = 0; idx < resource->num_of_dest; idx++)
+ mlx5_free(dest_attr[idx]);
+ return 0;
+error:
+ for (idx = 0; idx < resource->num_of_dest; idx++) {
+ struct mlx5_flow_sub_actions_idx *act_res =
+ &cache_resource->sample_idx[idx];
+ if (act_res->rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ act_res->rix_hrxq))
+ act_res->rix_hrxq = 0;
+ if (act_res->rix_encap_decap &&
+ !flow_dv_encap_decap_resource_release(dev,
+ act_res->rix_encap_decap))
+ act_res->rix_encap_decap = 0;
+ if (act_res->rix_port_id_action &&
+ !flow_dv_port_id_action_resource_release(dev,
+ act_res->rix_port_id_action))
+ act_res->rix_port_id_action = 0;
+ if (dest_attr[idx])
+ mlx5_free(dest_attr[idx]);
+ }
+
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ dev_flow->handle->dvh.rix_dest_array);
+ dev_flow->handle->dvh.rix_dest_array = 0;
+ return -rte_errno;
+}
+
+/**
+ * Convert Sample action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to action structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in, out] num_of_dest
+ * Pointer to the number of destinations.
+ * @param[in, out] sample_actions
+ * Pointer to sample actions list.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate_action_sample(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ uint32_t *num_of_dest,
+ void **sample_actions,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_sample *sample_action;
+ const struct rte_flow_action *sub_actions;
+ const struct rte_flow_action_queue *queue;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ struct mlx5_flow_sub_actions_idx *sample_idx;
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
+ uint64_t action_flags = 0;
+
+ sample_act = &res->sample_act;
+ sample_idx = &res->sample_idx;
+ sample_action = (const struct rte_flow_action_sample *)action->conf;
+ res->ratio = sample_action->ratio;
+ sub_actions = sample_action->actions;
+ for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
+ int type = sub_actions->type;
+ uint32_t pre_rix = 0;
+ void *pre_r;
+ switch (type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ queue = sub_actions->conf;
+ rss_desc->queue_num = 1;
+ rss_desc->queue[0] = queue->index;
+ hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create fate queue");
+ sample_act->dr_queue_action = hrxq->action;
+ sample_idx->rix_hrxq = hrxq_idx;
+ sample_actions[sample_act->actions_num++] =
+ hrxq->action;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_QUEUE;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ uint32_t tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (sub_actions->conf))->id);
+
+ dev_flow->handle->mark = 1;
+ pre_rix = dev_flow->handle->dvh.rix_tag;
+ /* Save the mark resource before sample */
+ pre_r = dev_flow->dv.tag_resource;
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ sample_act->dr_tag_action =
+ dev_flow->dv.tag_resource->action;
+ sample_idx->rix_tag =
+ dev_flow->handle->dvh.rix_tag;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_tag_action;
+ /* Recover the mark resource after sample */
+ dev_flow->dv.tag_resource = pre_r;
+ dev_flow->handle->dvh.rix_tag = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ {
+ uint32_t counter;
+
+ counter = flow_dv_translate_create_counter(dev,
+ dev_flow, sub_actions->conf, 0);
+ if (!counter)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create counter"
+ " object.");
+ sample_idx->cnt = counter;
+ sample_act->dr_cnt_action =
+ (flow_dv_counter_get_by_idx(dev,
+ counter, NULL))->action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_cnt_action;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ {
+ struct mlx5_flow_dv_port_id_action_resource
+ port_id_resource;
+ uint32_t port_id = 0;
+
+ memset(&port_id_resource, 0, sizeof(port_id_resource));
+ /* Save the port id resource before sample */
+ pre_rix = dev_flow->handle->rix_port_id_action;
+ pre_r = dev_flow->dv.port_id_action;
+ if (flow_dv_translate_action_port_id(dev, sub_actions,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
+ sample_idx->rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_port_id_action;
+ /* Recover the port id resource after sample */
+ dev_flow->dv.port_id_action = pre_r;
+ dev_flow->handle->rix_port_id_action = pre_rix;
+ (*num_of_dest)++;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Save the encap resource before sample */
+ pre_rix = dev_flow->handle->dvh.rix_encap_decap;
+ pre_r = dev_flow->dv.encap_decap;
+ if (flow_dv_create_action_l2_encap(dev, sub_actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
+ sample_idx->rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_actions[sample_act->actions_num++] =
+ sample_act->dr_encap_action;
+ /* Recover the encap resource after sample */
+ dev_flow->dv.encap_decap = pre_r;
+ dev_flow->handle->dvh.rix_encap_decap = pre_rix;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Not support for sampler action");
+ }
+ }
+ sample_act->action_flags = action_flags;
+ res->ft_id = dev_flow->dv.group;
+ if (attr->transfer) {
+ union {
+ uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
+ uint64_t set_action;
+ } action_ctx = { .set_action = 0 };
+
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ MLX5_SET(set_action_in, action_ctx.action_in, action_type,
+ MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, action_ctx.action_in, field,
+ MLX5_MODI_META_REG_C_0);
+ MLX5_SET(set_action_in, action_ctx.action_in, data,
+ priv->vport_meta_tag);
+ res->set_action = action_ctx.set_action;
+ } else if (attr->ingress) {
+ res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ }
+ return 0;
+}
+
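/*
 * Illustrative sketch, not part of this patch: a sampling rule using the
 * public SAMPLE action that the function above translates. Roughly one
 * packet out of every 1000 is copied to queue 0 and counted, while all
 * packets follow the normal path to queue 1. Ratio, counter id, port and
 * queue numbers are placeholder assumptions.
 */
#include <rte_flow.h>

static struct rte_flow *
create_sample_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue sample_queue = { .index = 0 };
	static const struct rte_flow_action_count sample_count = { .id = 1 };
	static const struct rte_flow_action sample_sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &sample_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &sample_count },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	static const struct rte_flow_action_sample sample = {
		.ratio = 1000,			/* sample 1 of 1000 packets */
		.actions = sample_sub_actions,
	};
	static const struct rte_flow_action_queue fate_queue = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &fate_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}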
+/**
+ * Create the sample action resource, or the destination array resource
+ * when the sample action has multiple destinations.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] num_of_dest
+ * The number of destinations.
+ * @param[in, out] res
+ * Pointer to sample resource.
+ * @param[in, out] mdest_res
+ * Pointer to destination array resource.
+ * @param[in] sample_actions
+ * Pointer to sample path actions list.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
- struct mlx5_flow_rss_desc *rss_desc)
+static int
+flow_dv_create_action_sample(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ uint32_t num_of_dest,
+ struct mlx5_flow_dv_sample_resource *res,
+ struct mlx5_flow_dv_dest_array_resource *mdest_res,
+ void **sample_actions,
+ uint64_t action_flags,
+ struct rte_flow_error *error)
{
- uint64_t items = dev_flow->handle->layers;
- int rss_inner = 0;
- uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
-
- dev_flow->hash_fields = 0;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (rss_desc->level >= 2) {
- dev_flow->hash_fields |= IBV_RX_HASH_INNER;
- rss_inner = 1;
- }
-#endif
- if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
- if (rss_types & MLX5_IPV4_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
- else
- dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
- }
- } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
- if (rss_types & MLX5_IPV6_LAYER_TYPES) {
- if (rss_types & ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
- else if (rss_types & ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
- else
- dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ /* update normal path action resource into last index of array */
+ uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
+ struct mlx5_flow_sub_actions_list *sample_act =
+ &mdest_res->sample_act[dest_index];
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
+ uint32_t normal_idx = 0;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx;
+
+ if (num_of_dest > 1) {
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
+ /* Handle QP action for mirroring */
+ hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
+ rss_desc, &hrxq_idx);
+ if (!hrxq)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create rx queue");
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
+ sample_act->dr_queue_action = hrxq->action;
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ dev_flow->handle->rix_hrxq = hrxq_idx;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
}
- }
- if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
- if (rss_types & ETH_RSS_UDP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_UDP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_UDP;
- else
- dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_encap_decap =
+ dev_flow->handle->dvh.rix_encap_decap;
+ sample_act->dr_encap_action =
+ dev_flow->dv.encap_decap->action;
}
- } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
- if (rss_types & ETH_RSS_TCP) {
- if (rss_types & ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_TCP;
- else if (rss_types & ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_TCP;
- else
- dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
+ normal_idx++;
+ mdest_res->sample_idx[dest_index].rix_port_id_action =
+ dev_flow->handle->rix_port_id_action;
+ sample_act->dr_port_id_action =
+ dev_flow->dv.port_id_action->action;
}
+ sample_act->actions_num = normal_idx;
+ /* update sample action resource into first index of array */
+ mdest_res->ft_type = res->ft_type;
+ memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
+ sizeof(struct mlx5_flow_sub_actions_idx));
+ memcpy(&mdest_res->sample_act[0], &res->sample_act,
+ sizeof(struct mlx5_flow_sub_actions_list));
+ mdest_res->num_of_dest = num_of_dest;
+ if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
+ dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create sample "
+ "action");
+ } else {
+ if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
+ sample_actions, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "can't create sample action");
}
+ return 0;
}
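/*
 * Illustrative sketch, not part of this patch: mirroring with ratio = 1,
 * where the sampled copy goes to one port and the original packet to
 * another; with two destinations the function above registers a destination
 * array instead of a plain sample resource. The port ids are placeholder
 * assumptions and the rule requires an E-Switch (transfer) setup.
 */
#include <rte_flow.h>

static struct rte_flow *
create_mirror_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = {
		.ingress = 1,
		.transfer = 1,
	};
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_port_id mirror_port = { .id = 1 };
	static const struct rte_flow_action mirror_sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror_port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	static const struct rte_flow_action_sample mirror = {
		.ratio = 1,		/* every packet, i.e. mirroring */
		.actions = mirror_sub_actions,
	};
	static const struct rte_flow_action_port_id orig_port = { .id = 2 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &mirror },
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &orig_port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}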
/**
void *match_value = dev_flow->dv.value.buf;
uint8_t next_protocol = 0xff;
struct rte_vlan_hdr vlan = { 0 };
+ struct mlx5_flow_dv_dest_array_resource mdest_res;
+ struct mlx5_flow_dv_sample_resource sample_res;
+ void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ struct mlx5_flow_sub_actions_list *sample_act;
+ uint32_t sample_act_pos = UINT32_MAX;
+ uint32_t num_of_dest = 0;
+ int tmp_actions_n = 0;
uint32_t table;
int ret = 0;
+ memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
+ memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ /* update normal path action resource into last index of array */
+ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
!!priv->fdb_def_rule, &table, error);
if (ret)
const struct rte_flow_action_rss *rss;
const struct rte_flow_action *action = actions;
const uint8_t *rss_key;
- const struct rte_flow_action_jump *jump_data;
const struct rte_flow_action_meter *mtr;
struct mlx5_flow_tbl_resource *tbl;
uint32_t port_id = 0;
int action_type = actions->type;
const struct rte_flow_action *found_action = NULL;
struct mlx5_flow_meter *fm = NULL;
+ uint32_t jump_group = 0;
if (!mlx5_flow_os_action_supported(action_type))
return rte_flow_error_set(error, ENOTSUP,
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
rss_desc->queue[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ num_of_dest++;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
rss = actions->conf;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
dev_flow->dv.encap_decap->action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- jump_data = action->conf;
+ jump_group = ((const struct rte_flow_action_jump *)
+ action->conf)->group;
+ if (dev_flow->external && jump_group <
+ MLX5_MAX_TABLES_EXTERNAL)
+ jump_group *= MLX5_FLOW_TABLE_FACTOR;
ret = mlx5_flow_group_to_table(attr, dev_flow->external,
- jump_data->group,
+ jump_group,
!!priv->fdb_def_rule,
&table, error);
if (ret)
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ sample_act_pos = actions_n;
+ ret = flow_dv_translate_action_sample(dev,
+ actions,
+ dev_flow, attr,
+ &num_of_dest,
+ sample_actions,
+ &sample_res,
+ error);
+ if (ret < 0)
+ return ret;
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ /* Put the encap action into the group when used with the port id action. */
+ if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
NULL,
"cannot create counter"
" object.");
- dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.actions[actions_n] =
(flow_dv_counter_get_by_idx(dev,
flow->counter, NULL))->action;
+ actions_n++;
+ }
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
+ ret = flow_dv_create_action_sample(dev,
+ dev_flow, attr,
+ num_of_dest,
+ &sample_res,
+ &mdest_res,
+ sample_actions,
+ action_flags,
+ error);
+ if (ret < 0)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create sample action");
+ if (num_of_dest > 1) {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.dest_array_res->action;
+ } else {
+ dev_flow->dv.actions[sample_act_pos] =
+ dev_flow->dv.sample_res->verbs_action;
+ }
}
break;
default:
modify_action_position == UINT32_MAX)
modify_action_position = actions_n++;
}
+ /*
+ * For multiple destinations (sample action with ratio = 1), the encap
+ * action and the port id action are combined into the destination array
+ * action. Remove these original actions from the flow and use the sample
+ * action instead.
+ */
+ if (num_of_dest > 1 && sample_act->dr_port_id_action) {
+ int i;
+ void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+
+ for (i = 0; i < actions_n; i++) {
+ if ((sample_act->dr_encap_action &&
+ sample_act->dr_encap_action ==
+ dev_flow->dv.actions[i]) ||
+ (sample_act->dr_port_id_action &&
+ sample_act->dr_port_id_action ==
+ dev_flow->dv.actions[i]))
+ continue;
+ temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
+ }
+ memcpy((void *)dev_flow->dv.actions,
+ (void *)temp_actions,
+ tmp_actions_n * sizeof(void *));
+ actions_n = tmp_actions_n;
+ }
dev_flow->dv.actions_n = actions_n;
dev_flow->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
next_protocol = 0xff;
}
break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ flow_dv_translate_item_ipv6_frag_ext(match_mask,
+ match_value,
+ items, tunnel);
+ last_item = tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->spec)->hdr.next_header;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,
items, tunnel);
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
struct mlx5_hrxq *drop_hrxq;
- drop_hrxq = mlx5_hrxq_drop_new(dev);
+ drop_hrxq = mlx5_drop_action_create(dev);
if (!drop_hrxq) {
rte_flow_error_set
(error, errno,
}
/*
* Drop queues will be released by the specify
- * mlx5_hrxq_drop_release() function. Assign
+ * mlx5_drop_action_destroy() function. Assign
* the special index to hrxq to mark the queue
* has been allocated.
*/
dh->rix_hrxq = UINT32_MAX;
dv->actions[n++] = drop_hrxq->action;
}
- } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
+ } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
+ !dv_h->rix_sample && !dv_h->rix_dest_array) {
struct mlx5_hrxq *hrxq;
uint32_t hrxq_idx;
struct mlx5_flow_rss_desc *rss_desc =
if (!hrxq_idx) {
hrxq_idx = mlx5_hrxq_new
(dev, rss_desc->key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- rss_desc->queue,
- rss_desc->queue_num,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ rss_desc->queue,
+ rss_desc->queue_num,
+ !!(dh->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
}
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
/* hrxq is union, don't clear it if the flag is not set. */
if (dh->rix_hrxq) {
if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
- mlx5_hrxq_drop_release(dev);
+ mlx5_drop_action_destroy(dev);
dh->rix_hrxq = 0;
} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
mlx5_hrxq_release(dev, dh->rix_hrxq);
*
* @param dev
* Pointer to Ethernet device.
- * @param handle
- * Pointer to mlx5_flow_handle.
+ * @param encap_decap_idx
+ * Index of encap decap resource.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+ uint32_t encap_decap_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t idx = handle->dvh.rix_encap_decap;
+ uint32_t idx = encap_decap_idx;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
- &priv->sh->encaps_decaps, idx,
- cache_resource, next);
+ mlx5_hlist_remove(priv->sh->encaps_decaps,
+ &cache_resource->entry);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
DRV_LOG(DEBUG, "encap/decap resource %p: removed",
(void *)cache_resource);
/**
* Release a modify-header resource.
*
+ * @param dev
+ * Pointer to Ethernet device.
* @param handle
* Pointer to mlx5_flow_handle.
*
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
+flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
handle->dvh.modify_hdr;
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
- LIST_REMOVE(cache_resource, next);
+ mlx5_hlist_remove(priv->sh->modify_cmds,
+ &cache_resource->entry);
mlx5_free(cache_resource);
DRV_LOG(DEBUG, "modify-header resource %p: removed",
(void *)cache_resource);
*/
static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow_handle *handle)
+ uint32_t port_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->rix_port_id_action;
+ uint32_t idx = port_id;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
idx);
return;
switch (handle->fate_action) {
case MLX5_FLOW_FATE_DROP:
- mlx5_hrxq_drop_release(dev);
+ mlx5_drop_action_destroy(dev);
break;
case MLX5_FLOW_FATE_QUEUE:
mlx5_hrxq_release(dev, handle->rix_hrxq);
flow_dv_jump_tbl_resource_release(dev, handle);
break;
case MLX5_FLOW_FATE_PORT_ID:
- flow_dv_port_id_action_resource_release(dev, handle);
+ flow_dv_port_id_action_resource_release(dev,
+ handle->rix_port_id_action);
break;
case MLX5_FLOW_FATE_DEFAULT_MISS:
flow_dv_default_miss_resource_release(dev);
handle->rix_fate = 0;
}
+/**
+ * Release a sample resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_sample_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t idx = handle->dvh.rix_sample;
+ struct mlx5_flow_dv_sample_resource *cache_resource;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ idx);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->verbs_action);
+ DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
+ if (cache_resource->verbs_action)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->verbs_action));
+ if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+ if (cache_resource->default_miss)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->default_miss));
+ }
+ if (cache_resource->normal_path_tbl)
+ flow_dv_tbl_resource_release(dev,
+ cache_resource->normal_path_tbl);
+ }
+ if (cache_resource->sample_idx.rix_hrxq &&
+ !mlx5_hrxq_release(dev,
+ cache_resource->sample_idx.rix_hrxq))
+ cache_resource->sample_idx.rix_hrxq = 0;
+ if (cache_resource->sample_idx.rix_tag &&
+ !flow_dv_tag_release(dev,
+ cache_resource->sample_idx.rix_tag))
+ cache_resource->sample_idx.rix_tag = 0;
+ if (cache_resource->sample_idx.cnt) {
+ flow_dv_counter_release(dev,
+ cache_resource->sample_idx.cnt);
+ cache_resource->sample_idx.cnt = 0;
+ }
+ if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+ &priv->sh->sample_action_list, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
+ DRV_LOG(DEBUG, "sample resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Release a destination array resource.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *handle)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_dest_array_resource *cache_resource;
+ struct mlx5_flow_sub_actions_idx *mdest_act_res;
+ uint32_t idx = handle->dvh.rix_dest_array;
+ uint32_t i = 0;
+
+ cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ idx);
+ if (!cache_resource)
+ return 0;
+ MLX5_ASSERT(cache_resource->action);
+ DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
+ (void *)cache_resource,
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
+ if (cache_resource->action)
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->action));
+ for (; i < cache_resource->num_of_dest; i++) {
+ mdest_act_res = &cache_resource->sample_idx[i];
+ if (mdest_act_res->rix_hrxq) {
+ mlx5_hrxq_release(dev,
+ mdest_act_res->rix_hrxq);
+ mdest_act_res->rix_hrxq = 0;
+ }
+ if (mdest_act_res->rix_encap_decap) {
+ flow_dv_encap_decap_resource_release(dev,
+ mdest_act_res->rix_encap_decap);
+ mdest_act_res->rix_encap_decap = 0;
+ }
+ if (mdest_act_res->rix_port_id_action) {
+ flow_dv_port_id_action_resource_release(dev,
+ mdest_act_res->rix_port_id_action);
+ mdest_act_res->rix_port_id_action = 0;
+ }
+ if (mdest_act_res->rix_tag) {
+ flow_dv_tag_release(dev,
+ mdest_act_res->rix_tag);
+ mdest_act_res->rix_tag = 0;
+ }
+ }
+ ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+ &priv->sh->dest_array_list, idx,
+ cache_resource, next);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
+ DRV_LOG(DEBUG, "destination array resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Remove the flow from the NIC but keeps it in memory.
* Lock free, (mutex should be acquired by caller).
flow->dev_handles = dev_handle->next.next;
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_sample)
+ flow_dv_sample_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.rix_dest_array)
+ flow_dv_dest_array_resource_release(dev, dev_handle);
if (dev_handle->dvh.rix_encap_decap)
- flow_dv_encap_decap_resource_release(dev, dev_handle);
+ flow_dv_encap_decap_resource_release(dev,
+ dev_handle->dvh.rix_encap_decap);
if (dev_handle->dvh.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_handle);
+ flow_dv_modify_hdr_resource_release(dev, dev_handle);
if (dev_handle->dvh.rix_push_vlan)
flow_dv_push_vlan_action_resource_release(dev,
dev_handle);
"counters are not available");
}
+/**
+ * Query a flow rule AGE action for aging information.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the sub flow.
+ * @param[out] data
+ * data retrieved by the query.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
+ void *data, struct rte_flow_error *error)
+{
+ struct rte_flow_query_age *resp = data;
+
+ if (flow->counter) {
+ struct mlx5_age_param *age_param =
+ flow_dv_counter_idx_get_age(dev, flow->counter);
+
+ if (!age_param || !age_param->timeout)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot read age data");
+ resp->aged = __atomic_load_n(&age_param->state,
+ __ATOMIC_RELAXED) ==
+ AGE_TMOUT ? 1 : 0;
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit =
+ __atomic_load_n(&age_param->sec_since_last_hit,
+ __ATOMIC_RELAXED);
+ return 0;
+ }
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "age data not available");
+}
+
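/*
 * Illustrative sketch, not part of this patch: querying the AGE action of a
 * rule through the public API, which lands in flow_dv_query_age() above.
 * The port id and flow handle come from the caller.
 */
#include <stdio.h>
#include <rte_flow.h>

static int
print_flow_age(uint16_t port_id, struct rte_flow *flow)
{
	static const struct rte_flow_action age_action = {
		.type = RTE_FLOW_ACTION_TYPE_AGE,
	};
	struct rte_flow_query_age resp = { 0 };
	struct rte_flow_error err;

	if (rte_flow_query(port_id, flow, &age_action, &resp, &err))
		return -1;
	if (resp.aged)
		printf("flow has aged out\n");
	else if (resp.sec_since_last_hit_valid)
		printf("%u seconds since last hit\n",
		       (unsigned int)resp.sec_since_last_hit);
	return 0;
}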
/**
* Query a flow.
*
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_query_count(dev, flow, data, error);
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_query_age(dev, flow, data, error);
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
return -1;
}
+/**
+ * Validate the batch counter support in root table.
+ *
+ * Create a simple flow with an invalid counter and a jump to another table
+ * on the root table to validate whether a batch counter with offset is
+ * supported on the root table or not.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_match_params mask = {
+ .size = sizeof(mask.buf),
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf),
+ };
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 0,
+ .match_criteria_enable = 0,
+ .match_mask = (void *)&mask,
+ };
+ void *actions[2] = { 0 };
+ struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
+ struct mlx5_devx_obj *dcs = NULL;
+ void *matcher = NULL;
+ void *flow = NULL;
+ int i, ret = -1;
+
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, NULL);
+ if (!tbl)
+ goto err;
+ dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, NULL);
+ if (!dest_tbl)
+ goto err;
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ if (!dcs)
+ goto err;
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
+ &actions[0]);
+ if (ret)
+ goto err;
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (dest_tbl->obj, &actions[1]);
+ if (ret)
+ goto err;
+ dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &matcher);
+ if (ret)
+ goto err;
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
+ actions, &flow);
+err:
+ /*
+ * If the batch counter with offset is not supported, the driver does not
+ * validate the invalid offset value and flow creation should succeed. In
+ * this case, batch counters are not supported in the root table.
+ *
+ * Otherwise, if flow creation fails, the counter offset is supported.
+ */
+ if (flow) {
+ DRV_LOG(INFO, "Batch counter is not supported in root "
+ "table. Switch to fallback mode.");
+ rte_errno = ENOTSUP;
+ ret = -rte_errno;
+ claim_zero(mlx5_flow_os_destroy_flow(flow));
+ } else {
+ /* Check matcher to make sure validate fail at flow create. */
+ if (!matcher || errno != EINVAL)
+ DRV_LOG(ERR, "Unexpected error in counter offset "
+ "support detection");
+ ret = 0;
+ }
+ for (i = 0; i < 2; i++) {
+ if (actions[i])
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (actions[i]));
+ }
+ if (matcher)
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
+ if (tbl)
+ flow_dv_tbl_resource_release(dev, tbl);
+ if (dest_tbl)
+ flow_dv_tbl_resource_release(dev, dest_tbl);
+ if (dcs)
+ claim_zero(mlx5_devx_cmd_destroy(dcs));
+ return ret;
+}
+
/**
* Query a devx counter.
*
uint32_t cnt;
flow_dv_shared_lock(dev);
- cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
+ cnt = flow_dv_counter_alloc(dev, 0);
flow_dv_shared_unlock(dev);
return cnt;
}
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+