#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif
-#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
- sizeof(struct rte_flow_item_ipv4))
/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
uint32_t attr;
};
+static int
+flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_tbl_resource *tbl);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
* Pointer to item specification.
* @param[out] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
*/
static void
-flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
+flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
+ struct mlx5_flow *dev_flow, bool tunnel_decap)
{
+ uint64_t layers = dev_flow->handle->layers;
+
+ /*
+ * If layers is already initialized, this dev_flow is the suffix flow
+ * and the layer flags were set by the prefix flow. Use the prefix
+ * flow's layer flags, since the suffix flow may not contain the
+ * user-defined items once the flow is split.
+ */
+ if (layers) {
+ if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ attr->ipv4 = 1;
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+ attr->ipv6 = 1;
+ if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+ attr->tcp = 1;
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ attr->udp = 1;
+ attr->valid = 1;
+ return;
+ }
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ uint8_t next_protocol = 0xff;
switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_MPLS:
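+ /*
+ * After tunnel decapsulation the modify actions apply to the
+ * inner packet, so drop the attributes collected from the
+ * outer headers and restart from the inner items below.
+ */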
+ if (tunnel_decap)
+ attr->attr = 0;
+ break;
case RTE_FLOW_ITEM_TYPE_IPV4:
if (!attr->ipv6)
attr->ipv4 = 1;
+ if (item->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ item->mask)->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (item->spec))->hdr.next_proto_id &
+ ((const struct rte_flow_item_ipv4 *)
+ (item->mask))->hdr.next_proto_id;
+ if ((next_protocol == IPPROTO_IPIP ||
+ next_protocol == IPPROTO_IPV6) && tunnel_decap)
+ attr->attr = 0;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
if (!attr->ipv4)
attr->ipv6 = 1;
+ if (item->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ item->mask)->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ (item->spec))->hdr.proto &
+ ((const struct rte_flow_item_ipv6 *)
+ (item->mask))->hdr.proto;
+ if ((next_protocol == IPPROTO_IPIP ||
+ next_protocol == IPPROTO_IPV6) && tunnel_decap)
+ attr->attr = 0;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
if (!attr->tcp)
const struct rte_flow_action_of_set_vlan_vid *conf =
(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
int i = resource->actions_num;
- struct mlx5_modification_cmd *actions = &resource->actions[i];
+ struct mlx5_modification_cmd *actions = resource->actions;
struct field_modify_info *field = modify_vlan_out_first_vid;
if (i >= MLX5_MAX_MODIFY_NUM)
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_tp *conf =
(const struct rte_flow_action_set_tp *)(action->conf);
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->udp) {
memset(&udp, 0, sizeof(udp));
memset(&udp_mask, 0, sizeof(udp_mask));
item.spec = &udp;
item.mask = &udp_mask;
field = modify_udp;
- }
- if (attr->tcp) {
+ } else {
+ MLX5_ASSERT(attr->tcp);
memset(&tcp, 0, sizeof(tcp));
memset(&tcp_mask, 0, sizeof(tcp_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_ttl *conf =
(const struct rte_flow_action_set_ttl *)(action->conf);
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
- }
- if (attr->ipv6) {
+ } else {
+ MLX5_ASSERT(attr->ipv6);
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = conf->ttl_value;
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
flow_dv_convert_action_modify_dec_ttl
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
struct rte_flow_item item;
struct rte_flow_item_ipv4 ipv4;
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
- }
- if (attr->ipv6) {
+ } else {
+ MLX5_ASSERT(attr->ipv6);
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = 0xFF;
"mark id exceeds the limit");
if (!mask)
mask = &nic_mask;
+ if (!mask->id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_mark),
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
item->spec,
"data cannot be empty");
- if (!spec->data)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
- "data cannot be zero");
if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
if (!mlx5_flow_ext_mreg_supported(dev))
return rte_flow_error_set(error, ENOTSUP,
}
if (!mask)
mask = &rte_flow_item_meta_mask;
+ if (!mask->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_meta),
"data cannot be empty");
if (!mask)
mask = &rte_flow_item_tag_mask;
+ if (!mask->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_tag),
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_priv *priv = dev->data->dev_private;
(void)action;
(void)attr;
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, port_id should "
"be after pop VLAN action");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "pop vlan action for VF representor "
+ "not supported on NIC table");
return 0;
}
if (items == NULL)
return;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
- items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
- ;
- if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int type = items->type;
+
+ if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
+ type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
+ break;
+ }
+ if (items->type != RTE_FLOW_ITEM_TYPE_END) {
const struct rte_flow_item_vlan *vlan_m = items->mask;
const struct rte_flow_item_vlan *vlan_v = items->spec;
/* Only full match values are accepted */
if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
- vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
vlan->vlan_tci |=
rte_be_to_cpu_16(vlan_v->tci &
MLX5DV_FLOW_VLAN_PCP_MASK_BE);
/**
* Validate the push VLAN action.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] item_flags
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_push_vlan(uint64_t action_flags,
- uint64_t item_flags __rte_unused,
+flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_item_vlan *vlan_m,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
+ const struct mlx5_priv *priv = dev->data->dev_private;
if (!attr->transfer && attr->ingress)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, port_id should "
"be after push VLAN");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "push vlan action for VF representor "
+ "not supported on NIC table");
+ if (vlan_m &&
+ (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
+ (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
+ MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
+ !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
+ !(mlx5_flow_find_action
+ (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "not full match mask on VLAN PCP and "
+ "there is no of_set_vlan_pcp action, "
+ "push VLAN action cannot figure out "
+ "PCP value");
+ if (vlan_m &&
+ (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
+ (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
+ MLX5DV_FLOW_VLAN_VID_MASK_BE &&
+ !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
+ !(mlx5_flow_find_action
+ (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "not full match mask on VLAN VID and "
+ "there is no of_set_vlan_vid action, "
+ "push VLAN action cannot figure out "
+ "VID value");
(void)attr;
return 0;
}
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"meta data must be within reg C0");
- if (!(conf->data & conf->mask))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "zero value has no effect");
return 0;
}
/**
* Validate the L2 encap action.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] action
* Pointer to the action structure.
+ * @param[in] attr
+ * Pointer to flow attributes.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_l2_encap(uint64_t action_flags,
+flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
+ uint64_t action_flags,
const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+
if (!(action->conf))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can only have a single encap action "
"in a flow");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "encap action for VF representor "
+ "not supported on NIC table");
return 0;
}
/**
* Validate a decap action.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] attr
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_decap(uint64_t action_flags,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+flow_dv_validate_action_decap(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+
if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
NULL,
"decap action not supported for "
"egress");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "decap action for VF representor "
+ "not supported on NIC table");
return 0;
}
/**
* Validate the raw encap and decap actions.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] decap
* Pointer to the decap action.
* @param[in] encap
*/
static int
flow_dv_validate_action_raw_encap_decap
- (const struct rte_flow_action_raw_decap *decap,
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_raw_decap *decap,
const struct rte_flow_action_raw_encap *encap,
const struct rte_flow_attr *attr, uint64_t *action_flags,
int *actions_n, struct rte_flow_error *error)
{
+ const struct mlx5_priv *priv = dev->data->dev_private;
int ret;
if (encap && (!encap->size || !encap->data))
"encap combination");
}
if (decap) {
- ret = flow_dv_validate_action_decap(*action_flags, attr, error);
+ ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
+ error);
if (ret < 0)
return ret;
*action_flags |= MLX5_FLOW_ACTION_DECAP;
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"more than one encap action");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "encap action for VF representor "
+ "not supported on NIC table");
*action_flags |= MLX5_FLOW_ACTION_ENCAP;
++(*actions_n);
}
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
- resource->flags = dev_flow->group ? 0 : 1;
+ resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
(void *)&tbl_data->jump, cnt);
} else {
+ /* An existing jump action must not increase the table refcnt. */
+ flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
MLX5_ASSERT(tbl_data->jump.action);
DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->dv.jump = &tbl_data->jump;
+ dev_flow->handle->dvh.jump = &tbl_data->jump;
return 0;
}
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L4))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no transport layer "
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no TCP item in"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no TCP item in"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
- resource->flags =
- dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ resource->flags = dev_flow->dv.group ? 0 :
+ MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
resource->flags))
return rte_flow_error_set(error, EOVERFLOW,
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
return 0;
}
-#define MLX5_CNT_CONTAINER_RESIZE 64
-
/**
- * Get or create a flow counter.
+ * Get DV flow counter by index.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] shared
- * Indicate if this counter is shared with other flows.
- * @param[in] id
- * Counter identifier.
+ * @param[in] idx
+ * mlx5 flow counter index in the container.
+ * @param[out] ppool
+ * mlx5 flow counter pool in the container.
*
* @return
- * pointer to flow counter on success, NULL otherwise and rte_errno is set.
+ * Pointer to the counter, NULL otherwise.
*/
static struct mlx5_flow_counter *
-flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
- uint32_t id)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter *cnt = NULL;
- struct mlx5_devx_obj *dcs = NULL;
-
- if (!priv->config.devx) {
- rte_errno = ENOTSUP;
- return NULL;
- }
- if (shared) {
- TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
- if (cnt->shared && cnt->id == id) {
- cnt->ref_cnt++;
- return cnt;
- }
- }
- }
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
- if (!dcs)
- return NULL;
- cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
- if (!cnt) {
- claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
- rte_errno = ENOMEM;
- return NULL;
- }
- struct mlx5_flow_counter tmpl = {
- .shared = shared,
- .ref_cnt = 1,
- .id = id,
- .dcs = dcs,
- };
- tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
- if (!tmpl.action) {
- claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
- rte_errno = errno;
- rte_free(cnt);
- return NULL;
- }
- *cnt = tmpl;
- TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
- return cnt;
-}
-
-/**
- * Release a flow counter.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] counter
- * Pointer to the counter handler.
- */
-static void
-flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *counter)
+flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
+ uint32_t idx,
+ struct mlx5_flow_counter_pool **ppool)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_pool *pool;
+ uint32_t batch = 0;
- if (!counter)
- return;
- if (--counter->ref_cnt == 0) {
- TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
- claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
- rte_free(counter);
- }
-}
-
-/**
- * Query a devx flow counter.
- *
- * @param[in] dev
- * Pointer to the Ethernet device structure.
- * @param[in] cnt
- * Pointer to the flow counter.
- * @param[out] pkts
- * The statistics value of packets.
- * @param[out] bytes
- * The statistics value of bytes.
- *
- * @return
- * 0 on success, otherwise a negative errno value and rte_errno is set.
- */
-static inline int
-_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
- struct mlx5_flow_counter *cnt, uint64_t *pkts,
- uint64_t *bytes)
-{
- return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
- 0, NULL, NULL, 0);
-}
-
-/**
- * Get a pool by a counter.
- *
- * @param[in] cnt
- * Pointer to the counter.
- *
- * @return
- * The counter pool.
- */
-static struct mlx5_flow_counter_pool *
-flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
-{
- if (!cnt->batch) {
- cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
- return (struct mlx5_flow_counter_pool *)cnt - 1;
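+ /*
+ * Counter indices are 1-based; indices at or above
+ * MLX5_CNT_BATCH_OFFSET belong to the batch container.
+ */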
+ idx--;
+ if (idx >= MLX5_CNT_BATCH_OFFSET) {
+ idx -= MLX5_CNT_BATCH_OFFSET;
+ batch = 1;
}
- return cnt->pool;
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
+ MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
+ pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
+ MLX5_ASSERT(pool);
+ if (ppool)
+ *ppool = pool;
+ return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL];
}
/**
static struct mlx5_flow_counter_pool *
flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
{
- struct mlx5_flow_counter_pool *pool;
+ uint32_t i;
+ uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
- TAILQ_FOREACH(pool, &cont->pool_list, next) {
+ for (i = 0; i < n_valid; i++) {
+ struct mlx5_flow_counter_pool *pool = cont->pools[i];
int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
- MLX5_COUNTERS_PER_POOL;
+ MLX5_COUNTERS_PER_POOL;
- if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+ if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
+ /*
+ * Move the pool to the head, as counter allocation
+ * always uses the first pool in the container.
+ */
+ if (pool != TAILQ_FIRST(&cont->pool_list)) {
+ TAILQ_REMOVE(&cont->pool_list, pool, next);
+ TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
+ }
return pool;
- };
+ }
+ }
return NULL;
}
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
+ mkey_attr.relaxed_ordering = 1;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
MLX5_CNT_CONTAINER(priv->sh, batch, 0);
struct mlx5_pools_container *new_cont =
MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
- struct mlx5_counter_stats_mem_mng *mem_mng;
+ struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
int i;
- if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
+ /* Fallback mode has no background thread. Skip the check. */
+ if (!priv->counter_fallback &&
+ cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
/* The last resize still hasn't detected by the host thread. */
rte_errno = EAGAIN;
return NULL;
if (cont->n)
memcpy(new_cont->pools, cont->pools, cont->n *
sizeof(struct mlx5_flow_counter_pool *));
- mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
- MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
- if (!mem_mng) {
- rte_free(new_cont->pools);
- return NULL;
+ /*
+ * Fallback mode queries the counter directly, so no background
+ * query resources are needed.
+ */
+ if (!priv->counter_fallback) {
+ mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
+ MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
+ if (!mem_mng) {
+ rte_free(new_cont->pools);
+ return NULL;
+ }
+ for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
+ LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
+ mem_mng->raws +
+ MLX5_CNT_CONTAINER_RESIZE +
+ i, next);
+ } else {
+ /*
+ * Release the old container pools directly, as there is no
+ * background thread to do it.
+ */
+ rte_free(cont->pools);
}
- for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
- LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
- mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
- i, next);
new_cont->n = resize;
rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
TAILQ_INIT(&new_cont->pool_list);
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] cnt
- * Pointer to the flow counter.
+ * Index to the flow counter.
* @param[out] pkts
* The statistics value of packets.
* @param[out] bytes
* 0 on success, otherwise a negative errno value and rte_errno is set.
*/
static inline int
-_flow_dv_query_count(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *cnt, uint64_t *pkts,
+_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
uint64_t *bytes)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_pool *pool =
- flow_dv_counter_pool_get(cnt);
- int offset = cnt - &pool->counters_raw[0];
+ struct mlx5_flow_counter_pool *pool = NULL;
+ struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
+ int offset;
- if (priv->counter_fallback)
- return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
+ cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
+ MLX5_ASSERT(pool);
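+ /*
+ * Single (non-batch) counters have their own dcs object; in
+ * fallback mode query it synchronously via DevX instead of
+ * reading the batch raw statistics memory.
+ */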
+ if (counter < MLX5_CNT_BATCH_OFFSET) {
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ if (priv->counter_fallback)
+ return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
+ 0, pkts, bytes, 0, NULL, NULL, 0);
+ }
rte_spinlock_lock(&pool->sl);
/*
* current allocated in parallel to the host reading.
* In this case the new counter values must be reported as 0.
*/
- if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
+ if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
*pkts = 0;
*bytes = 0;
} else {
+ offset = cnt - &pool->counters_raw[0];
*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
}
* The devX counter handle.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
+ * @param[in/out] cont_cur
+ * Pointer to the container pointer; it will be updated on pool resize.
*
* @return
- * A new pool pointer on success, NULL otherwise and rte_errno is set.
+ * The pool container pointer on success, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_flow_counter_pool *
+static struct mlx5_pools_container *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
uint32_t batch)
{
if (!cont)
return NULL;
}
- size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
- sizeof(struct mlx5_flow_counter);
+ size = sizeof(*pool);
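+ /*
+ * Only non-batch pools need the extend memory holding the
+ * per-counter dcs and shared-counter information.
+ */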
+ if (!batch)
+ size += MLX5_COUNTERS_PER_POOL *
+ sizeof(struct mlx5_flow_counter_ext);
pool = rte_calloc(__func__, 1, size, 0);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
}
pool->min_dcs = dcs;
- pool->raw = cont->init_mem_mng->raws + n_valid %
+ if (!priv->counter_fallback)
+ pool->raw = cont->init_mem_mng->raws + n_valid %
MLX5_CNT_CONTAINER_RESIZE;
pool->raw_hw = NULL;
rte_spinlock_init(&pool->sl);
/*
* The generation of the new allocated counters in this pool is 0, 2 in
* the pool generation makes all the counters valid for allocation.
+ * The start and end query generations ensure that a counter released
+ * in the gap between a query trigger and its statistics update is not
+ * reallocated before the last query has finished and the stats have
+ * been written to memory.
*/
- rte_atomic64_set(&pool->query_gen, 0x2);
+ rte_atomic64_set(&pool->start_query_gen, 0x2);
+ /*
+ * There is no background query thread in fallback mode, so set
+ * end_query_gen to the maximum value since there is no need to
+ * wait for a statistics update.
+ */
+ rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ?
+ INT64_MAX : 0x2);
TAILQ_INIT(&pool->counters);
- TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
+ TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
+ pool->index = n_valid;
cont->pools[n_valid] = pool;
/* Pool initialization must be updated before host thread access. */
rte_cio_wmb();
rte_atomic16_add(&cont->n_valid, 1);
- return pool;
+ return cont;
}
/**
* Whether the pool is for counter that was allocated by batch command.
*
* @return
- * The free counter pool pointer and @p cnt_free is set on success,
+ * The counter container pointer and @p cnt_free is set on success,
* NULL otherwise and rte_errno is set.
*/
-static struct mlx5_flow_counter_pool *
+static struct mlx5_pools_container *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
struct mlx5_flow_counter **cnt_free,
uint32_t batch)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_pools_container *cont;
struct mlx5_flow_counter_pool *pool;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
uint32_t i;
+ cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
if (!batch) {
/* bulk_bitmap must be 0 for single counter allocation. */
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
if (!dcs)
return NULL;
- pool = flow_dv_find_pool_by_id
- (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
+ pool = flow_dv_find_pool_by_id(cont, dcs->id);
if (!pool) {
- pool = flow_dv_pool_create(dev, dcs, batch);
- if (!pool) {
+ cont = flow_dv_pool_create(dev, dcs, batch);
+ if (!cont) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
+ pool = TAILQ_FIRST(&cont->pool_list);
} else if (dcs->id < pool->min_dcs->id) {
rte_atomic64_set(&pool->a64_dcs,
(int64_t)(uintptr_t)dcs);
}
- cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
+ i = dcs->id % MLX5_COUNTERS_PER_POOL;
+ cnt = &pool->counters_raw[i];
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
- cnt->dcs = dcs;
+ MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
*cnt_free = cnt;
- return pool;
+ return cont;
}
/* bulk_bitmap is in 128 counters units. */
if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
rte_errno = ENODATA;
return NULL;
}
- pool = flow_dv_pool_create(dev, dcs, batch);
- if (!pool) {
+ cont = flow_dv_pool_create(dev, dcs, batch);
+ if (!cont) {
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
+ pool = TAILQ_FIRST(&cont->pool_list);
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = &pool->counters_raw[i];
- cnt->pool = pool;
TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
}
*cnt_free = &pool->counters_raw[0];
- return pool;
+ return cont;
}
/**
* Pointer to the relevant counter pool container.
* @param[in] id
* The shared counter ID to search.
+ * @param[out] ppool
+ * mlx5 flow counter pool in the container.
*
* @return
- * NULL if not existed, otherwise pointer to the shared counter.
+ * NULL if not existed, otherwise pointer to the shared extend counter.
*/
-static struct mlx5_flow_counter *
-flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
- uint32_t id)
+static struct mlx5_flow_counter_ext *
+flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
+ struct mlx5_flow_counter_pool **ppool)
{
- static struct mlx5_flow_counter *cnt;
+ static struct mlx5_flow_counter_ext *cnt;
struct mlx5_flow_counter_pool *pool;
- int i;
+ uint32_t pool_idx;
+ uint32_t i;
+ uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
- TAILQ_FOREACH(pool, &cont->pool_list, next) {
+ for (pool_idx = 0; pool_idx < n_valid; pool_idx++) {
+ pool = cont->pools[pool_idx];
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
- cnt = &pool->counters_raw[i];
- if (cnt->ref_cnt && cnt->shared && cnt->id == id)
+ cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
+ if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
+ if (ppool)
+ *ppool = pool;
return cnt;
+ }
}
}
return NULL;
* Counter flow group.
*
* @return
- * pointer to flow counter on success, NULL otherwise and rte_errno is set.
+ * Index to flow counter on success, 0 otherwise and rte_errno is set.
*/
-static struct mlx5_flow_counter *
+static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
uint16_t group)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt_free = NULL;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
/*
* Currently group 0 flow counter cannot be assigned to a flow if it is
* not the first one in the batch counter allocation, so it is better
* A counter can be shared between different groups so need to take
* shared counters from the single container.
*/
- uint32_t batch = (group && !shared) ? 1 : 0;
+ uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
0);
+ uint32_t cnt_idx;
- if (priv->counter_fallback)
- return flow_dv_counter_alloc_fallback(dev, shared, id);
if (!priv->config.devx) {
rte_errno = ENOTSUP;
- return NULL;
+ return 0;
}
if (shared) {
- cnt_free = flow_dv_counter_shared_search(cont, id);
- if (cnt_free) {
- if (cnt_free->ref_cnt + 1 == 0) {
+ cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+ if (cnt_ext) {
+ if (cnt_ext->ref_cnt + 1 == 0) {
rte_errno = E2BIG;
- return NULL;
+ return 0;
}
- cnt_free->ref_cnt++;
- return cnt_free;
+ cnt_ext->ref_cnt++;
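+ /*
+ * Shared counters always live in the non-batch container;
+ * rebuild the 1-based counter index from the pool index and
+ * the dcs offset within the pool.
+ */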
+ cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
+ (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
+ + 1;
+ return cnt_idx;
}
}
/* Pools which has a free counters are in the start. */
* updated too.
*/
cnt_free = TAILQ_FIRST(&pool->counters);
- if (cnt_free && cnt_free->query_gen + 1 <
- rte_atomic64_read(&pool->query_gen))
+ if (cnt_free && cnt_free->query_gen <
+ rte_atomic64_read(&pool->end_query_gen))
break;
cnt_free = NULL;
}
if (!cnt_free) {
- pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
- if (!pool)
- return NULL;
+ cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
+ if (!cont)
+ return 0;
+ pool = TAILQ_FIRST(&cont->pool_list);
}
- cnt_free->batch = batch;
+ if (!batch)
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
/* Create a DV counter action only in the first time usage. */
if (!cnt_free->action) {
uint16_t offset;
dcs = pool->min_dcs;
} else {
offset = 0;
- dcs = cnt_free->dcs;
+ dcs = cnt_ext->dcs;
}
cnt_free->action = mlx5_glue->dv_create_flow_action_counter
(dcs->obj, offset);
if (!cnt_free->action) {
rte_errno = errno;
- return NULL;
+ return 0;
}
}
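+ /*
+ * Index 0 means "no counter", so the returned index is 1-based;
+ * batch counters are additionally offset by MLX5_CNT_BATCH_OFFSET.
+ */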
+ cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
+ (cnt_free - pool->counters_raw));
+ cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
/* Update the counter reset values. */
- if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
+ if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
- return NULL;
- cnt_free->shared = shared;
- cnt_free->ref_cnt = 1;
- cnt_free->id = id;
- if (!priv->sh->cmng.query_thread_on)
+ return 0;
+ if (cnt_ext) {
+ cnt_ext->shared = shared;
+ cnt_ext->ref_cnt = 1;
+ cnt_ext->id = id;
+ }
+ if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
/* Start the asynchronous batch query by the host thread. */
mlx5_set_query_alarm(priv->sh);
TAILQ_REMOVE(&pool->counters, cnt_free, next);
TAILQ_REMOVE(&cont->pool_list, pool, next);
TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
}
- return cnt_free;
+ return cnt_idx;
}
/**
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] counter
- * Pointer to the counter handler.
+ * Index to the counter handler.
*/
static void
-flow_dv_counter_release(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *counter)
+flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
- struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter_pool *pool = NULL;
+ struct mlx5_flow_counter *cnt;
+ struct mlx5_flow_counter_ext *cnt_ext = NULL;
if (!counter)
return;
- if (priv->counter_fallback) {
- flow_dv_counter_release_fallback(dev, counter);
- return;
- }
- if (--counter->ref_cnt == 0) {
- struct mlx5_flow_counter_pool *pool =
- flow_dv_counter_pool_get(counter);
-
- /* Put the counter in the end - the last updated one. */
- TAILQ_INSERT_TAIL(&pool->counters, counter, next);
- counter->query_gen = rte_atomic64_read(&pool->query_gen);
+ cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
+ MLX5_ASSERT(pool);
+ if (counter < MLX5_CNT_BATCH_OFFSET) {
+ cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+ if (cnt_ext && --cnt_ext->ref_cnt)
+ return;
}
+ /* Put the counter in the end - the last updated one. */
+ TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+ /*
+ * Counters released between the query trigger and the query
+ * handler need to wait for the next round of query, since
+ * packets arriving in that gap period are not accounted to
+ * the old counter.
+ */
+ cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
}
/**
const struct rte_flow_action_raw_decap *decap;
const struct rte_flow_action_raw_encap *encap;
const struct rte_flow_action_rss *rss;
- struct rte_flow_item_tcp nic_tcp_mask = {
+ const struct rte_flow_item_tcp nic_tcp_mask = {
.hdr = {
.tcp_flags = 0xFF,
.src_port = RTE_BE16(UINT16_MAX),
.dst_port = RTE_BE16(UINT16_MAX),
}
};
+ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ },
+ };
+ const struct rte_flow_item_ipv6 nic_ipv6_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
uint16_t queue_index = 0xFFFF;
+ const struct rte_flow_item_vlan *vlan_m = NULL;
if (items == NULL)
return -1;
} else {
ether_type = 0;
}
+ /* Store outer VLAN mask for of_push_vlan action. */
+ if (!tunnel)
+ vlan_m = items->mask;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
last_item,
- ether_type, NULL,
+ ether_type,
+ &nic_ipv4_mask,
error);
if (ret < 0)
return ret;
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
last_item,
- ether_type, NULL,
+ ether_type,
+ &nic_ipv6_mask,
error);
if (ret < 0)
return ret;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- ret = flow_dv_validate_action_push_vlan(action_flags,
- item_flags,
+ ret = flow_dv_validate_action_push_vlan(dev,
+ action_flags,
+ vlan_m,
actions, attr,
error);
if (ret < 0)
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- ret = flow_dv_validate_action_l2_encap(action_flags,
- actions, error);
+ ret = flow_dv_validate_action_l2_encap(dev,
+ action_flags,
+ actions, attr,
+ error);
if (ret < 0)
return ret;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- ret = flow_dv_validate_action_decap(action_flags, attr,
- error);
+ ret = flow_dv_validate_action_decap(dev, action_flags,
+ attr, error);
if (ret < 0)
return ret;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
ret = flow_dv_validate_action_raw_encap_decap
- (NULL, actions->conf, attr, &action_flags,
+ (dev, NULL, actions->conf, attr, &action_flags,
&actions_n, error);
if (ret < 0)
return ret;
encap = actions->conf;
}
ret = flow_dv_validate_action_raw_encap_decap
- (decap ? decap : &empty_decap, encap,
+ (dev,
+ decap ? decap : &empty_decap, encap,
attr, &action_flags, &actions_n,
error);
if (ret < 0)
* Internal preparation function. Allocates the DV flow size,
* this size is constant.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* otherwise NULL and rte_errno is set.
*/
static struct mlx5_flow *
-flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_dv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow);
+ size_t size = sizeof(struct mlx5_flow_handle);
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
- dev_flow = rte_calloc(__func__, 1, size, 0);
- if (!dev_flow) {
+ /* Guard against overflowing the pre-allocated device flow array. */
+ if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ rte_flow_error_set(error, ENOSPC,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not free temporary device flow");
+ return NULL;
+ }
+ dev_handle = rte_calloc(__func__, 1, size, 0);
+ if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "not enough memory to create flow");
+ "not enough memory to create flow handle");
return NULL;
}
+ /* Multi-threaded flow creation is not supported. */
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ dev_flow->handle = dev_handle;
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /*
+ * The matching value needs to be cleared to 0 before using. It used
+ * to be cleared automatically by the rte_*alloc API; the time spent
+ * is almost the same as before.
+ */
+ memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
dev_flow->ingress = attr->ingress;
- dev_flow->transfer = attr->transfer;
+ dev_flow->dv.transfer = attr->transfer;
return dev_flow;
}
/* The value must be in the range of the mask. */
for (i = 0; i < sizeof(eth_m->dst); ++i)
l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(eth_m->type));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
- *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
if (eth_v->type) {
/* When ethertype is present set mask for tagged VLAN. */
MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
/* Set value for tagged VLAN if ethertype is 802.1Q. */
if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
- eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ))
+ eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
1);
+ /* Return here to avoid setting match on ethertype. */
+ return;
+ }
}
+ /*
+ * HW supports match on one Ethertype, the Ethertype following the last
+ * VLAN tag of the packet (see PRM).
+ * Set match on ethertype only if ETH header is not followed by VLAN.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
/**
* This is workaround, masks are not supported,
* and pre-validated.
*/
- dev_flow->dv.vf_vlan.tag =
+ dev_flow->handle->vf_vlan.tag =
rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
.dst_addr = RTE_BE32(0xffffffff),
.type_of_service = 0xff,
.next_proto_id = 0xff,
+ .time_to_live = 0xff,
},
};
void *headers_m;
else
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
if (!ipv4_v)
return;
if (!ipv4_m)
ipv4_m->hdr.next_proto_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv4_m->hdr.time_to_live);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
else
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
if (!ipv6_v)
return;
if (!ipv6_m)
ipv6_m->hdr.proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv6_v->hdr.proto & ipv6_m->hdr.proto);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ /* Hop limit. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv6_m->hdr.hop_limits);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
}
/**
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
+ /* GRE K bit must be on and should already be validated */
+ MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
if (!key_v)
return;
if (!key_m)
key_m = &gre_key_default_mask;
- /* GRE K bit must be on and should already be validated */
- MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
- MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
rte_be_to_cpu_32(*key_m) >> 8);
MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
/* old matcher should not make the table ref++. */
flow_dv_tbl_resource_release(dev, tbl);
return 0;
/* only matcher ref++, table ref++ already done above in get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
}
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
{
struct rte_flow *flow = dev_flow->flow;
- uint64_t items = dev_flow->layers;
+ uint64_t items = dev_flow->handle->layers;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint64_t action_flags = 0;
!!priv->fdb_def_rule, &table, error);
if (ret)
return ret;
- dev_flow->group = table;
+ dev_flow->dv.group = table;
if (attr->transfer)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.port_id_action->action;
+ handle->dvh.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
break;
}
tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- if (!dev_flow->dv.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ /*
+ * Only one FLAG or MARK is supported per device flow
+ * right now, so the tag resource pointer must be zero
+ * before the registration process.
+ */
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- if (!dev_flow->dv.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
if (flow_dv_convert_action_set_meta
goto cnt_err;
}
flow->counter = flow_dv_counter_alloc(dev,
- count->shared,
- count->id,
- dev_flow->group);
- if (flow->counter == NULL)
+ count->shared,
+ count->id,
+ dev_flow->dv.group);
+ if (!flow->counter)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
- flow->counter->action;
+ (flow_dv_counter_get_by_idx(dev,
+ flow->counter, NULL))->action;
action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
cnt_err:
action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- flow_dev_get_vlan_info_from_items(items, &vlan);
+ if (!(action_flags &
+ MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
+ flow_dev_get_vlan_info_from_items(items, &vlan);
vlan.eth_proto = rte_be_to_cpu_16
((((const struct rte_flow_action_of_push_vlan *)
actions->conf)->ethertype));
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.push_vlan_res->action;
+ handle->dvh.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
"cannot create jump action.");
}
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.jump->action;
+ handle->dvh.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
if (flow_dv_convert_action_modify_tp
(mhdr_res, actions, items,
- &flow_attr, error))
+ &flow_attr, dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
break;
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
if (flow_dv_convert_action_modify_dec_ttl
- (mhdr_res, items, &flow_attr, error))
+ (mhdr_res, items, &flow_attr, dev_flow,
+ !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_TTL:
if (flow_dv_convert_action_modify_ttl
- (mhdr_res, actions, items,
- &flow_attr, error))
+ (mhdr_res, actions, items, &flow_attr,
+ dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TTL;
break;
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- dev_flow->dv.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->verbs_action;
}
break;
default:
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
- dev_flow->actions = action_flags;
+ handle->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel,
- dev_flow->group);
+ items, item_flags, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel,
- dev_flow->group);
+ items, item_flags, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
#endif
- dev_flow->layers = item_flags;
+ /*
+ * Layers may already be initialized from the prefix flow if this
+ * dev_flow is the suffix flow.
+ */
+ handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
/* Register matcher. */
/* reserved field no needs to be set to 0 here. */
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
- tbl_key.table_id = dev_flow->group;
+ tbl_key.table_id = dev_flow->dv.group;
if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
return -rte_errno;
return 0;
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_dv *dv;
+ struct mlx5_flow_dv_workspace *dv;
+ struct mlx5_flow_handle *dh;
+ struct mlx5_flow_handle_dv *dv_h;
struct mlx5_flow *dev_flow;
struct mlx5_priv *priv = dev->data->dev_private;
int n;
int err;
+ int idx;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
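+ /*
+ * The device sub-flows are kept in the per-port intermediate flow
+ * array, walk them from the last created one backwards.
+ */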
+ for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
dv = &dev_flow->dv;
+ dh = dev_flow->handle;
+ dv_h = &dh->dvh;
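+ /*
+ * dv is the translation workspace (actions, match value), while
+ * dh/dv_h keep the objects owned by the flow (hrxq, matcher,
+ * IB flow).
+ */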
n = dv->actions_n;
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
- if (dev_flow->transfer) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (dv->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
- dv->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dv->hrxq) {
+ dh->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dh->hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
"cannot get drop hash queue");
goto error;
}
- dv->actions[n++] = dv->hrxq->action;
+ dv->actions[n++] = dh->hrxq->action;
}
- } else if (dev_flow->actions &
+ } else if (dh->act_flags &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
- !!(dev_flow->layers &
+ !!(dh->layers &
MLX5_FLOW_LAYER_TUNNEL));
}
if (!hrxq) {
"cannot get hash queue");
goto error;
}
- dv->hrxq = hrxq;
- dv->actions[n++] = dv->hrxq->action;
+ dh->hrxq = hrxq;
+ dv->actions[n++] = dh->hrxq->action;
}
- dv->flow =
- mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+ dh->ib_flow =
+ mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
(void *)&dv->value, n,
dv->actions);
- if (!dv->flow) {
+ if (!dh->ib_flow) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
goto error;
}
if (priv->vmwa_context &&
- dev_flow->dv.vf_vlan.tag &&
- !dev_flow->dv.vf_vlan.created) {
+ dh->vf_vlan.tag && !dh->vf_vlan.created) {
/*
* The rule contains the VLAN pattern.
* For VF we are going to create VLAN
* interface to make hypervisor set correct
* e-Switch vport context.
*/
- mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+ mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
}
}
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- struct mlx5_flow_dv *dv = &dev_flow->dv;
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
+ if (dh->hrxq) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+ struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
/**
* Release an encap/decap resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- flow->dv.encap_decap;
+ handle->dvh.encap_decap;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+ struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
+ handle->dvh.jump;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(cache_resource,
struct mlx5_flow_tbl_data_entry, jump);
/**
* Release a modify-header resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- flow->dv.modify_hdr;
+ handle->dvh.modify_hdr;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
/**
* Release port ID action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_port_id_action_resource *cache_resource =
- flow->dv.port_id_action;
+ handle->dvh.port_id_action;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
/**
* Release push vlan action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
- flow->dv.push_vlan_res;
+ handle->dvh.push_vlan_res;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
static void
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow_dv *dv;
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dh;
if (!flow)
return;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dv = &dev_flow->dv;
- if (dv->flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
- dv->flow = NULL;
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
+ if (dh->ib_flow) {
+ claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+ dh->ib_flow = NULL;
}
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dh->hrxq) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
}
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
if (!flow)
return;
__flow_dv_remove(dev, flow);
if (flow->counter) {
flow_dv_counter_release(dev, flow->counter);
- flow->counter = NULL;
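+ /* Zero is the invalid counter index. */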
+ flow->counter = 0;
}
if (flow->meter) {
mlx5_flow_meter_detach(flow->meter);
flow->meter = NULL;
}
- while (!LIST_EMPTY(&flow->dev_flows)) {
- dev_flow = LIST_FIRST(&flow->dev_flows);
- LIST_REMOVE(dev_flow, next);
- if (dev_flow->dv.matcher)
- flow_dv_matcher_release(dev, dev_flow);
- if (dev_flow->dv.encap_decap)
- flow_dv_encap_decap_resource_release(dev_flow);
- if (dev_flow->dv.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_flow);
- if (dev_flow->dv.jump)
- flow_dv_jump_tbl_resource_release(dev, dev_flow);
- if (dev_flow->dv.port_id_action)
- flow_dv_port_id_action_resource_release(dev_flow);
- if (dev_flow->dv.push_vlan_res)
- flow_dv_push_vlan_action_resource_release(dev_flow);
- if (dev_flow->dv.tag_resource)
- flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
- rte_free(dev_flow);
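+ /* Release the DV resources referenced by each flow handle. */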
+ while (!LIST_EMPTY(&flow->dev_handles)) {
+ dev_handle = LIST_FIRST(&flow->dev_handles);
+ LIST_REMOVE(dev_handle, next);
+ if (dev_handle->dvh.matcher)
+ flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.encap_decap)
+ flow_dv_encap_decap_resource_release(dev_handle);
+ if (dev_handle->dvh.modify_hdr)
+ flow_dv_modify_hdr_resource_release(dev_handle);
+ if (dev_handle->dvh.jump)
+ flow_dv_jump_tbl_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.port_id_action)
+ flow_dv_port_id_action_resource_release(dev_handle);
+ if (dev_handle->dvh.push_vlan_res)
+ flow_dv_push_vlan_action_resource_release(dev_handle);
+ if (dev_handle->dvh.tag_resource)
+ flow_dv_tag_release(dev,
+ dev_handle->dvh.tag_resource);
+ rte_free(dev_handle);
}
}
"counters are not supported");
if (flow->counter) {
uint64_t pkts, bytes;
+ struct mlx5_flow_counter *cnt;
+
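+ /*
+ * The counter object keeps the hits/bytes values saved at the
+ * last reset.
+ */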
+ cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
+ NULL);
int err = _flow_dv_query_count(dev, flow->counter, &pkts,
&bytes);
NULL, "cannot read counters");
qc->hits_set = 1;
qc->bytes_set = 1;
- qc->hits = pkts - flow->counter->hits;
- qc->bytes = bytes - flow->counter->bytes;
+ qc->hits = pkts - cnt->hits;
+ qc->bytes = bytes - cnt->bytes;
if (qc->reset) {
- flow->counter->hits = pkts;
- flow->counter->bytes = bytes;
+ cnt->hits = pkts;
+ cnt->bytes = bytes;
}
return 0;
}
if (mtd->egress.tbl)
claim_zero(flow_dv_tbl_resource_release(dev,
mtd->egress.tbl));
+ if (mtd->egress.sfx_tbl)
+ claim_zero(flow_dv_tbl_resource_release(dev,
+ mtd->egress.sfx_tbl));
if (mtd->ingress.color_matcher)
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(mtd->ingress.color_matcher));
if (mtd->ingress.tbl)
claim_zero(flow_dv_tbl_resource_release(dev,
mtd->ingress.tbl));
+ if (mtd->ingress.sfx_tbl)
+ claim_zero(flow_dv_tbl_resource_release(dev,
+ mtd->ingress.sfx_tbl));
if (mtd->transfer.color_matcher)
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(mtd->transfer.color_matcher));
if (mtd->transfer.tbl)
claim_zero(flow_dv_tbl_resource_release(dev,
mtd->transfer.tbl));
+ if (mtd->transfer.sfx_tbl)
+ claim_zero(flow_dv_tbl_resource_release(dev,
+ mtd->transfer.sfx_tbl));
if (mtd->drop_actn)
claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
rte_free(mtd);
.match_mask = (void *)&mask,
};
void *actions[METER_ACTIONS];
- struct mlx5_flow_tbl_resource **sfx_tbl;
struct mlx5_meter_domain_info *dtb;
struct rte_flow_error error;
int i = 0;
- if (transfer) {
- sfx_tbl = &sh->fdb_mtr_sfx_tbl;
+ if (transfer)
dtb = &mtb->transfer;
- } else if (egress) {
- sfx_tbl = &sh->tx_mtr_sfx_tbl;
+ else if (egress)
dtb = &mtb->egress;
- } else {
- sfx_tbl = &sh->rx_mtr_sfx_tbl;
+ else
dtb = &mtb->ingress;
- }
- /* If the suffix table in missing, create it. */
- if (!(*sfx_tbl)) {
- *sfx_tbl = flow_dv_tbl_resource_get(dev,
- MLX5_FLOW_TABLE_LEVEL_SUFFIX,
- egress, transfer, &error);
- if (!(*sfx_tbl)) {
- DRV_LOG(ERR, "Failed to create meter suffix table.");
- return -1;
- }
- }
/* Create the meter table with METER level. */
dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
egress, transfer, &error);
DRV_LOG(ERR, "Failed to create meter policer table.");
return -1;
}
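+ /* Each meter domain table set owns its own suffix table. */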
+ /* Create the meter suffix table with SUFFIX level. */
+ dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
+ MLX5_FLOW_TABLE_LEVEL_SUFFIX,
+ egress, transfer, &error);
+ if (!dtb->sfx_tbl) {
+ DRV_LOG(ERR, "Failed to create meter suffix table.");
+ return -1;
+ }
/* Create matchers, Any and Color. */
dv_attr.priority = 3;
dv_attr.match_criteria_enable = 0;
}
/* Create meter count actions */
for (i = 0; i <= RTE_MTR_DROPPED; i++) {
+ struct mlx5_flow_counter *cnt;
+
if (!fm->policer_stats.cnt[i])
continue;
- mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
+ cnt = flow_dv_counter_get_by_idx(dev,
+ fm->policer_stats.cnt[i], NULL);
+ mtb->count_actns[i] = cnt->action;
}
/* Create drop action. */
mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
* Pointer to flow meter structure.
* @param[in] mtb
* Pointer to DV meter table set.
- * @param[in] sfx_tb
- * Pointer to suffix table.
* @param[in] mtr_reg_c
* Color match REG_C.
*
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
struct mlx5_meter_domain_info *dtb,
- struct mlx5_flow_tbl_resource *sfx_tb,
uint8_t mtr_reg_c)
{
struct mlx5_flow_dv_match_params matcher = {
int i;
/* Create jump action. */
- if (!sfx_tb)
- return -1;
if (!dtb->jump_actn)
dtb->jump_actn =
mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (sfx_tb->obj);
+ (dtb->sfx_tbl->obj);
if (!dtb->jump_actn) {
DRV_LOG(ERR, "Failed to create policer jump action.");
goto error;
if (attr->egress) {
ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
- priv->sh->tx_mtr_sfx_tbl,
priv->mtr_color_reg);
if (ret) {
DRV_LOG(ERR, "Failed to create egress policer.");
}
if (attr->ingress) {
ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
- priv->sh->rx_mtr_sfx_tbl,
priv->mtr_color_reg);
if (ret) {
DRV_LOG(ERR, "Failed to create ingress policer.");
}
if (attr->transfer) {
ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
- priv->sh->fdb_mtr_sfx_tbl,
priv->mtr_color_reg);
if (ret) {
DRV_LOG(ERR, "Failed to create transfer policer.");
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] cnt
- * Pointer to the flow counter.
+ * @param[in] counter
+ * Index of the flow counter.
* @param[in] clear
* Set to clear the counter statistics.
* @param[out] pkts
* 0 on success, otherwise return -1.
*/
static int
-flow_dv_counter_query(struct rte_eth_dev *dev,
- struct mlx5_flow_counter *cnt, bool clear,
+flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
uint64_t *pkts, uint64_t *bytes)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt;
uint64_t inn_pkts, inn_bytes;
int ret;
if (!priv->config.devx)
return -1;
- ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
+
+ ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
if (ret)
return -1;
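+ /* Resolve the counter to adjust the raw reading by the last reset values. */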
+ cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
*pkts = inn_pkts - cnt->hits;
*bytes = inn_bytes - cnt->bytes;
if (clear) {
/*
* Mutex-protected thunk to lock-free flow_dv_counter_alloc().
*/
-static struct mlx5_flow_counter *
+static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
- struct mlx5_flow_counter *cnt;
+ uint32_t cnt;
flow_dv_shared_lock(dev);
cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
* Mutex-protected thunk to lock-free flow_dv_counter_release().
*/
static void
-flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
+flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
flow_dv_shared_lock(dev);
flow_dv_counter_release(dev, cnt);