flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
struct mlx5_flow *dev_flow, bool tunnel_decap)
{
+ uint64_t layers = dev_flow->handle->layers;
+
/*
* If layers is already initialized, this dev_flow is the suffix
* flow and the layer flags were set by the prefix flow. Use the
* layer flags from the prefix flow, since the suffix flow may not
* carry the user-defined items once the flow is split.
*/
- if (dev_flow->layers) {
- if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ if (layers) {
+ if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
attr->ipv4 = 1;
- else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
attr->ipv6 = 1;
- if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+ if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
attr->tcp = 1;
- else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
attr->udp = 1;
attr->valid = 1;
return;
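
For orientation: the ipv4/ipv6/tcp/udp bits filled in above live in a small attribute union. A sketch of its layout, inferred from the usage in this hunk (field order and the reserved width are assumptions, not copied from mlx5_flow.h); the hunks that follow are in flow_dv_convert_action_modify_tp() and the two TTL-modify helpers:

```c
#include <stdint.h>

/* Assumed layout of union flow_dv_attr, reconstructed from the bit
 * names used in flow_dv_attr_init(); the real definition may differ. */
union flow_dv_attr {
	struct {
		uint32_t valid:1;	/* set once the layers are resolved */
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;			/* all flags as a single word */
};
```
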
@@ ... @@ flow_dv_convert_action_modify_tp
item.spec = &udp;
item.mask = &udp_mask;
field = modify_udp;
- }
- if (attr->tcp) {
+ } else {
+ MLX5_ASSERT(attr->tcp);
memset(&tcp, 0, sizeof(tcp));
memset(&tcp_mask, 0, sizeof(tcp_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {

@@ ... @@ flow_dv_convert_action_modify_ttl
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
- }
- if (attr->ipv6) {
+ } else {
+ MLX5_ASSERT(attr->ipv6);
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = conf->ttl_value;

@@ ... @@ flow_dv_convert_action_modify_dec_ttl
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
- }
- if (attr->ipv6) {
+ } else {
+ MLX5_ASSERT(attr->ipv6);
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = 0xFF;
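
All three hunks above apply the same refactor: flow_dv_attr_init() pre-validates the attributes, so exactly one protocol bit is set and the second test can collapse into an else branch that merely asserts the invariant. A self-contained toy version of the pattern (illustrative names, plain assert() standing in for MLX5_ASSERT):

```c
#include <assert.h>
#include <stdio.h>

/* Stand-in for union flow_dv_attr: pre-validated, so exactly one
 * of the two protocol bits is set when this helper is called. */
struct tp_attr {
	unsigned int udp:1;
	unsigned int tcp:1;
};

static const char *
pick_modify_field(const struct tp_attr *attr)
{
	if (attr->udp)
		return "modify_udp";
	/* Mirrors MLX5_ASSERT(attr->tcp): no second runtime test,
	 * just a debug-time check of the invariant. */
	assert(attr->tcp);
	return "modify_tcp";
}

int
main(void)
{
	struct tp_attr attr = { .tcp = 1 };

	printf("%s\n", pick_modify_field(&attr));
	return 0;
}
```
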
"mark id exceeds the limit");
if (!mask)
mask = &nic_mask;
+ if (!mask->id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_mark),
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
item->spec,
"data cannot be empty");
- if (!spec->data)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
- "data cannot be zero");
@@ ... @@ flow_dv_validate_item_meta
if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
if (!mlx5_flow_ext_mreg_supported(dev))
return rte_flow_error_set(error, ENOTSUP,
}
if (!mask)
mask = &rte_flow_item_meta_mask;
+ if (!mask->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_meta),
"data cannot be empty");
if (!mask)
mask = &rte_flow_item_tag_mask;
+ if (!mask->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_tag),
@@ ... @@ flow_dv_encap_decap_resource_register
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
- resource->flags = dev_flow->group ? 0 : 1;
+ resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
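
This register helper, like the jump, port-id, push-VLAN, and modify-header helpers below, is a lookup-or-create over a refcounted cache; the only change in these hunks is that the result now lands in dev_flow->handle->dvh instead of dev_flow->dv. A generic, self-contained sketch of the caching pattern (plain ints instead of rte_atomic32_t, illustrative names):

```c
#include <stdlib.h>
#include <sys/queue.h>

struct cache_entry {
	LIST_ENTRY(cache_entry) next;
	int key;	/* stands in for the resource description */
	int refcnt;	/* stands in for rte_atomic32_t */
};

LIST_HEAD(cache_head, cache_entry);

/* Return a matching cached entry (refcnt++) or insert a new one. */
static struct cache_entry *
cache_get(struct cache_head *head, int key)
{
	struct cache_entry *e;

	LIST_FOREACH(e, head, next) {
		if (e->key == key) {
			e->refcnt++;
			return e;
		}
	}
	e = calloc(1, sizeof(*e));
	if (e == NULL)
		return NULL;
	e->key = key;
	e->refcnt = 1;
	LIST_INSERT_HEAD(head, e, next);
	return e;
}

int
main(void)
{
	struct cache_head head = LIST_HEAD_INITIALIZER(head);

	/* Second lookup hits the cache and bumps the refcount. */
	return cache_get(&head, 7) && cache_get(&head, 7) ? 0 : 1;
}
```
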
@@ ... @@ flow_dv_jump_tbl_resource_register
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->dv.jump = &tbl_data->jump;
+ dev_flow->handle->dvh.jump = &tbl_data->jump;
return 0;
}
@@ ... @@ flow_dv_port_id_action_resource_register
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ ... @@ flow_dv_push_vlan_action_resource_register
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
@@ ... @@ flow_dv_modify_hdr_resource_register
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
- resource->flags =
- dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ resource->flags = dev_flow->dv.group ? 0 :
+ MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
resource->flags))
return rte_flow_error_set(error, EOVERFLOW,
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
* Internal preparation function. Allocates the DV flow handle;
* its size is constant.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* otherwise NULL and rte_errno is set.
*/
static struct mlx5_flow *
-flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_dv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow);
+ size_t size = sizeof(struct mlx5_flow_handle);
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
- dev_flow = rte_calloc(__func__, 1, size, 0);
- if (!dev_flow) {
+ /* Guard the index to avoid corrupting the pre-allocated flow array. */
+ if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ rte_flow_error_set(error, ENOSPC,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "no free temporary device flow");
+ return NULL;
+ }
+ dev_handle = rte_calloc(__func__, 1, size, 0);
+ if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "not enough memory to create flow");
+ "not enough memory to create flow handle");
return NULL;
}
+ /* Multi-threaded flow insertion is not supported. */
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ dev_flow->handle = dev_handle;
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /*
+ * The matching value needs to be cleared to 0 before use. In the
+ * past it was cleared implicitly by the rte_*alloc API; the explicit
+ * memset takes almost the same time as before.
+ */
+ memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
dev_flow->ingress = attr->ingress;
- dev_flow->transfer = attr->transfer;
+ dev_flow->dv.transfer = attr->transfer;
return dev_flow;
}
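
flow_dv_prepare() now hands out dev_flows from a per-port pre-allocated array (priv->inter_flows, indexed by priv->flow_idx) and only the handle is calloc'd per flow; the ENOSPC check above is what keeps that index from running off the array. A stand-alone sketch of the idiom (the array size and all names here are made up for illustration):

```c
#include <errno.h>

#define NUM_MAX_DEV_FLOWS 32	/* stand-in for MLX5_NUM_MAX_DEV_FLOWS */

struct dev_flow { int payload; };

struct port_priv {
	struct dev_flow inter_flows[NUM_MAX_DEV_FLOWS];
	unsigned int flow_idx;
};

/* Hand out the next temporary flow, failing with ENOSPC instead of
 * overrunning the array. Single-threaded by design, as above. */
static struct dev_flow *
workspace_get(struct port_priv *priv)
{
	if (priv->flow_idx >= NUM_MAX_DEV_FLOWS) {
		errno = ENOSPC;
		return NULL;
	}
	return &priv->inter_flows[priv->flow_idx++];
}
```
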
@@ ... @@ flow_dv_translate_item_vlan
* This is a workaround: masks are not supported,
* and are pre-validated.
*/
- dev_flow->dv.vf_vlan.tag =
+ dev_flow->handle->vf_vlan.tag =
rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ ... @@ flow_dv_matcher_register
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
/* old matcher should not make the table ref++. */
flow_dv_tbl_resource_release(dev, tbl);
return 0;
/* only matcher ref++, table ref++ already done above in get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
@@ ... @@ flow_dv_tag_resource_register
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
}
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
{
struct rte_flow *flow = dev_flow->flow;
- uint64_t items = dev_flow->layers;
+ uint64_t items = dev_flow->handle->layers;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
@@ ... @@ __flow_dv_translate
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint64_t action_flags = 0;
!!priv->fdb_def_rule, &table, error);
if (ret)
return ret;
- dev_flow->group = table;
+ dev_flow->dv.group = table;
if (attr->transfer)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.port_id_action->action;
+ handle->dvh.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
break;
}
tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- if (!dev_flow->dv.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ /*
+ * Only one FLAG or MARK action is supported per device flow
+ * right now, so the tag resource pointer must still be NULL
+ * when it is registered.
+ */
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- if (!dev_flow->dv.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
if (flow_dv_convert_action_set_meta
goto cnt_err;
}
flow->counter = flow_dv_counter_alloc(dev,
- count->shared,
- count->id,
- dev_flow->group);
+ count->shared,
+ count->id,
+ dev_flow->dv.group);
if (flow->counter == NULL)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.push_vlan_res->action;
+ handle->dvh.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
"cannot create jump action.");
}
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.jump->action;
+ handle->dvh.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- dev_flow->dv.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->verbs_action;
}
break;
default:
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
- dev_flow->actions = action_flags;
+ handle->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
items, item_flags, tunnel,
- dev_flow->group);
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
items, item_flags, tunnel,
- dev_flow->group);
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
* Layers may already be initialized from the prefix flow if this
* dev_flow is the suffix flow.
*/
- dev_flow->layers |= item_flags;
+ handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
/* Register matcher. */
/* The reserved field does not need to be set to 0 here. */
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
- tbl_key.table_id = dev_flow->group;
+ tbl_key.table_id = dev_flow->dv.group;
if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
return -rte_errno;
return 0;
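
The matcher is registered under a 64-bit key that combines the table id with the domain (FDB vs. NIC) and direction bits set above; the reserved gap is what the "does not need to be set to 0 here" comment refers to. A sketch of what such a key union can look like (this layout is an assumption, not the literal union mlx5_flow_tbl_key):

```c
#include <stdint.h>

/* Assumed shape of the 64-bit matcher table key; field sizes and
 * order in the real driver may differ. */
union flow_tbl_key {
	struct {
		uint32_t table_id;	/* flow table/group id */
		uint16_t reserved;	/* padding inside the key */
		uint8_t domain;		/* 1 = FDB (transfer), 0 = NIC */
		uint8_t direction;	/* 1 = egress, 0 = ingress */
	};
	uint64_t v64;			/* whole key for hash lookup */
};
```
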
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_dv *dv;
+ struct mlx5_flow_dv_workspace *dv;
+ struct mlx5_flow_handle *dh;
+ struct mlx5_flow_handle_dv *dv_h;
struct mlx5_flow *dev_flow;
struct mlx5_priv *priv = dev->data->dev_private;
int n;
int err;
+ int idx;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
dv = &dev_flow->dv;
+ dh = dev_flow->handle;
+ dv_h = &dh->dvh;
n = dv->actions_n;
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
- if (dev_flow->transfer) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (dv->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
- dv->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dv->hrxq) {
+ dh->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dh->hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
"cannot get drop hash queue");
goto error;
}
- dv->actions[n++] = dv->hrxq->action;
+ dv->actions[n++] = dh->hrxq->action;
}
- } else if (dev_flow->actions &
+ } else if (dh->act_flags &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
- !!(dev_flow->layers &
+ !!(dh->layers &
MLX5_FLOW_LAYER_TUNNEL));
}
if (!hrxq) {
"cannot get hash queue");
goto error;
}
- dv->hrxq = hrxq;
- dv->actions[n++] = dv->hrxq->action;
+ dh->hrxq = hrxq;
+ dv->actions[n++] = dh->hrxq->action;
}
- dv->flow =
- mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+ dh->ib_flow =
+ mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
(void *)&dv->value, n,
dv->actions);
- if (!dv->flow) {
+ if (!dh->ib_flow) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
goto error;
}
if (priv->vmwa_context &&
- dev_flow->dv.vf_vlan.tag &&
- !dev_flow->dv.vf_vlan.created) {
+ dh->vf_vlan.tag && !dh->vf_vlan.created) {
/*
* The rule contains the VLAN pattern.
* For a VF we are going to create a VLAN
* interface to make the hypervisor set the
* correct e-Switch vport context.
*/
- mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+ mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
}
}
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- struct mlx5_flow_dv *dv = &dev_flow->dv;
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
+ if (dh->hrxq) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
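
The error path saves rte_errno before releasing the hash queues and VLAN interfaces, because those release calls may overwrite it, then restores it for the caller. The same idiom with plain errno, as a runnable miniature (noisy_cleanup() is a stand-in for the release calls):

```c
#include <errno.h>
#include <stdio.h>

/* Cleanup helpers may clobber errno as a side effect. */
static void
noisy_cleanup(void)
{
	errno = EBUSY;
}

static int
apply_fail_path(void)
{
	int err = errno;	/* save before cleanup */

	noisy_cleanup();
	errno = err;		/* restore for the caller */
	return -err;
}

int
main(void)
{
	errno = ENOMEM;
	printf("ret=%d errno=%d\n", apply_fail_path(), errno);
	return 0;
}
```
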
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+ struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
/**
* Release an encap/decap resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- flow->dv.encap_decap;
+ handle->dvh.encap_decap;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+ struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
+ handle->dvh.jump;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(cache_resource,
struct mlx5_flow_tbl_data_entry, jump);
/**
* Release a modify-header resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- flow->dv.modify_hdr;
+ handle->dvh.modify_hdr;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
/**
* Release port ID action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_port_id_action_resource *cache_resource =
- flow->dv.port_id_action;
+ handle->dvh.port_id_action;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
/**
* Release push vlan action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
- flow->dv.push_vlan_res;
+ handle->dvh.push_vlan_res;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
static void
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow_dv *dv;
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dh;
if (!flow)
return;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dv = &dev_flow->dv;
- if (dv->flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
- dv->flow = NULL;
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
+ if (dh->ib_flow) {
+ claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+ dh->ib_flow = NULL;
}
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dh->hrxq) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
}
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
if (!flow)
return;
mlx5_flow_meter_detach(flow->meter);
flow->meter = NULL;
}
- while (!LIST_EMPTY(&flow->dev_flows)) {
- dev_flow = LIST_FIRST(&flow->dev_flows);
- LIST_REMOVE(dev_flow, next);
- if (dev_flow->dv.matcher)
- flow_dv_matcher_release(dev, dev_flow);
- if (dev_flow->dv.encap_decap)
- flow_dv_encap_decap_resource_release(dev_flow);
- if (dev_flow->dv.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_flow);
- if (dev_flow->dv.jump)
- flow_dv_jump_tbl_resource_release(dev, dev_flow);
- if (dev_flow->dv.port_id_action)
- flow_dv_port_id_action_resource_release(dev_flow);
- if (dev_flow->dv.push_vlan_res)
- flow_dv_push_vlan_action_resource_release(dev_flow);
- if (dev_flow->dv.tag_resource)
- flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
- rte_free(dev_flow);
+ while (!LIST_EMPTY(&flow->dev_handles)) {
+ dev_handle = LIST_FIRST(&flow->dev_handles);
+ LIST_REMOVE(dev_handle, next);
+ if (dev_handle->dvh.matcher)
+ flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.encap_decap)
+ flow_dv_encap_decap_resource_release(dev_handle);
+ if (dev_handle->dvh.modify_hdr)
+ flow_dv_modify_hdr_resource_release(dev_handle);
+ if (dev_handle->dvh.jump)
+ flow_dv_jump_tbl_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.port_id_action)
+ flow_dv_port_id_action_resource_release(dev_handle);
+ if (dev_handle->dvh.push_vlan_res)
+ flow_dv_push_vlan_action_resource_release(dev_handle);
+ if (dev_handle->dvh.tag_resource)
+ flow_dv_tag_release(dev,
+ dev_handle->dvh.tag_resource);
+ rte_free(dev_handle);
}
}