/**< jump resource, at most one for each table created. */
};
-/*
- * Max number of actions per DV flow.
- * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
- * In rdma-core file providers/mlx5/verbs.c
- */
-#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
+/* Verbs specification header. */
+struct ibv_spec_header {
+ enum ibv_flow_spec_type type;
+ uint16_t size;
+};
+
+struct mlx5_flow_rss {
+ uint32_t level;
+ uint32_t queue_num; /**< Number of entries in @p queue. */
+ uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+};
-/* DV flows structure. */
-struct mlx5_flow_dv {
- struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+/** Device flow handle structure for DV mode only. */
+struct mlx5_flow_handle_dv {
/* Flow DV api: */
struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
- struct mlx5_flow_dv_match_params value;
- /**< Holds the value that the packet is compared to. */
struct mlx5_flow_dv_encap_decap_resource *encap_decap;
/**< Pointer to encap/decap resource in cache. */
struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
/**< Pointer to modify header resource in cache. */
- struct ibv_flow *flow; /**< Installed flow. */
struct mlx5_flow_dv_jump_tbl_resource *jump;
/**< Pointer to the jump action resource. */
 	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
 	/**< Pointer to port ID action resource. */
 	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
 	/**< Pointer to push VLAN action resource in cache. */
struct mlx5_flow_dv_tag_resource *tag_resource;
/**< pointer to the tag action. */
+};
+
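+/*
+ * Device flow data is split in two parts: mlx5_flow_handle below keeps
+ * everything that both the create and the destroy paths need (installed
+ * flow pointer, hash Rx queue, VF VLAN state, cached DV resources),
+ * while the mlx5_flow_resource_* structures further down keep the
+ * create-only data (match value, action array, attributes).
+ */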
+/** Device flow handle structure: used both for creating & destroying. */
+struct mlx5_flow_handle {
+ uint64_t layers;
+ /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+ uint64_t act_flags;
+ /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+ void *ib_flow; /**< Verbs flow pointer. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+ struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
+	union {
+		uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
+		uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+	};
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
- /**< Action list. */
+ struct mlx5_flow_handle_dv dvh;
#endif
- int actions_n; /**< number of actions. */
};
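+
+/*
+ * With this split a destroy path needs only the handle. As an
+ * illustration (mirroring __flow_dv_remove() in mlx5_flow_dv.c):
+ *
+ *	struct mlx5_flow_handle *dh = &dev_flow->handle;
+ *
+ *	if (dh->ib_flow) {
+ *		claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+ *		dh->ib_flow = NULL;
+ *	}
+ *	if (dh->hrxq) {
+ *		mlx5_hrxq_release(dev, dh->hrxq);
+ *		dh->hrxq = NULL;
+ *	}
+ */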
-/* Verbs specification header. */
-struct ibv_spec_header {
- enum ibv_flow_spec_type type;
- uint16_t size;
-};
+/*
+ * Max number of actions per DV flow.
+ * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
+ * in rdma-core file providers/mlx5/verbs.c.
+ */
+#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
- LIST_ENTRY(mlx5_flow_verbs) next;
- unsigned int size; /**< Size of the attribute. */
- struct {
- struct ibv_flow_attr *attr;
- /**< Pointer to the Specification buffer. */
- uint8_t *specs; /**< Pointer to the specifications. */
- };
- struct ibv_flow *flow; /**< Verbs flow pointer. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
- struct mlx5_vf_vlan vf_vlan;
- /**< Structure for VF VLAN workaround. */
+/** Device flow structure only for DV flow creation. */
+struct mlx5_flow_resource_dv {
+ uint32_t group; /**< The group index. */
+ uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+ int actions_n; /**< number of actions. */
+ void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
+ struct mlx5_flow_dv_match_params value;
+ /**< Holds the value that the packet is compared to. */
};
-struct mlx5_flow_rss {
- uint32_t level;
- uint32_t queue_num; /**< Number of entries in @p queue. */
- uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
- uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
- uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+/** Device flow structure only for Verbs flow creation. */
+struct mlx5_flow_resource_verbs {
+ unsigned int size; /**< Size of the attribute. */
+ struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
+ uint8_t *specs; /**< Pointer to the specifications. */
};
/** Device flow structure. */
struct mlx5_flow {
- LIST_ENTRY(mlx5_flow) next;
+ LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
struct rte_flow *flow; /**< Pointer to the main flow. */
- uint64_t layers;
- /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
- uint64_t actions;
- /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+ bool external; /**< true if the flow is created external to PMD. */
uint8_t ingress; /**< 1 if the flow is ingress. */
- uint32_t group; /**< The group index. */
- uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5_flow_dv dv;
+ struct mlx5_flow_resource_dv dv;
#endif
- struct mlx5_flow_verbs verbs;
- };
- union {
- uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */
- uint32_t mtr_flow_id; /**< Unique meter match flow id. */
+ struct mlx5_flow_resource_verbs verbs;
};
- bool external; /**< true if the flow is created external to PMD. */
+ struct mlx5_flow_handle handle;
};
/* Flow meter state. */
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
struct mlx5_flow *dev_flow, bool tunnel_decap)
{
+ uint64_t layers = dev_flow->handle.layers;
+
/*
* If layers is already initialized, it means this dev_flow is the
* suffix flow, the layers flags is set by the prefix flow. Need to
* use the layer flags from prefix flow as the suffix flow may not
* have the user defined items as the flow is split.
*/
- if (dev_flow->layers) {
- if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ if (layers) {
+ if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
attr->ipv4 = 1;
- else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
attr->ipv6 = 1;
- if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+ if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
attr->tcp = 1;
- else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
attr->udp = 1;
attr->valid = 1;
return;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
- resource->flags = dev_flow->group ? 0 : 1;
+ resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle.dvh.encap_decap = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle.dvh.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->dv.jump = &tbl_data->jump;
+ dev_flow->handle.dvh.jump = &tbl_data->jump;
return 0;
}
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle.dvh.port_id_action = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle.dvh.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle.dvh.push_vlan_res = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle.dvh.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
- resource->flags =
- dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ resource->flags = dev_flow->dv.group ? 0 :
+ MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
resource->flags))
return rte_flow_error_set(error, EOVERFLOW,
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle.dvh.modify_hdr = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle.dvh.modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
}
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
dev_flow->ingress = attr->ingress;
- dev_flow->transfer = attr->transfer;
+ dev_flow->dv.transfer = attr->transfer;
return dev_flow;
}
* This is workaround, masks are not supported,
* and pre-validated.
*/
- dev_flow->dv.vf_vlan.tag =
+ dev_flow->handle.vf_vlan.tag =
rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle.dvh.matcher = cache_matcher;
/* old matcher should not make the table ref++. */
flow_dv_tbl_resource_release(dev, tbl);
return 0;
/* only matcher ref++, table ref++ already done above in get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle.dvh.matcher = cache_matcher;
DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle.dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
}
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle.dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
{
struct rte_flow *flow = dev_flow->flow;
- uint64_t items = dev_flow->layers;
+ uint64_t items = dev_flow->handle.layers;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
!!priv->fdb_def_rule, &table, error);
if (ret)
return ret;
- dev_flow->group = table;
+ dev_flow->dv.group = table;
if (attr->transfer)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.port_id_action->action;
+ dev_flow->handle.dvh.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
break;
}
tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- if (!dev_flow->dv.tag_resource)
+ if (!dev_flow->handle.dvh.tag_resource)
if (flow_dv_tag_resource_register
(dev, tag_be, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ dev_flow->handle.dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- if (!dev_flow->dv.tag_resource)
+ if (!dev_flow->handle.dvh.tag_resource)
if (flow_dv_tag_resource_register
(dev, tag_be, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ dev_flow->handle.dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
if (flow_dv_convert_action_set_meta
goto cnt_err;
}
flow->counter = flow_dv_counter_alloc(dev,
- count->shared,
- count->id,
- dev_flow->group);
+ count->shared,
+ count->id,
+ dev_flow->dv.group);
if (flow->counter == NULL)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.push_vlan_res->action;
+ dev_flow->handle.dvh.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->handle.dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->handle.dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->handle.dvh.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->handle.dvh.encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->handle.dvh.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
"cannot create jump action.");
}
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.jump->action;
+ dev_flow->handle.dvh.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- dev_flow->dv.modify_hdr->verbs_action;
+ dev_flow->handle.dvh.modify_hdr->verbs_action;
}
break;
default:
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
- dev_flow->actions = action_flags;
+ dev_flow->handle.act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
items, item_flags, tunnel,
- dev_flow->group);
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
items, item_flags, tunnel,
- dev_flow->group);
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
* Layers may be already initialized from prefix flow if this dev_flow
* is the suffix flow.
*/
- dev_flow->layers |= item_flags;
+ dev_flow->handle.layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
/* Register matcher. */
/* reserved field no needs to be set to 0 here. */
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
- tbl_key.table_id = dev_flow->group;
+ tbl_key.table_id = dev_flow->dv.group;
if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
return -rte_errno;
return 0;
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_dv *dv;
+ struct mlx5_flow_resource_dv *dv;
+ struct mlx5_flow_handle *dh;
+ struct mlx5_flow_handle_dv *dv_h;
struct mlx5_flow *dev_flow;
struct mlx5_priv *priv = dev->data->dev_private;
int n;
int err;
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ dh = &dev_flow->handle;
dv = &dev_flow->dv;
n = dv->actions_n;
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
- if (dev_flow->transfer) {
+ dv_h = &dh->dvh;
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (dv->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
- dv->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dv->hrxq) {
+ dh->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dh->hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
"cannot get drop hash queue");
goto error;
}
- dv->actions[n++] = dv->hrxq->action;
+ dv->actions[n++] = dh->hrxq->action;
}
- } else if (dev_flow->actions &
+ } else if (dh->act_flags &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
- !!(dev_flow->layers &
+ !!(dev_flow->handle.layers &
MLX5_FLOW_LAYER_TUNNEL));
}
if (!hrxq) {
"cannot get hash queue");
goto error;
}
- dv->hrxq = hrxq;
- dv->actions[n++] = dv->hrxq->action;
+ dh->hrxq = hrxq;
+ dv->actions[n++] = dh->hrxq->action;
}
- dv->flow =
- mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+ dh->ib_flow =
+ mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
(void *)&dv->value, n,
dv->actions);
- if (!dv->flow) {
+ if (!dh->ib_flow) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
goto error;
}
if (priv->vmwa_context &&
- dev_flow->dv.vf_vlan.tag &&
- !dev_flow->dv.vf_vlan.created) {
+ dh->vf_vlan.tag && !dh->vf_vlan.created) {
/*
* The rule contains the VLAN pattern.
* For VF we are going to create VLAN
* interface to make hypervisor set correct
* e-Switch vport context.
*/
- mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+ mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
}
}
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- struct mlx5_flow_dv *dv = &dev_flow->dv;
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
+ if (dh_tmp->hrxq) {
+ if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh_tmp->hrxq);
+ dh_tmp->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
flow_dv_matcher_release(struct rte_eth_dev *dev,
struct mlx5_flow *flow)
{
- struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+ struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
{
struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- flow->dv.encap_decap;
+ flow->handle.dvh.encap_decap;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow *flow)
{
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+ struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
+ flow->handle.dvh.jump;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(cache_resource,
struct mlx5_flow_tbl_data_entry, jump);
flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
{
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- flow->dv.modify_hdr;
+ flow->handle.dvh.modify_hdr;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
{
struct mlx5_flow_dv_port_id_action_resource *cache_resource =
- flow->dv.port_id_action;
+ flow->handle.dvh.port_id_action;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
{
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
- flow->dv.push_vlan_res;
+ flow->handle.dvh.push_vlan_res;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
static void
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow_dv *dv;
+ struct mlx5_flow_handle *dh;
struct mlx5_flow *dev_flow;
if (!flow)
return;
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dv = &dev_flow->dv;
- if (dv->flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
- dv->flow = NULL;
+ dh = &dev_flow->handle;
+ if (dh->ib_flow) {
+ claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+ dh->ib_flow = NULL;
}
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dh->hrxq) {
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
}
while (!LIST_EMPTY(&flow->dev_flows)) {
dev_flow = LIST_FIRST(&flow->dev_flows);
LIST_REMOVE(dev_flow, next);
- if (dev_flow->dv.matcher)
+ if (dev_flow->handle.dvh.matcher)
flow_dv_matcher_release(dev, dev_flow);
- if (dev_flow->dv.encap_decap)
+ if (dev_flow->handle.dvh.encap_decap)
flow_dv_encap_decap_resource_release(dev_flow);
- if (dev_flow->dv.modify_hdr)
+ if (dev_flow->handle.dvh.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_flow);
- if (dev_flow->dv.jump)
+ if (dev_flow->handle.dvh.jump)
flow_dv_jump_tbl_resource_release(dev, dev_flow);
- if (dev_flow->dv.port_id_action)
+ if (dev_flow->handle.dvh.port_id_action)
flow_dv_port_id_action_resource_release(dev_flow);
- if (dev_flow->dv.push_vlan_res)
+ if (dev_flow->handle.dvh.push_vlan_res)
flow_dv_push_vlan_action_resource_release(dev_flow);
- if (dev_flow->dv.tag_resource)
- flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
+ if (dev_flow->handle.dvh.tag_resource)
+ flow_dv_tag_release(dev,
+ dev_flow->handle.dvh.tag_resource);
rte_free(dev_flow);
}
}
* Size in bytes of the specification to copy.
*/
static void
-flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
+flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+ void *src, unsigned int size)
{
void *dst;
else
 		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
if (!tunnel)
- dev_flow->verbs.vf_vlan.tag =
+ dev_flow->handle.vf_vlan.tag =
rte_be_to_cpu_16(spec->tci) & 0x0fff;
}
const struct rte_flow_item *item __rte_unused,
uint64_t item_flags)
{
- struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+ struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
struct ibv_flow_spec_tunnel tunnel = {
dev_flow->verbs.attr = (void *)(dev_flow + 1);
dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
dev_flow->ingress = attr->ingress;
- dev_flow->transfer = attr->transfer;
+	/* No transfer attribute: E-Switch is not supported in Verbs mode. */
return dev_flow;
}
"action not supported");
}
}
- dev_flow->actions = action_flags;
+ dev_flow->handle.act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
"item not supported");
}
}
- dev_flow->layers = item_flags;
+ dev_flow->handle.layers = item_flags;
dev_flow->verbs.attr->priority =
mlx5_flow_adjust_priority(dev, priority, subpriority);
dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow_verbs *verbs;
+ struct mlx5_flow_handle *dh;
struct mlx5_flow *dev_flow;
if (!flow)
return;
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- verbs = &dev_flow->verbs;
- if (verbs->flow) {
- claim_zero(mlx5_glue->destroy_flow(verbs->flow));
- verbs->flow = NULL;
+ dh = &dev_flow->handle;
+ if (dh->ib_flow) {
+ claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
+ dh->ib_flow = NULL;
}
- if (verbs->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dh->hrxq) {
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
- }
- if (dev_flow->verbs.vf_vlan.tag &&
- dev_flow->verbs.vf_vlan.created) {
- mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
}
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_verbs *verbs;
+ struct mlx5_flow_handle *dh;
struct mlx5_flow *dev_flow;
int err;
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- verbs = &dev_flow->verbs;
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
- verbs->hrxq = mlx5_hrxq_drop_new(dev);
- if (!verbs->hrxq) {
+ dh = &dev_flow->handle;
+		if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ dh->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dh->hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
flow->rss.queue_num);
if (!hrxq)
hrxq = mlx5_hrxq_new(dev, flow->rss.key,
- MLX5_RSS_HASH_KEY_LEN,
- dev_flow->hash_fields,
- (*flow->rss.queue),
- flow->rss.queue_num,
- !!(dev_flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_RSS_HASH_KEY_LEN,
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
+ flow->rss.queue_num,
+ !!(dev_flow->handle.layers &
+ MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"cannot get hash queue");
goto error;
}
- verbs->hrxq = hrxq;
+ dh->hrxq = hrxq;
}
- verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
- verbs->attr);
- if (!verbs->flow) {
+ dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
+ dev_flow->verbs.attr);
+ if (!dh->ib_flow) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
goto error;
}
if (priv->vmwa_context &&
- dev_flow->verbs.vf_vlan.tag &&
- !dev_flow->verbs.vf_vlan.created) {
+		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
/*
* The rule contains the VLAN pattern.
* For VF we are going to create VLAN
* interface to make hypervisor set correct
* e-Switch vport context.
*/
- mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
+ mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
}
}
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- verbs = &dev_flow->verbs;
- if (verbs->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ dh = &dev_flow->handle;
+ if (dh->hrxq) {
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
- }
- if (dev_flow->verbs.vf_vlan.tag &&
- dev_flow->verbs.vf_vlan.created) {
- mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;