*/
mlx5_flow_list_flush(dev, &priv->flows, true);
mlx5_flow_meter_flush(dev, NULL);
+ /* Free the intermediate buffers for flow creation. */
+ mlx5_flow_free_intermediate(dev);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
err = ENOTSUP;
goto error;
}
+ /*
+ * Allocate the buffer for flow creation, just once.
+ * The allocation must be done before any flow creation.
+ */
+ mlx5_flow_alloc_intermediate(eth_dev);
/* Query availability of metadata reg_c's. */
err = mlx5_flow_discover_mreg_c(eth_dev);
if (err < 0) {
struct mlx5_drop drop_queue; /* Flow drop queues. */
struct mlx5_flows flows; /* RTE Flow rules. */
struct mlx5_flows ctrl_flows; /* Control flow rules. */
+ void *inter_flows; /* Intermediate resources for flow creation. */
+ int flow_idx; /* Intermediate device flow index. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
int mlx5_flow_start_default(struct rte_eth_dev *dev);
void mlx5_flow_stop_default(struct rte_eth_dev *dev);
+void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);
+void mlx5_flow_free_intermediate(struct rte_eth_dev *dev);
int mlx5_flow_verify(struct rte_eth_dev *dev);
int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- * Pointer to device flow structure.
+ * @param[in] flow
+ * Pointer to flow structure.
+ * @param[in] dev_handle
+ * Pointer to device flow handle structure.
*/
static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->handle.act_flags &
+ const int mark = !!(dev_handle->act_flags &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
for (i = 0; i != flow->rss.queue_num; ++i) {
/* Increase the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->handle.layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]++;
break;
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_set(dev, dev_flow);
+ LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ flow_drv_rxq_flags_set(dev, flow, dev_handle);
}
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] dev_flow
- * Pointer to the device flow.
+ * @param[in] flow
+ * Pointer to flow structure.
+ * @param[in] dev_handle
+ * Pointer to the device flow handle structure.
*/
static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->handle.act_flags &
+ const int mark = !!(dev_handle->act_flags &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
MLX5_ASSERT(dev->data->dev_started);
/* Decrease the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->handle.layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]--;
break;
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_trim(dev, dev_flow);
+ LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ flow_drv_rxq_flags_trim(dev, flow, dev_handle);
}
/**
flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- if (dev_flow->handle.qrss_id)
- flow_qrss_free_id(dev, dev_flow->handle.qrss_id);
+ LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ if (dev_handle->qrss_id)
+ flow_qrss_free_id(dev, dev_handle->qrss_id);
}
static int
}
static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
* setting backward reference to the flow should be done out of this function.
* layers field is not filled either.
*
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to device flow on success, otherwise NULL and rte_errno is set.
*/
static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow *flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
- return fops->prepare(attr, items, actions, error);
+ return fops->prepare(dev, attr, items, actions, error);
}
/**
* help to do the optimization work for source code.
* If no decap actions, use the layers directly.
*/
- if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP))
- return dev_flow->handle.layers;
+ if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+ return dev_flow->handle->layers;
/* Convert L3 layers with decap action. */
- if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
/* Convert L4 layers with decap action. */
- if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
- else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
return layers;
}
* The last stage of the splitting chain, just creates the subflow
* without any modification.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
{
struct mlx5_flow *dev_flow;
- dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
if (!dev_flow)
return -rte_errno;
dev_flow->flow = flow;
dev_flow->external = external;
/* Subflow object was created, we must include it in the list. */
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
/*
* If dev_flow is as one of the suffix flow, some actions in suffix
* flow may need some user defined item layer flags.
*/
if (prefix_layers)
- dev_flow->handle.layers = prefix_layers;
+ dev_flow->handle->layers = prefix_layers;
if (sub_flow)
*sub_flow = dev_flow;
return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
* reallocation becomes possible (for example, for
* other flows in other threads).
*/
- dev_flow->handle.qrss_id = qrss_id;
+ dev_flow->handle->qrss_id = qrss_id;
ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
error);
if (ret < 0)
ret = -rte_errno;
goto exit;
}
- dev_flow->handle.mtr_flow_id = mtr_tag_id;
+ dev_flow->handle->mtr_flow_id = mtr_tag_id;
/* Setting the sfx group attribute. */
sfx_attr.group = sfx_attr.transfer ?
(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
}
- LIST_INIT(&flow->dev_flows);
+ LIST_INIT(&flow->dev_handles);
if (rss && rss->types) {
unsigned int graph_root;
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
+ /* Reset device flow index to 0. */
+ priv->flow_idx = 0;
for (i = 0; i < buf->entries; ++i) {
/*
* The splitter may create multiple dev_flows,
attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
attr_tx.ingress = 0;
attr_tx.egress = 1;
- dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+ dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
actions_hairpin_tx.actions, error);
if (!dev_flow)
goto error;
dev_flow->flow = flow;
dev_flow->external = 0;
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
ret = flow_drv_translate(dev, dev_flow, &attr_tx,
items_tx.items,
actions_hairpin_tx.actions, error);
*
* @param dev
* Pointer to Ethernet device.
- * @param list
- * Pointer to a TAILQ flow list.
*/
void
mlx5_flow_stop_default(struct rte_eth_dev *dev)
return flow_mreg_add_default_copy_action(dev, &error);
}
+/**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!priv->inter_flows)
+ priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
+ sizeof(struct mlx5_flow), 0);
+}
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ rte_free(priv->inter_flows);
+ priv->inter_flows = NULL;
+}
+
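/*
 * Illustrative usage sketch (assumption: the call sites are the ones added
 * earlier in this patch, in device spawn and close), showing how the
 * alloc/free pair above is intended to bracket the device lifetime:
 *
 *	mlx5_flow_alloc_intermediate(eth_dev);	// once, before any flow creation
 *	... flows created/destroyed, indexing priv->inter_flows ...
 *	mlx5_flow_free_intermediate(dev);	// on device close
 */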
/**
* Verify the flow list is empty
*
/** Device flow handle structure: used both for creating & destroying. */
struct mlx5_flow_handle {
+ LIST_ENTRY(mlx5_flow_handle) next;
+ /**< Pointer to next device flow handle. */
uint64_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
uint64_t act_flags;
#endif
};
+/*
+ * Size of the device flow handle structure for Verbs only: the trailing
+ * DV-only part is excluded, since no DV flow attributes are accessed in Verbs.
+ * Macro offsetof() could also be used here.
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#define MLX5_FLOW_HANDLE_VERBS_SIZE \
+ (sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
+#else
+#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
+#endif
+
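/*
 * Illustrative sketch (assumption: the DV-only fields form a trailing 'dvh'
 * member of struct mlx5_flow_handle, as implied by the size subtraction
 * above). The offsetof() alternative mentioned in the comment would then
 * read roughly:
 *
 *	#define MLX5_FLOW_HANDLE_VERBS_SIZE \
 *		(offsetof(struct mlx5_flow_handle, dvh))
 */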
/*
* Max number of actions per DV flow.
* See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
/** Device flow structure only for DV flow creation. */
-struct mlx5_flow_resource_dv {
+struct mlx5_flow_dv_workspace {
uint32_t group; /**< The group index. */
uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
int actions_n; /**< number of actions. */
/**< Holds the value that the packet is compared to. */
};
+/*
+ * Maximal Verbs flow specifications & actions size.
+ * Some elements are mutually exclusive, but enough space should be allocated.
+ * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
+ * 2. One tunnel header (exception: GRE + MPLS),
+ * SPEC length: GRE == tunnel.
+ * Actions: 1. 1 Mark OR Flag.
+ * 2. 1 Drop (if any).
+ * 3. No limitation for counters, but it makes no sense to support too
+ * many counters in a single device flow.
+ */
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+ ( \
+ (2 * (sizeof(struct ibv_flow_spec_eth) + \
+ sizeof(struct ibv_flow_spec_ipv6) + \
+ sizeof(struct ibv_flow_spec_tcp_udp)) + \
+ sizeof(struct ibv_flow_spec_gre) + \
+ sizeof(struct ibv_flow_spec_mpls)) \
+ )
+#else
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+ ( \
+ (2 * (sizeof(struct ibv_flow_spec_eth) + \
+ sizeof(struct ibv_flow_spec_ipv6) + \
+ sizeof(struct ibv_flow_spec_tcp_udp)) + \
+ sizeof(struct ibv_flow_spec_tunnel)) \
+ )
+#endif
+
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
+ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+#define MLX5_VERBS_MAX_ACT_SIZE \
+ ( \
+ sizeof(struct ibv_flow_spec_action_tag) + \
+ sizeof(struct ibv_flow_spec_action_drop) + \
+ sizeof(struct ibv_flow_spec_counter_action) * 4 \
+ )
+#else
+#define MLX5_VERBS_MAX_ACT_SIZE \
+ ( \
+ sizeof(struct ibv_flow_spec_action_tag) + \
+ sizeof(struct ibv_flow_spec_action_drop) \
+ )
+#endif
+
+#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
+ (MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
+
/** Device flow structure only for Verbs flow creation. */
-struct mlx5_flow_resource_verbs {
+struct mlx5_flow_verbs_workspace {
unsigned int size; /**< Size of the attribute. */
- struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
- uint8_t *specs; /**< Pointer to the specifications. */
+ struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
+ uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
+ /**< Specifications & actions buffer of verbs flow. */
};
+/** Maximal number of device sub-flows supported. */
+#define MLX5_NUM_MAX_DEV_FLOWS 32
+
/** Device flow structure. */
struct mlx5_flow {
- LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
struct rte_flow *flow; /**< Pointer to the main flow. */
uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
bool external; /**< true if the flow is created external to PMD. */
uint8_t ingress; /**< 1 if the flow is ingress. */
union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5_flow_resource_dv dv;
+ struct mlx5_flow_dv_workspace dv;
#endif
- struct mlx5_flow_resource_verbs verbs;
+ struct mlx5_flow_verbs_workspace verbs;
};
- struct mlx5_flow_handle handle;
+ struct mlx5_flow_handle *handle;
};
/* Flow meter state. */
struct mlx5_flow_mreg_copy_resource *mreg_copy;
/**< pointer to metadata register copy table resource. */
struct mlx5_flow_meter *meter; /**< Holds flow meter. */
- LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
- /**< Device flows that are part of the flow. */
+ LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
+ /**< Device flow handles that are part of the flow. */
struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
uint32_t copy_applied:1; /**< The MARK copy flow is applied. */
bool external,
struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
- (const struct rte_flow_attr *attr, const struct rte_flow_item items[],
+ (struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
const struct rte_flow_action actions[], struct rte_flow_error *error);
typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
struct mlx5_flow *dev_flow, bool tunnel_decap)
{
- uint64_t layers = dev_flow->handle.layers;
+ uint64_t layers = dev_flow->handle->layers;
/*
* If layers is already initialized, it means this dev_flow is the
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle.dvh.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->handle.dvh.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->handle.dvh.jump = &tbl_data->jump;
+ dev_flow->handle->dvh.jump = &tbl_data->jump;
return 0;
}
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle.dvh.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
- dev_flow->handle.dvh.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle.dvh.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
- dev_flow->handle.dvh.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle.dvh.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->handle.dvh.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
* Internal preparation function. Allocates the DV flow size;
* this size is constant.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* otherwise NULL and rte_errno is set.
*/
static struct mlx5_flow *
-flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_dv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow);
+ size_t size = sizeof(struct mlx5_flow_handle);
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
- dev_flow = rte_calloc(__func__, 1, size, 0);
- if (!dev_flow) {
+ /* Guard against overflowing the intermediate flow buffer. */
+ if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ rte_flow_error_set(error, ENOSPC,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not free temporary device flow");
+ return NULL;
+ }
+ dev_handle = rte_calloc(__func__, 1, size, 0);
+ if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "not enough memory to create flow");
+ "not enough memory to create flow handle");
return NULL;
}
+ /* No multi-thread support. */
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ dev_flow->handle = dev_handle;
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /*
+ * The matching value needs to be cleared to 0 before use. Previously
+ * it was cleared automatically by the rte_*alloc API; the time spent
+ * on this memset is almost the same as before.
+ */
+ memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
dev_flow->ingress = attr->ingress;
dev_flow->dv.transfer = attr->transfer;
return dev_flow;
* This is a workaround, masks are not supported,
* and pre-validated.
*/
- dev_flow->handle.vf_vlan.tag =
+ dev_flow->handle->vf_vlan.tag =
rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
- dev_flow->handle.dvh.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
/* old matcher should not make the table ref++. */
flow_dv_tbl_resource_release(dev, tbl);
return 0;
/* only matcher ref++, table ref++ already done above in get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
- dev_flow->handle.dvh.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle.dvh.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
}
- dev_flow->handle.dvh.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
{
struct rte_flow *flow = dev_flow->flow;
- uint64_t items = dev_flow->handle.layers;
+ uint64_t items = dev_flow->handle->layers;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint64_t action_flags = 0;
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.port_id_action->action;
+ handle->dvh.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
break;
}
tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- if (!dev_flow->handle.dvh.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ /*
+ * Only one FLAG or MARK is supported per device flow
+ * right now, so the pointer to the tag resource must be
+ * NULL before registration.
+ */
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- if (!dev_flow->handle.dvh.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
if (flow_dv_convert_action_set_meta
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.push_vlan_res->action;
+ handle->dvh.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
"cannot create jump action.");
}
dev_flow->dv.actions[actions_n++] =
- dev_flow->handle.dvh.jump->action;
+ handle->dvh.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- dev_flow->handle.dvh.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->verbs_action;
}
break;
default:
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
- dev_flow->handle.act_flags = action_flags;
+ handle->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
* Layers may be already initialized from prefix flow if this dev_flow
* is the suffix flow.
*/
- dev_flow->handle.layers |= item_flags;
+ handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
/* Register matcher. */
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_resource_dv *dv;
+ struct mlx5_flow_dv_workspace *dv;
struct mlx5_flow_handle *dh;
struct mlx5_flow_handle_dv *dv_h;
struct mlx5_flow *dev_flow;
struct mlx5_priv *priv = dev->data->dev_private;
int n;
int err;
+ int idx;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dh = &dev_flow->handle;
+ for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
dv = &dev_flow->dv;
- n = dv->actions_n;
+ dh = dev_flow->handle;
dv_h = &dh->dvh;
+ n = dv->actions_n;
if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
if (dv->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
- !!(dev_flow->handle.layers &
+ !!(dh->layers &
MLX5_FLOW_LAYER_TUNNEL));
}
if (!hrxq) {
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
- if (dh_tmp->hrxq) {
- if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
+ if (dh->hrxq) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dh_tmp->hrxq);
- dh_tmp->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
+ struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
/**
* Release an encap/decap resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- flow->handle.dvh.encap_decap;
+ handle->dvh.encap_decap;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
- flow->handle.dvh.jump;
+ handle->dvh.jump;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(cache_resource,
struct mlx5_flow_tbl_data_entry, jump);
/**
* Release a modify-header resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- flow->handle.dvh.modify_hdr;
+ handle->dvh.modify_hdr;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
/**
* Release port ID action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_port_id_action_resource *cache_resource =
- flow->handle.dvh.port_id_action;
+ handle->dvh.port_id_action;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
/**
* Release push vlan action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
- flow->handle.dvh.push_vlan_res;
+ handle->dvh.push_vlan_res;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_handle *dh;
- struct mlx5_flow *dev_flow;
if (!flow)
return;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dh = &dev_flow->handle;
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
if (dh->ib_flow) {
claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
dh->ib_flow = NULL;
}
if (dh->hrxq) {
- if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, dh->hrxq);
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
if (!flow)
return;
mlx5_flow_meter_detach(flow->meter);
flow->meter = NULL;
}
- while (!LIST_EMPTY(&flow->dev_flows)) {
- dev_flow = LIST_FIRST(&flow->dev_flows);
- LIST_REMOVE(dev_flow, next);
- if (dev_flow->handle.dvh.matcher)
- flow_dv_matcher_release(dev, dev_flow);
- if (dev_flow->handle.dvh.encap_decap)
- flow_dv_encap_decap_resource_release(dev_flow);
- if (dev_flow->handle.dvh.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_flow);
- if (dev_flow->handle.dvh.jump)
- flow_dv_jump_tbl_resource_release(dev, dev_flow);
- if (dev_flow->handle.dvh.port_id_action)
- flow_dv_port_id_action_resource_release(dev_flow);
- if (dev_flow->handle.dvh.push_vlan_res)
- flow_dv_push_vlan_action_resource_release(dev_flow);
- if (dev_flow->handle.dvh.tag_resource)
+ while (!LIST_EMPTY(&flow->dev_handles)) {
+ dev_handle = LIST_FIRST(&flow->dev_handles);
+ LIST_REMOVE(dev_handle, next);
+ if (dev_handle->dvh.matcher)
+ flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.encap_decap)
+ flow_dv_encap_decap_resource_release(dev_handle);
+ if (dev_handle->dvh.modify_hdr)
+ flow_dv_modify_hdr_resource_release(dev_handle);
+ if (dev_handle->dvh.jump)
+ flow_dv_jump_tbl_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.port_id_action)
+ flow_dv_port_id_action_resource_release(dev_handle);
+ if (dev_handle->dvh.push_vlan_res)
+ flow_dv_push_vlan_action_resource_release(dev_handle);
+ if (dev_handle->dvh.tag_resource)
flow_dv_tag_release(dev,
- dev_flow->handle.dvh.tag_resource);
- rte_free(dev_flow);
+ dev_handle->dvh.tag_resource);
+ rte_free(dev_handle);
}
}
* Size in bytes of the specification to copy.
*/
static void
-flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
void *src, unsigned int size)
{
void *dst;
MLX5_ASSERT(verbs->specs);
dst = (void *)(verbs->specs + verbs->size);
memcpy(dst, src, size);
- ++verbs->attr->num_of_specs;
+ ++verbs->attr.num_of_specs;
verbs->size += size;
}
if (!(item_flags & l2m))
flow_verbs_spec_add(&dev_flow->verbs, ð, size);
else
- flow_verbs_item_vlan_update(dev_flow->verbs.attr, ð);
+ flow_verbs_item_vlan_update(&dev_flow->verbs.attr, ð);
if (!tunnel)
- dev_flow->handle.vf_vlan.tag =
+ dev_flow->handle->vf_vlan.tag =
rte_be_to_cpu_16(spec->tci) & 0x0fff;
}
const struct rte_flow_item *item __rte_unused,
uint64_t item_flags)
{
- struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
+ struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
struct ibv_flow_spec_tunnel tunnel = {
}
#endif
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
- flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+ flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
IBV_FLOW_SPEC_IPV4_EXT,
IPPROTO_GRE);
else
- flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+ flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
IBV_FLOW_SPEC_IPV6,
IPPROTO_GRE);
flow_verbs_spec_add(verbs, &tunnel, size);
* The required size is calculated based on the actions and items. This function
* also returns the detected actions and items for later use.
*
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* is set.
*/
static struct mlx5_flow *
-flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_verbs_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+ size_t size = 0;
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
size += flow_verbs_get_actions_size(actions);
size += flow_verbs_get_items_size(items);
- dev_flow = rte_calloc(__func__, 1, size, 0);
- if (!dev_flow) {
+ if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
+ rte_flow_error_set(error, E2BIG,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Verbs spec/action size too large");
+ return NULL;
+ }
+ /* Guard against overflowing the intermediate flow buffer. */
+ if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ rte_flow_error_set(error, ENOSPC,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not free temporary device flow");
+ return NULL;
+ }
+ dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+ if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "not enough memory to create flow");
+ "not enough memory to create flow handle");
return NULL;
}
- dev_flow->verbs.attr = (void *)(dev_flow + 1);
- dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+ /* No multi-thread support. */
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ dev_flow->handle = dev_handle;
+ /* Memcpy is used, so only the size and spec count need resetting to 0. */
+ dev_flow->verbs.size = 0;
+ dev_flow->verbs.attr.num_of_specs = 0;
dev_flow->ingress = attr->ingress;
/* No need to set the transfer attribute: not supported in Verbs mode. */
return dev_flow;
"action not supported");
}
}
- dev_flow->handle.act_flags = action_flags;
+ dev_flow->handle->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
"item not supported");
}
}
- dev_flow->handle.layers = item_flags;
- dev_flow->verbs.attr->priority =
+ dev_flow->handle->layers = item_flags;
+ /* Other members of attr will be ignored. */
+ dev_flow->verbs.attr.priority =
mlx5_flow_adjust_priority(dev, priority, subpriority);
- dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
+ dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port;
return 0;
}
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow_handle *dh;
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *handle;
if (!flow)
return;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dh = &dev_flow->handle;
- if (dh->ib_flow) {
- claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
- dh->ib_flow = NULL;
+ LIST_FOREACH(handle, &flow->dev_handles, next) {
+ if (handle->ib_flow) {
+ claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
+ handle->ib_flow = NULL;
}
- if (dh->hrxq) {
- if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+ if (handle->hrxq) {
+ if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dh->hrxq);
- dh->hrxq = NULL;
+ mlx5_hrxq_release(dev, handle->hrxq);
+ handle->hrxq = NULL;
}
- if (dh->vf_vlan.tag && dh->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ if (handle->vf_vlan.tag && handle->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
}
}
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *handle;
if (!flow)
return;
flow_verbs_remove(dev, flow);
- while (!LIST_EMPTY(&flow->dev_flows)) {
- dev_flow = LIST_FIRST(&flow->dev_flows);
- LIST_REMOVE(dev_flow, next);
- rte_free(dev_flow);
+ while (!LIST_EMPTY(&flow->dev_handles)) {
+ handle = LIST_FIRST(&flow->dev_handles);
+ LIST_REMOVE(handle, next);
+ rte_free(handle);
}
if (flow->counter) {
flow_verbs_counter_release(dev, flow->counter);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_handle *dh;
+ struct mlx5_flow_handle *handle;
struct mlx5_flow *dev_flow;
int err;
-
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dh = &dev_flow->handle;
- if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP) {
- dh->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dh->hrxq) {
+ int idx;
+
+ for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
+ handle = dev_flow->handle;
+ if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+ handle->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!handle->hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
- !!(dev_flow->handle.layers &
+ !!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set
"cannot get hash queue");
goto error;
}
- dh->hrxq = hrxq;
+ handle->hrxq = hrxq;
}
- dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
- dev_flow->verbs.attr);
- if (!dh->ib_flow) {
+ handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+ &dev_flow->verbs.attr);
+ if (!handle->ib_flow) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
goto error;
}
if (priv->vmwa_context &&
- dev_flow->handle.vf_vlan.tag &&
- !dev_flow->handle.vf_vlan.created) {
+ handle->vf_vlan.tag && !handle->vf_vlan.created) {
/*
* The rule contains the VLAN pattern.
* For VF we are going to create VLAN
* interface to make hypervisor set correct
* e-Switch vport context.
*/
- mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
+ mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
}
}
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dh = &dev_flow->handle;
- if (dh->hrxq) {
- if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+ LIST_FOREACH(handle, &flow->dev_handles, next) {
+ if (handle->hrxq) {
+ if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dh->hrxq);
- dh->hrxq = NULL;
+ mlx5_hrxq_release(dev, handle->hrxq);
+ handle->hrxq = NULL;
}
- if (dh->vf_vlan.tag && dh->vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+ if (handle->vf_vlan.tag && handle->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
mlx5_txq_stop(dev);
return -rte_errno;
}
+ /* Set the started flag here for the following steps, e.g. control flows. */
dev->data->dev_started = 1;
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {