* The hash fields that should be used.
*/
uint64_t
-mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
int tunnel __rte_unused, uint64_t layer_types,
uint64_t hash_fields)
{
- struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- int rss_request_inner = flow->rss.level >= 2;
+ int rss_request_inner = rss_desc->level >= 2;
/* Check RSS hash level for tunnel. */
if (tunnel && rss_request_inner)
return 0;
#endif
/* Check if requested layer matches RSS hash fields. */
- if (!(flow->rss.types & layer_types))
+ if (!(rss_desc->types & layer_types))
return 0;
return hash_fields;
}
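For reference, a minimal caller sketch (hypothetical, not part of this patch): after the signature change the translate layer passes the per-creation RSS descriptor instead of the rte_flow, together with the layer types it matched and the verbs hash fields it wants to enable.

/* Hypothetical caller, assuming the mlx5 PMD internal headers and the
 * verbs IBV_RX_HASH_* definitions are available. It keeps the IPv4/UDP
 * hash bits only when the user actually requested those RSS types. */
static uint64_t
example_ipv4_udp_hashfields(struct mlx5_flow_rss_desc *rss_desc, int tunnel)
{
	return mlx5_flow_hashfields_adjust(rss_desc, tunnel,
					   ETH_RSS_NONFRAG_IPV4_UDP |
					   ETH_RSS_UDP,
					   IBV_RX_HASH_SRC_IPV4 |
					   IBV_RX_HASH_DST_IPV4 |
					   IBV_RX_HASH_SRC_PORT_UDP |
					   IBV_RX_HASH_DST_PORT_UDP);
}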
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- * Pointer to device flow structure.
+ * @param[in] dev_handle
+ * Pointer to device flow handle structure.
*/
static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int mark = dev_handle->mark;
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct mlx5_hrxq *hrxq;
unsigned int i;
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->rss.queue)[i];
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+ return;
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ dev_handle->rix_hrxq);
+ if (!hrxq)
+ return;
+ for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+ int idx = hrxq->ind_table->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
/* Increase the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]++;
break;
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_set(dev, dev_flow);
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
+ flow_drv_rxq_flags_set(dev, dev_handle);
}
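The pointer-based dev_flows list is replaced here by an index list: the handles live in the shared MLX5_IPOOL_MLX5_FLOW pool and are chained through 32-bit indexes, with 0 terminating the chain. A rough open-coded equivalent of the SILIST_FOREACH() walk above, for illustration only (the 'next' index field is an assumption based on how the macro is used in this patch):

uint32_t handle_idx = flow->dev_handles;

while (handle_idx) {
	struct mlx5_flow_handle *h =
		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			       handle_idx);

	if (!h)
		break;
	flow_drv_rxq_flags_set(dev, h);
	handle_idx = h->next;	/* Index of the next handle, 0 ends the list. */
}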
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] dev_flow
- * Pointer to the device flow.
+ * @param[in] dev_handle
+ * Pointer to the device flow handle structure.
*/
static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
- (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int mark = dev_handle->mark;
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct mlx5_hrxq *hrxq;
unsigned int i;
+ if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+ return;
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+ dev_handle->rix_hrxq);
+ if (!hrxq)
+ return;
MLX5_ASSERT(dev->data->dev_started);
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->rss.queue)[i];
+ for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+ int idx = hrxq->ind_table->queues[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl, rxq);
/* Decrease the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]--;
break;
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_trim(dev, dev_flow);
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
+ flow_drv_rxq_flags_trim(dev, dev_handle);
}
/**
flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- if (dev_flow->qrss_id)
- flow_qrss_free_id(dev, dev_flow->qrss_id);
+ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+ handle_idx, dev_handle, next)
+ if (dev_handle->split_flow_id)
+ flow_qrss_free_id(dev, dev_handle->split_flow_id);
}
static int
}
static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
* setting backward reference to the flow should be done out of this function.
* layers field is not filled either.
*
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to device flow on success, otherwise NULL and rte_errno is set.
*/
static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow *flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
- return fops->prepare(attr, items, actions, error);
+ return fops->prepare(dev, attr, items, actions, error);
}
/**
{
uint64_t layers = 0;
- /* If no decap actions, use the layers directly. */
- if (!(dev_flow->actions & MLX5_FLOW_ACTION_DECAP))
- return dev_flow->layers;
+ /*
+ * The layers bits could be cached in local variables, but usually the
+ * compiler will do that optimization for us.
+ * If no decap actions, use the layers directly.
+ */
+ if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
+ return dev_flow->handle->layers;
/* Convert L3 layers with decap action. */
- if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
/* Convert L4 layers with decap action. */
- if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
- else if (dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
return layers;
}
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
struct mlx5_flow_mreg_copy_resource *mcp_res;
+ uint32_t idx = 0;
int ret;
/* Fill the register fields in the flow. */
};
}
/* Build a new entry. */
- mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0);
+ mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
if (!mcp_res) {
rte_errno = ENOMEM;
return NULL;
}
+ mcp_res->idx = idx;
/*
* The copy Flows are not included in any list. There
* ones are referenced from other Flows and can not
error:
if (mcp_res->flow)
flow_list_destroy(dev, NULL, mcp_res->flow);
- rte_free(mcp_res);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
return NULL;
}
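The metadata copy resource follows the same indexed-pool pattern as the flow handles: allocate by index, store the index back into the object, and release by index. A condensed sketch of that life cycle (illustrative only, assuming the MLX5_IPOOL_MCP pool was created at shared-context setup):

uint32_t idx = 0;
struct mlx5_flow_mreg_copy_resource *res =
	mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);

if (res) {
	res->idx = idx;	/* Keep the index so the object can free itself. */
	/* ... register in priv->mreg_cp_tbl and use the resource ... */
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], res->idx);
}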
flow_mreg_del_copy_action(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
struct mlx5_priv *priv = dev->data->dev_private;
+ if (!flow->rix_mreg_copy)
+ return;
+ mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+ flow->rix_mreg_copy);
if (!mcp_res || !priv->mreg_cp_tbl)
return;
if (flow->copy_applied) {
MLX5_ASSERT(mcp_res->flow);
flow_list_destroy(dev, NULL, mcp_res->flow);
mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- rte_free(mcp_res);
- flow->mreg_copy = NULL;
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+ flow->rix_mreg_copy = 0;
}
/**
flow_mreg_start_copy_action(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
- if (!mcp_res || flow->copy_applied)
+ if (!flow->rix_mreg_copy || flow->copy_applied)
+ return 0;
+ mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+ flow->rix_mreg_copy);
+ if (!mcp_res)
return 0;
if (!mcp_res->appcnt) {
ret = flow_drv_apply(dev, mcp_res->flow, NULL);
flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_priv *priv = dev->data->dev_private;
- if (!mcp_res || !flow->copy_applied)
+ if (!flow->rix_mreg_copy || !flow->copy_applied)
+ return;
+ mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
+ flow->rix_mreg_copy);
+ if (!mcp_res)
return;
MLX5_ASSERT(mcp_res->appcnt);
--mcp_res->appcnt;
MLX5_ASSERT(mcp_res->flow);
flow_list_destroy(dev, NULL, mcp_res->flow);
mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- rte_free(mcp_res);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}
/**
(dev, MLX5_FLOW_MARK_DEFAULT, error);
if (!mcp_res)
return -rte_errno;
- flow->mreg_copy = mcp_res;
+ flow->rix_mreg_copy = mcp_res->idx;
if (dev->data->dev_started) {
mcp_res->appcnt++;
flow->copy_applied = 1;
flow_mreg_add_copy_action(dev, mark->id, error);
if (!mcp_res)
return -rte_errno;
- flow->mreg_copy = mcp_res;
+ flow->rix_mreg_copy = mcp_res->idx;
if (dev->data->dev_started) {
mcp_res->appcnt++;
flow->copy_applied = 1;
* The last stage of splitting chain, just creates the subflow
* without any modification.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
{
struct mlx5_flow *dev_flow;
- dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
if (!dev_flow)
return -rte_errno;
dev_flow->flow = flow;
dev_flow->external = external;
/* Subflow object was created, we must include one in the list. */
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
/*
* If dev_flow is as one of the suffix flow, some actions in suffix
* flow may need some user defined item layer flags.
*/
if (prefix_layers)
- dev_flow->layers = prefix_layers;
+ dev_flow->handle->layers = prefix_layers;
if (sub_flow)
*sub_flow = dev_flow;
return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
* reallocation becomes possible (for example, for
* other flows in other threads).
*/
- dev_flow->qrss_id = qrss_id;
- qrss_id = 0;
+ dev_flow->handle->split_flow_id = qrss_id;
ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
error);
if (ret < 0)
external, error);
if (ret < 0)
goto exit;
+ /* The qrss ID should only be freed on failure; clear it on success. */
+ qrss_id = 0;
MLX5_ASSERT(dev_flow);
}
ret = -rte_errno;
goto exit;
}
- dev_flow->mtr_flow_id = mtr_tag_id;
+ dev_flow->handle->split_flow_id = mtr_tag_id;
/* Setting the sfx group attr. */
sfx_attr.group = sfx_attr.transfer ?
(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
uint8_t buffer[2048];
} items_tx;
struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)[!!priv->flow_idx];
const struct rte_flow_action *p_actions_rx = actions;
uint32_t i;
- uint32_t flow_size;
int hairpin_flow = 0;
uint32_t hairpin_id = 0;
struct rte_flow_attr attr_tx = { .priority = 0 };
&hairpin_id);
p_actions_rx = actions_rx.actions;
}
- flow_size = sizeof(struct rte_flow);
- rss = flow_get_rss_action(p_actions_rx);
- if (rss)
- flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
- sizeof(void *));
- else
- flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
- flow = rte_calloc(__func__, 1, flow_size, 0);
+ flow = rte_calloc(__func__, 1, sizeof(struct rte_flow), 0);
if (!flow) {
rte_errno = ENOMEM;
goto error_before_flow;
flow->hairpin_flow_id = hairpin_id;
MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
- flow->rss.queue = (void *)(flow + 1);
+ memset(rss_desc, 0, sizeof(*rss_desc));
+ rss = flow_get_rss_action(p_actions_rx);
if (rss) {
/*
* The following information is required by
* mlx5_flow_hashfields_adjust() in advance.
*/
- flow->rss.level = rss->level;
+ rss_desc->level = rss->level;
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+ rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
}
- LIST_INIT(&flow->dev_flows);
+ flow->dev_handles = 0;
if (rss && rss->types) {
unsigned int graph_root;
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
+ /*
+ * Record the start index when there is a nested call. All sub-flows
+ * need to be translated before another nested call starts, so a
+ * ping-pong buffer is not needed here, which saves memory.
+ */
+ if (priv->flow_idx) {
+ MLX5_ASSERT(!priv->flow_nested_idx);
+ priv->flow_nested_idx = priv->flow_idx;
+ }
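The RSS description is no longer embedded in struct rte_flow; two descriptors live in the per-port intermediate buffer and the nesting depth selects which one is used, so a nested flow_list_create() call (for example the one made by flow_mreg_add_copy_action()) does not clobber the descriptor of the flow still being built. A hypothetical pair of helpers restating the save/restore protocol used inline above (names are illustrative, not driver code):

static void
example_flow_ctx_enter(struct mlx5_priv *priv)
{
	/* Remember where a nested creation starts; only one level is expected. */
	if (priv->flow_idx) {
		MLX5_ASSERT(!priv->flow_nested_idx);
		priv->flow_nested_idx = priv->flow_idx;
	}
}

static void
example_flow_ctx_exit(struct mlx5_priv *priv)
{
	/* Restore the outer creation context on return. */
	priv->flow_idx = priv->flow_nested_idx;
	if (priv->flow_nested_idx)
		priv->flow_nested_idx = 0;
}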
for (i = 0; i < buf->entries; ++i) {
/*
* The splitter may create multiple dev_flows,
attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
attr_tx.ingress = 0;
attr_tx.egress = 1;
- dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+ dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
actions_hairpin_tx.actions, error);
if (!dev_flow)
goto error;
dev_flow->flow = flow;
dev_flow->external = 0;
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+ dev_flow->handle, next);
ret = flow_drv_translate(dev, dev_flow, &attr_tx,
items_tx.items,
actions_hairpin_tx.actions, error);
if (ret)
goto error;
}
- if (dev->data->dev_started) {
+ /*
+ * If the flow is external (from the application) or the device is
+ * started, then the flow will be applied immediately.
+ */
+ if (external || dev->data->dev_started) {
ret = flow_drv_apply(dev, flow, error);
if (ret < 0)
goto error;
if (list)
TAILQ_INSERT_TAIL(list, flow, next);
flow_rxq_flags_set(dev, flow);
+ /* Nested flow creation index recovery. */
+ priv->flow_idx = priv->flow_nested_idx;
+ if (priv->flow_nested_idx)
+ priv->flow_nested_idx = 0;
return flow;
-error_before_flow:
- if (hairpin_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- hairpin_id);
- return NULL;
error:
MLX5_ASSERT(flow);
- flow_mreg_del_copy_action(dev, flow);
ret = rte_errno; /* Save rte_errno before cleanup. */
- if (flow->hairpin_flow_id)
- mlx5_flow_id_release(priv->sh->flow_id_pool,
- flow->hairpin_flow_id);
- MLX5_ASSERT(flow);
+ flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
rte_free(flow);
rte_errno = ret; /* Restore rte_errno. */
+error_before_flow:
+ ret = rte_errno;
+ if (hairpin_id)
+ mlx5_flow_id_release(priv->sh->flow_id_pool,
+ hairpin_id);
+ rte_errno = ret;
+ priv->flow_idx = priv->flow_nested_idx;
+ if (priv->flow_nested_idx)
+ priv->flow_nested_idx = 0;
return NULL;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
+ /*
+ * If the device is not started yet, it is not allowed to create a
+ * flow from the application. PMD default flows and traffic control
+ * flows are not affected.
+ */
+ if (unlikely(!dev->data->dev_started)) {
+ rte_errno = ENODEV;
+ DRV_LOG(DEBUG, "port %u is not started when "
+ "inserting a flow", dev->data->port_id);
+ return NULL;
+ }
return flow_list_create(dev, &priv->flows,
attr, items, actions, true, error);
}
struct rte_flow *flow)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
/*
* Update RX queue flags only if port is started, otherwise it is
if (list)
TAILQ_REMOVE(list, flow, next);
flow_mreg_del_copy_action(dev, flow);
- rte_free(flow->fdir);
+ if (flow->fdir) {
+ LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+ if (priv_fdir_flow->flow == flow)
+ break;
+ }
+ if (priv_fdir_flow) {
+ LIST_REMOVE(priv_fdir_flow, next);
+ rte_free(priv_fdir_flow->fdir);
+ rte_free(priv_fdir_flow);
+ }
+ }
rte_free(flow);
}
*
* @param dev
* Pointer to Ethernet device.
- * @param list
- * Pointer to a TAILQ flow list.
*/
void
mlx5_flow_stop_default(struct rte_eth_dev *dev)
return flow_mreg_add_default_copy_action(dev, &error);
}
+/**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!priv->inter_flows) {
+ priv->inter_flows = rte_calloc(__func__, 1,
+ MLX5_NUM_MAX_DEV_FLOWS *
+ sizeof(struct mlx5_flow) +
+ (sizeof(struct mlx5_flow_rss_desc) +
+ sizeof(uint16_t) * UINT16_MAX) * 2, 0);
+ if (!priv->inter_flows) {
+ DRV_LOG(ERR, "can't allocate intermediate memory.");
+ return;
+ }
+ }
+ priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
+ [MLX5_NUM_MAX_DEV_FLOWS];
+ /* Reset the index. */
+ priv->flow_idx = 0;
+ priv->flow_nested_idx = 0;
+}
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ rte_free(priv->inter_flows);
+ priv->inter_flows = NULL;
+}
+
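The intermediate buffer keeps all per-creation scratch data in one per-port allocation: MLX5_NUM_MAX_DEV_FLOWS mlx5_flow objects followed by two RSS descriptors, each sized to hold a full queue list. A sketch of the expected hook points for these helpers, assuming the companion change wires them into device start and stop (the function names below are illustrative):

static int
example_port_start(struct rte_eth_dev *dev)
{
	/* Scratch memory must exist before any flow can be created. */
	mlx5_flow_alloc_intermediate(dev);
	/* ... start queues, re-apply control and application flows ... */
	return 0;
}

static void
example_port_stop(struct rte_eth_dev *dev)
{
	/* ... flush flows and stop queues first ... */
	mlx5_flow_free_intermediate(dev);
}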
/**
* Verify the flow list is empty
*
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
MLX5_ASSERT(fdir_flow);
- TAILQ_FOREACH(flow, &priv->flows, next) {
- if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
+ LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+ if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
+ flow = priv_fdir_flow->flow;
DRV_LOG(DEBUG, "port %u found FDIR flow %p",
dev->data->port_id, (void *)flow);
break;
}
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_fdir *fdir_flow;
struct rte_flow *flow;
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
int ret;
fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
rte_errno = EEXIST;
goto error;
}
+ priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow),
+ 0);
+ if (!priv_fdir_flow) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
fdir_flow->items, fdir_flow->actions, true,
NULL);
if (!flow)
goto error;
- MLX5_ASSERT(!flow->fdir);
- flow->fdir = fdir_flow;
+ flow->fdir = 1;
+ priv_fdir_flow->fdir = fdir_flow;
+ priv_fdir_flow->flow = flow;
+ LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
DRV_LOG(DEBUG, "port %u created FDIR flow %p",
dev->data->port_id, (void *)flow);
return 0;
error:
+ rte_free(priv_fdir_flow);
rte_free(fdir_flow);
return -rte_errno;
}
struct mlx5_fdir fdir_flow = {
.attr.group = 0,
};
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
int ret;
ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
if (ret)
return -rte_errno;
- flow = flow_fdir_filter_lookup(dev, &fdir_flow);
- if (!flow) {
- rte_errno = ENOENT;
- return -rte_errno;
+ LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
+ /* Find the fdir rule in the private list. */
+ if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
+ break;
}
+ if (!priv_fdir_flow)
+ return 0;
+ LIST_REMOVE(priv_fdir_flow, next);
+ flow = priv_fdir_flow->flow;
+ /* The fdir resource will be released after the flow is destroyed. */
+ flow->fdir = 0;
flow_list_destroy(dev, &priv->flows, flow);
+ rte_free(priv_fdir_flow->fdir);
+ rte_free(priv_fdir_flow);
DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
dev->data->port_id, (void *)flow);
return 0;
flow_fdir_filter_flush(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
-
- mlx5_flow_list_flush(dev, &priv->flows, false);
+ struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+
+ while (!LIST_EMPTY(&priv->fdir_flows)) {
+ priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
+ LIST_REMOVE(priv_fdir_flow, next);
+ priv_fdir_flow->flow->fdir = 0;
+ flow_list_destroy(dev, &priv->flows, priv_fdir_flow->flow);
+ rte_free(priv_fdir_flow->fdir);
+ rte_free(priv_fdir_flow);
+ }
}
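rte_flow now carries only a one-bit fdir flag, while the filter itself and its back pointer are kept in a small per-port list node. The node referenced above is expected to look roughly like this (the authoritative definition belongs to mlx5.h; shown here only to make the list handling readable):

/* Per-port flow director bookkeeping node (approximate shape). */
struct mlx5_fdir_flow {
	LIST_ENTRY(mlx5_fdir_flow) next; /* Linkage in priv->fdir_flows. */
	struct mlx5_fdir *fdir;          /* Converted filter description. */
	struct rte_flow *flow;           /* Flow created from the filter. */
};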
/**
* Pointer to Ethernet device structure.
*
* @return
- * Pointer to allocated counter on success, NULL otherwise.
+ * Index to allocated counter on success, 0 otherwise.
*/
-struct mlx5_flow_counter *
+uint32_t
mlx5_counter_alloc(struct rte_eth_dev *dev)
{
const struct mlx5_flow_driver_ops *fops;
DRV_LOG(ERR,
"port %u counter allocate is not supported.",
dev->data->port_id);
- return NULL;
+ return 0;
}
/**
* @param[in] dev
* Pointer to Ethernet device structure.
* @param[in] cnt
- * Pointer to counter to be free.
+ * Index of the counter to be freed.
*/
void
-mlx5_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
+mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
const struct mlx5_flow_driver_ops *fops;
struct rte_flow_attr attr = { .transfer = 0 };
* @param[in] dev
* Pointer to Ethernet device structure.
* @param[in] cnt
- * Pointer to counter to query.
+ * Index of the counter to query.
* @param[in] clear
* Set to clear counter statistics.
* @param[out] pkts
* 0 on success, a negative errno value otherwise.
*/
int
-mlx5_counter_query(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt,
+mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
bool clear, uint64_t *pkts, uint64_t *bytes)
{
const struct mlx5_flow_driver_ops *fops;
dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
(&pool->a64_dcs);
offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
+ /*
+ * Identify the counters released between the query trigger and the
+ * query handler more efficiently. A counter released in this gap
+ * period should wait for a new round of query, as the newly arrived
+ * packets will not be taken into account yet.
+ */
+ rte_atomic64_add(&pool->start_query_gen, 1);
ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
offset, NULL, NULL,
pool->raw_hw->mem_mng->dm->id,
sh->devx_comp,
(uint64_t)(uintptr_t)pool);
if (ret) {
+ rte_atomic64_sub(&pool->start_query_gen, 1);
DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
" %d", pool->min_dcs->id);
pool->raw_hw = NULL;
struct mlx5_counter_stats_raw *raw_to_free;
if (unlikely(status)) {
+ rte_atomic64_sub(&pool->start_query_gen, 1);
raw_to_free = pool->raw_hw;
} else {
raw_to_free = pool->raw;
rte_spinlock_lock(&pool->sl);
pool->raw = pool->raw_hw;
rte_spinlock_unlock(&pool->sl);
- rte_atomic64_add(&pool->query_gen, 1);
+ MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
+ rte_atomic64_read(&pool->start_query_gen));
+ rte_atomic64_set(&pool->end_query_gen,
+ rte_atomic64_read(&pool->start_query_gen));
/* Be sure the new raw counters data is updated in memory. */
rte_cio_wmb();
}
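Splitting query_gen into start_query_gen and end_query_gen lets the release path tell whether a counter was freed while an asynchronous query was still in flight: such a counter must wait until a complete query round has finished after its release before its data can be trusted again. A hypothetical helper expressing that rule, assuming the pool type declared in mlx5_flow.h; the function and the release_gen parameter are illustrative, not driver code:

/* Returns nonzero once at least one full query round has completed after
 * the counter recorded release_gen (taken from start_query_gen at release
 * time), i.e. its hardware data is up to date again. */
static int
example_counter_round_done(struct mlx5_flow_counter_pool *pool,
			   int64_t release_gen)
{
	return rte_atomic64_read(&pool->end_query_gen) > release_gen;
}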