#include <stdalign.h>
#include <stdint.h>
#include <string.h>
+#include <stdbool.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
*
* @param[in] dev
* Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- * Pointer to device flow structure.
+ * @param[in] flow
+ * Pointer to flow structure.
+ * @param[in] dev_handle
+ * Pointer to device flow handle structure.
*/
static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
+ const int mark = !!(dev_handle->act_flags &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
for (i = 0; i != flow->rss.queue_num; ++i) {
/* Increase the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]++;
break;
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_set(dev, dev_flow);
+ LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ flow_drv_rxq_flags_set(dev, flow, dev_handle);
}
/**
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] dev_flow
- * Pointer to the device flow.
+ * @param[in] flow
+ * Pointer to flow structure.
+ * @param[in] dev_handle
+ * Pointer to the device flow handle structure.
*/
static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- const int mark = !!(dev_flow->actions &
+ const int mark = !!(dev_handle->act_flags &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
- const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
MLX5_ASSERT(dev->data->dev_started);
/* Decrease the counter matching the flow. */
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
if ((tunnels_info[j].tunnel &
- dev_flow->layers) ==
+ dev_handle->layers) ==
tunnels_info[j].tunnel) {
rxq_ctrl->flow_tunnels_n[j]--;
break;
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- flow_drv_rxq_flags_trim(dev, dev_flow);
+ LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ flow_drv_rxq_flags_trim(dev, flow, dev_handle);
}
/**
"\xff\xff\xff\xff\xff\xff\xff\xff",
.vtc_flow = RTE_BE32(0xffffffff),
.proto = 0xff,
- .hop_limits = 0xff,
},
};
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
uint32_t vlan_id;
uint8_t vni[4];
} id = { .vlan_id = 0, };
- uint32_t vlan_id = 0;
if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return ret;
if (spec) {
memcpy(&id.vni[1], spec->vni, 3);
- vlan_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
- vlan_id &= id.vlan_id;
}
- /*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if
- * only this layer is defined in the Verbs specification it is
- * interpreted as wildcard and all packets will match this
- * rule, if it follows a full stack layer (ex: eth / ipv4 /
- * udp), all packets matching the layers before will also
- * match this rule. To avoid such situation, VNI 0 is
- * currently refused.
- */
- if (!vlan_id)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "VXLAN vni cannot be 0");
if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
uint32_t vlan_id;
uint8_t vni[4];
} id = { .vlan_id = 0, };
- uint32_t vlan_id = 0;
if (!priv->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
"VxLAN-GPE protocol"
" not supported");
memcpy(&id.vni[1], spec->vni, 3);
- vlan_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
- vlan_id &= id.vlan_id;
}
- /*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
- * layer is defined in the Verbs specification it is interpreted as
- * wildcard and all packets will match this rule, if it follows a full
- * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
- * before will also match this rule. To avoid such situation, VNI 0
- * is currently refused.
- */
- if (!vlan_id)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "VXLAN-GPE vni cannot be 0");
if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next)
- if (dev_flow->qrss_id)
- flow_qrss_free_id(dev, dev_flow->qrss_id);
+ LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+ if (dev_handle->qrss_id)
+ flow_qrss_free_id(dev, dev_handle->qrss_id);
}
static int
}
static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
* setting backward reference to the flow should be done out of this function.
* layers field is not filled either.
*
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to device flow on success, otherwise NULL and rte_errno is set.
*/
static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow *flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
- return fops->prepare(attr, items, actions, error);
+ return fops->prepare(dev, attr, items, actions, error);
}
/**
return 0;
}
-/**
- * Get port id item from the item list.
- *
- * @param[in] item
- * Pointer to the list of items.
- *
- * @return
- * Pointer to the port id item if exist, else return NULL.
- */
-static const struct rte_flow_item *
-find_port_id_item(const struct rte_flow_item *item)
-{
- MLX5_ASSERT(item);
- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID)
- return item;
- }
- return NULL;
-}
-
/**
* Get RSS action from the action list.
*
}
/**
- * Get QUEUE/RSS action from the action list.
+ * Get layer flags from the prefix flow.
+ *
+ * Some flows may be split into several subflows: the prefix subflow gets the
+ * match items and the suffix subflow gets the actions.
+ * Some actions need the user-defined match item flags to get the detail for
+ * the action.
+ * This function helps the suffix flow to get the item layer flags from the
+ * prefix subflow.
+ *
+ * @param[in] dev_flow
+ * Pointer to the created prefix subflow.
+ *
+ * @return
+ * The layers obtained from the prefix subflow.
+ */
+static inline uint64_t
+flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
+{
+ uint64_t layers = 0;
+
+ /*
+ * Layer bits could be cached in a local variable, but the compiler
+ * usually performs that optimization on its own.
+ * If there is no decap action, use the layers directly.
+ */
+ if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+ return dev_flow->handle->layers;
+ /* Convert L3 layers with decap action. */
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ /* Convert L4 layers with decap action. */
+ if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+ layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+ layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ return layers;
+}
+
+/**
+ * Get metadata split action information.
*
* @param[in] actions
* Pointer to the list of actions.
* @param[out] qrss_type
* Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned
* if no QUEUE/RSS is found.
+ * @param[out] encap_idx
+ * Pointer to the index of the encap action if it exists, otherwise to
+ * the last action index.
*
* @return
* Total number of actions.
*/
static int
-flow_parse_qrss_action(const struct rte_flow_action actions[],
- const struct rte_flow_action **qrss)
+flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
+ const struct rte_flow_action **qrss,
+ int *encap_idx)
{
+ const struct rte_flow_action_raw_encap *raw_encap;
int actions_n = 0;
+ int raw_decap_idx = -1;
+ *encap_idx = -1;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ *encap_idx = actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ raw_decap_idx = actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
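+ /*
+ * A raw encap longer than the L2 + IPv4 decision size carries a
+ * full tunnel header; point the encap index at the preceding raw
+ * decap, if any, so the split covers the whole decap/encap pair.
+ */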
+ raw_encap = actions->conf;
+ if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+ *encap_idx = raw_decap_idx != -1 ?
+ raw_decap_idx : actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
*qrss = actions;
}
actions_n++;
}
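+ /* No encap action found, report the END action index. */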
+ if (*encap_idx == -1)
+ *encap_idx = actions_n;
/* Count RTE_FLOW_ACTION_TYPE_END. */
return actions_n + 1;
}
* The last stage of splitting chain, just creates the subflow
* without any modification.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
* @param[in, out] sub_flow
* Pointer to return the created subflow, may be NULL.
+ * @param[in] prefix_layers
+ * Prefix subflow layers, may be 0.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
flow_create_split_inner(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct mlx5_flow **sub_flow,
+ uint64_t prefix_layers,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
{
struct mlx5_flow *dev_flow;
- dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+ dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
if (!dev_flow)
return -rte_errno;
dev_flow->flow = flow;
dev_flow->external = external;
/* Subflow object was created, we must include one in the list. */
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
+ /*
+ * If dev_flow is one of the suffix flows, some actions in the suffix
+ * flow may need the user-defined item layer flags.
+ */
+ if (prefix_layers)
+ dev_flow->handle->layers = prefix_layers;
if (sub_flow)
*sub_flow = dev_flow;
return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
*
* @param dev
* Pointer to Ethernet device.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[out] sfx_items
+ * Suffix flow match items (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] actions_sfx
*/
static int
flow_meter_split_prep(struct rte_eth_dev *dev,
+ const struct rte_flow_item items[],
+ struct rte_flow_item sfx_items[],
const struct rte_flow_action actions[],
struct rte_flow_action actions_sfx[],
struct rte_flow_action actions_pre[])
{
struct rte_flow_action *tag_action = NULL;
+ struct rte_flow_item *tag_item;
struct mlx5_rte_flow_action_set_tag *set_tag;
struct rte_flow_error error;
const struct rte_flow_action_raw_encap *raw_encap;
const struct rte_flow_action_raw_decap *raw_decap;
+ struct mlx5_rte_flow_item_tag *tag_spec;
+ struct mlx5_rte_flow_item_tag *tag_mask;
uint32_t tag_id;
+ bool copy_vlan = false;
/* Prepare the actions for prefix and suffix flow. */
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ struct rte_flow_action **action_cur = NULL;
+
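+ /*
+ * Actions are copied to the suffix flow by default; action_cur is
+ * redirected to the prefix flow where required below.
+ */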
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_METER:
/* Add the extra tag action first. */
tag_action = actions_pre;
tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
actions_pre++;
- memcpy(actions_pre, actions,
- sizeof(struct rte_flow_action));
- actions_pre++;
+ action_cur = &actions_pre;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- memcpy(actions_pre, actions,
- sizeof(struct rte_flow_action));
- actions_pre++;
+ action_cur = &actions_pre;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
raw_encap = actions->conf;
- if (raw_encap->size >
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4))) {
- memcpy(actions_sfx, actions,
- sizeof(struct rte_flow_action));
- actions_sfx++;
- } else {
- rte_memcpy(actions_pre, actions,
- sizeof(struct rte_flow_action));
- actions_pre++;
- }
+ if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
+ action_cur = &actions_pre;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
raw_decap = actions->conf;
- /* Size 0 decap means 50 bytes as vxlan decap. */
- if (raw_decap->size && (raw_decap->size <
- (sizeof(struct rte_flow_item_eth) +
- sizeof(struct rte_flow_item_ipv4)))) {
- memcpy(actions_sfx, actions,
- sizeof(struct rte_flow_action));
- actions_sfx++;
- } else {
- rte_memcpy(actions_pre, actions,
- sizeof(struct rte_flow_action));
- actions_pre++;
- }
+ if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+ action_cur = &actions_pre;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
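+ /*
+ * The VLAN actions are performed by the suffix flow, remember to
+ * copy the VLAN match item into its pattern below.
+ */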
+ copy_vlan = true;
break;
default:
- memcpy(actions_sfx, actions,
- sizeof(struct rte_flow_action));
- actions_sfx++;
break;
}
+ if (!action_cur)
+ action_cur = &actions_sfx;
+ memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
+ (*action_cur)++;
}
/* Add end action to the actions. */
actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
assert(tag_action);
tag_action->conf = set_tag;
+ /* Prepare the suffix subflow items. */
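+ /* Reserve the first item slot for the tag match filled in below. */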
+ tag_item = sfx_items++;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int item_type = items->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ sfx_items++;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ if (copy_vlan) {
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ /*
+ * Convert to internal match item, it is used
+ * for vlan push and set vid.
+ */
+ sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
+ sfx_items++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
+ sfx_items++;
+ tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
+ tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
+ tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
+ tag_mask = tag_spec + 1;
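+ /* Match only the tag value, the low bits carry the meter color. */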
+ tag_mask->data = 0xffffff00;
+ tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+ tag_item->spec = tag_spec;
+ tag_item->last = NULL;
+ tag_item->mask = tag_mask;
return tag_id;
}
* Number of actions in the list.
* @param[out] error
* Perform verbose error reporting if not NULL.
+ * @param[in] encap_idx
+ * The encap action index.
*
* @return
* 0 on success, negative value otherwise
flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
struct rte_flow_action *ext_actions,
const struct rte_flow_action *actions,
- int actions_n, struct rte_flow_error *error)
+ int actions_n, struct rte_flow_error *error,
+ int encap_idx)
{
struct mlx5_flow_action_copy_mreg *cp_mreg =
(struct mlx5_flow_action_copy_mreg *)
if (ret < 0)
return ret;
cp_mreg->src = ret;
- memcpy(ext_actions, actions,
- sizeof(*ext_actions) * actions_n);
- ext_actions[actions_n - 1] = (struct rte_flow_action){
- .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
- .conf = cp_mreg,
- };
- ext_actions[actions_n] = (struct rte_flow_action){
- .type = RTE_FLOW_ACTION_TYPE_END,
- };
+ if (encap_idx != 0)
+ memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
+ if (encap_idx == actions_n - 1) {
+ ext_actions[actions_n - 1] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = cp_mreg,
+ };
+ ext_actions[actions_n] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ };
+ } else {
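+ /*
+ * Insert the register copy action right before the encap action
+ * and keep the remaining actions after it.
+ */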
+ ext_actions[encap_idx] = (struct rte_flow_action){
+ .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
+ .conf = cp_mreg,
+ };
+ memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
+ sizeof(*ext_actions) * (actions_n - encap_idx));
+ }
return 0;
}
* Pointer to Ethernet device.
* @param[in] flow
* Parent flow structure pointer.
+ * @param[in] prefix_layers
+ * Prefix flow layer flags.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
static int
flow_create_split_metadata(struct rte_eth_dev *dev,
struct rte_flow *flow,
+ uint64_t prefix_layers,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
int mtr_sfx = 0;
size_t act_size;
int actions_n;
+ int encap_idx;
int ret;
/* Check whether extensive metadata feature is engaged. */
if (!config->dv_flow_en ||
config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
!mlx5_flow_ext_mreg_supported(dev))
- return flow_create_split_inner(dev, flow, NULL, attr, items,
- actions, external, error);
- actions_n = flow_parse_qrss_action(actions, &qrss);
+ return flow_create_split_inner(dev, flow, NULL, prefix_layers,
+ attr, items, actions, external,
+ error);
+ actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
+ &encap_idx);
if (qrss) {
/* Exclude hairpin flows from splitting. */
if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
"metadata flow");
/* Create the action list appended with copy register. */
ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
- actions_n, error);
+ actions_n, error, encap_idx);
if (ret < 0)
goto exit;
}
/* Add the unmodified original or prefix subflow. */
- ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
- ext_actions ? ext_actions : actions,
- external, error);
+ ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
+ items, ext_actions ? ext_actions :
+ actions, external, error);
if (ret < 0)
goto exit;
MLX5_ASSERT(dev_flow);
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- uint64_t hash_fields = dev_flow->hash_fields;
+ uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
/*
* Configure the tag item only if there is no meter subflow.
* reallocation becomes possible (for example, for
* other flows in other threads).
*/
- dev_flow->qrss_id = qrss_id;
- qrss_id = 0;
+ dev_flow->handle->qrss_id = qrss_id;
ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
error);
if (ret < 0)
}
dev_flow = NULL;
/* Add suffix subflow to execute Q/RSS. */
- ret = flow_create_split_inner(dev, flow, &dev_flow,
+ ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
&q_attr, mtr_sfx ? items :
q_items, q_actions,
external, error);
if (ret < 0)
goto exit;
+ /* Clear the qrss ID on success so it is only freed on failure. */
+ qrss_id = 0;
MLX5_ASSERT(dev_flow);
- dev_flow->hash_fields = hash_fields;
}
exit:
struct rte_flow_action *sfx_actions = NULL;
struct rte_flow_action *pre_actions = NULL;
struct rte_flow_item *sfx_items = NULL;
- const struct rte_flow_item *sfx_port_id_item;
struct mlx5_flow *dev_flow = NULL;
struct rte_flow_attr sfx_attr = *attr;
uint32_t mtr = 0;
if (priv->mtr_en)
actions_n = flow_check_meter_action(actions, &mtr);
if (mtr) {
- struct mlx5_rte_flow_item_tag *tag_spec;
- struct mlx5_rte_flow_item_tag *tag_mask;
/* The five prefix actions: meter, decap, encap, tag, end. */
act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
- sizeof(struct rte_flow_action_set_tag);
- /* tag, end. */
-#define METER_SUFFIX_ITEM 3
+ sizeof(struct mlx5_rte_flow_action_set_tag);
+ /* tag, vlan, port id, end. */
+#define METER_SUFFIX_ITEM 4
item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
sizeof(struct mlx5_rte_flow_item_tag) * 2;
sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no memory to split "
"meter flow");
+ sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
+ act_size);
pre_actions = sfx_actions + actions_n;
- mtr_tag_id = flow_meter_split_prep(dev, actions, sfx_actions,
- pre_actions);
+ mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
+ actions, sfx_actions,
+ pre_actions);
if (!mtr_tag_id) {
ret = -rte_errno;
goto exit;
}
/* Add the prefix subflow. */
- ret = flow_create_split_inner(dev, flow, &dev_flow, attr, items,
- pre_actions, external, error);
+ ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
+ items, pre_actions, external,
+ error);
if (ret) {
ret = -rte_errno;
goto exit;
}
- dev_flow->mtr_flow_id = mtr_tag_id;
- /* Prepare the suffix flow match pattern. */
- sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
- act_size);
- tag_spec = (struct mlx5_rte_flow_item_tag *)(sfx_items +
- METER_SUFFIX_ITEM);
- tag_spec->data = dev_flow->mtr_flow_id << MLX5_MTR_COLOR_BITS;
- tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0,
- error);
- tag_mask = tag_spec + 1;
- tag_mask->data = 0xffffff00;
- sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
- sfx_items->spec = tag_spec;
- sfx_items->last = NULL;
- sfx_items->mask = tag_mask;
- sfx_items++;
- sfx_port_id_item = find_port_id_item(items);
- if (sfx_port_id_item) {
- memcpy(sfx_items, sfx_port_id_item,
- sizeof(*sfx_items));
- sfx_items++;
- }
- sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
- sfx_items -= sfx_port_id_item ? 2 : 1;
+ dev_flow->handle->mtr_flow_id = mtr_tag_id;
+ /* Set the suffix group attribute. */
sfx_attr.group = sfx_attr.transfer ?
(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
MLX5_FLOW_TABLE_LEVEL_SUFFIX;
}
/* Add the prefix subflow. */
- ret = flow_create_split_metadata(dev, flow, &sfx_attr,
+ ret = flow_create_split_metadata(dev, flow, dev_flow ?
+ flow_get_prefix_layer_flags(dev_flow) :
+ 0, &sfx_attr,
sfx_items ? sfx_items : items,
sfx_actions ? sfx_actions : actions,
external, error);
/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
}
- LIST_INIT(&flow->dev_flows);
+ LIST_INIT(&flow->dev_handles);
if (rss && rss->types) {
unsigned int graph_root;
buf->entries = 1;
buf->entry[0].pattern = (void *)(uintptr_t)items;
}
+ /* Reset device flow index to 0. */
+ priv->flow_idx = 0;
for (i = 0; i < buf->entries; ++i) {
/*
* The splitter may create multiple dev_flows,
attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
attr_tx.ingress = 0;
attr_tx.egress = 1;
- dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+ dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
actions_hairpin_tx.actions, error);
if (!dev_flow)
goto error;
dev_flow->flow = flow;
dev_flow->external = 0;
- LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
ret = flow_drv_translate(dev, dev_flow, &attr_tx,
items_tx.items,
actions_hairpin_tx.actions, error);
if (ret)
goto error;
}
- if (dev->data->dev_started) {
+ /*
+ * If the flow is external (from the application) or the device is
+ * started, the flow is applied immediately.
+ */
+ if (external || dev->data->dev_started) {
ret = flow_drv_apply(dev, flow, error);
if (ret < 0)
goto error;
{
struct mlx5_priv *priv = dev->data->dev_private;
+ /*
+ * If the device is not started yet, the application is not allowed to
+ * create a flow. PMD default flows and traffic control flows
+ * are not affected.
+ */
+ if (unlikely(!dev->data->dev_started)) {
+ rte_errno = ENODEV;
+ DRV_LOG(DEBUG, "port %u is not started when "
+ "inserting a flow", dev->data->port_id);
+ return NULL;
+ }
return flow_list_create(dev, &priv->flows,
attr, items, actions, true, error);
}
* Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
+ * @param active
+ * Whether the flush is called actively (e.g. before port stop).
*/
void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ bool active)
{
+ uint32_t num_flushed = 0;
+
while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
flow = TAILQ_FIRST(list);
flow_list_destroy(dev, list, flow);
+ num_flushed++;
+ }
+ if (active) {
+ DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
+ dev->data->port_id, num_flushed);
}
}
return -rte_errno;
}
+/**
+ * Stop all default actions for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_stop_default(struct rte_eth_dev *dev)
+{
+ flow_mreg_del_default_copy_action(dev);
+}
+
+/**
+ * Start all default actions for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_start_default(struct rte_eth_dev *dev)
+{
+ struct rte_flow_error error;
+
+ /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
+ return flow_mreg_add_default_copy_action(dev, &error);
+}
+
+/**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
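+ /*
+ * The intermediate device flows are allocated once and reused for
+ * subsequent flow creations.
+ */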
+ if (!priv->inter_flows)
+ priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
+ sizeof(struct mlx5_flow), 0);
+}
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ rte_free(priv->inter_flows);
+ priv->inter_flows = NULL;
+}
+
/**
* Verify the flow list is empty
*
{
struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_flow_list_flush(dev, &priv->flows);
+ mlx5_flow_list_flush(dev, &priv->flows, false);
return 0;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_flow_list_flush(dev, &priv->flows);
+ mlx5_flow_list_flush(dev, &priv->flows, false);
}
/**