}
}
+/**
+ * Update VLAN's VID/PCP based on input rte_flow_action.
+ *
+ * Only RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP and
+ * RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID are handled; any other action
+ * type leaves @p vlan untouched.
+ *
+ * @param[in] action
+ * Pointer to struct rte_flow_action.
+ * @param[in, out] vlan
+ * Pointer to struct rte_vlan_hdr. Only the PCP or VID bits of
+ * vlan_tci are overwritten; the remaining TCI bits are preserved.
+ */
+static void
+mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
+ struct rte_vlan_hdr *vlan)
+{
+ uint16_t vlan_tci;
+ if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
+ vlan_tci =
+ ((const struct rte_flow_action_of_set_vlan_pcp *)
+ action->conf)->vlan_pcp;
+ /* Shift the 3-bit PCP value into its position in the TCI. */
+ vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
+ vlan->vlan_tci |= vlan_tci;
+ } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
+ /* vlan_vid in the action is big-endian; TCI is kept in CPU order. */
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
+ vlan->vlan_tci |= rte_be_to_cpu_16
+ (((const struct rte_flow_action_of_set_vlan_vid *)
+ action->conf)->vlan_vid);
+ }
+}
+
/**
* Convert modify-header action to DV specification.
*
MLX5_MODIFICATION_TYPE_ADD, error);
}
+/* Map metadata register ids (enum modify_reg) to the corresponding
+ * HW modify-header field ids used in modification commands.
+ */
+static enum mlx5_modification_field reg_to_field[] = {
+	[REG_A] = MLX5_MODI_META_DATA_REG_A,
+	[REG_B] = MLX5_MODI_META_DATA_REG_B,
+	[REG_C_0] = MLX5_MODI_META_REG_C_0,
+	[REG_C_1] = MLX5_MODI_META_REG_C_1,
+	[REG_C_2] = MLX5_MODI_META_REG_C_2,
+	[REG_C_3] = MLX5_MODI_META_REG_C_3,
+	[REG_C_4] = MLX5_MODI_META_REG_C_4,
+	[REG_C_5] = MLX5_MODI_META_REG_C_5,
+	[REG_C_6] = MLX5_MODI_META_REG_C_6,
+	[REG_C_7] = MLX5_MODI_META_REG_C_7,
+};
+
+/**
+ * Convert register set to DV specification.
+ *
+ * Appends one MLX5_MODIFICATION_TYPE_SET command writing @p conf->data
+ * into the metadata register selected by @p conf->id.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource; actions_num is advanced by one
+ * on success.
+ * @param[in] action
+ * Pointer to action specification; conf must point to a
+ * struct mlx5_rte_flow_action_set_tag.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_set_reg
+	(struct mlx5_flow_dv_modify_hdr_resource *resource,
+	 const struct rte_flow_action *action,
+	 struct rte_flow_error *error)
+{
+	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
+	struct mlx5_modification_cmd *actions = resource->actions;
+	uint32_t i = resource->actions_num;
+
+	if (i >= MLX5_MODIFY_NUM)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "too many items to modify");
+	actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
+	actions[i].field = reg_to_field[conf->id];
+	/* action_type/field share data0's storage; byte-swap the whole
+	 * command word to big-endian once both are set.
+	 */
+	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+	/* data1 is taken as provided by conf — presumably already
+	 * big-endian; TODO confirm against the action's producer.
+	 */
+	actions[i].data1 = conf->data;
+	/*
+	 * The old "!resource->actions_num" error branch was unreachable:
+	 * after the bound check above and the increment, actions_num >= 1.
+	 */
+	resource->actions_num = i + 1;
+	return 0;
+}
+
/**
* Validate META item.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_item_meta(struct rte_eth_dev *dev,
+flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_item *item,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
const struct rte_flow_item_meta *spec = item->spec;
const struct rte_flow_item_meta *mask = item->mask;
const struct rte_flow_item_meta nic_mask = {
- .data = RTE_BE32(UINT32_MAX)
+ .data = UINT32_MAX
};
int ret;
- uint64_t offloads = dev->data->dev_conf.txmode.offloads;
- if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
- return rte_flow_error_set(error, EPERM,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "match on metadata offload "
- "configuration is off for this port");
if (!spec)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
NULL,
"cannot pop vlan without a "
"match on (outer) vlan in the flow");
+ if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, port_id should "
+ "be after pop VLAN action");
return 0;
}
*/
static int
flow_dv_validate_action_push_vlan(uint64_t action_flags,
+ uint64_t item_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
RTE_FLOW_ERROR_TYPE_ACTION, action,
"no support for multiple VLAN "
"actions");
+ if (!mlx5_flow_find_action
+ (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
+ !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "push VLAN needs to match on VLAN in order to "
+ "get VLAN VID information because there is "
+ "no followed set VLAN VID action");
+ if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, port_id should "
+ "be after push VLAN");
(void)attr;
return 0;
}
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"VLAN PCP value is too big");
- if (mlx5_flow_find_action(actions,
- RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) == NULL)
+ if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "set VLAN PCP can only be used "
- "with push VLAN action");
- if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ "set VLAN PCP action must follow "
+ "the push VLAN action");
+ if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "set VLAN PCP action must precede "
- "the push VLAN action");
+ "Multiple VLAN PCP modification are "
+ "not supported");
+ if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, port_id should "
+ "be after set VLAN PCP");
return 0;
}
*/
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
+ uint64_t action_flags,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"VLAN VID value is too big");
- /* If a push VLAN action follows then it will handle this action */
- if (mlx5_flow_find_action(actions,
- RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
- return 0;
+ /* there is an of_push_vlan action before us */
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
+ if (mlx5_flow_find_action(actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Multiple VLAN VID modifications are "
+ "not supported");
+ else
+ return 0;
+ }
/*
* Action is on an existing VLAN header:
* Need to verify this is a single modify CID action.
* Rule mast include a match on outer VLAN.
*/
- if (mlx5_flow_find_action(++action,
- RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
+ if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"Multiple VLAN VID modifications are "
RTE_FLOW_ERROR_TYPE_ACTION, action,
"match on VLAN is required in order "
"to set VLAN VID");
+ if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, port_id should "
+ "be after set VLAN VID");
return 0;
}
return ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- switch (items->type) {
+ int type = items->type;
+
+ switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
return ret;
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+ case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
item_flags |= last_item;
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ int type = actions->type;
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions, "too many actions");
- switch (actions->type) {
+ switch (type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
ret = flow_dv_validate_action_push_vlan(action_flags,
+ item_flags,
actions, attr,
error);
if (ret < 0)
if (ret < 0)
return ret;
/* Count PCP with push_vlan command. */
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
ret = flow_dv_validate_action_set_vlan_vid
- (item_flags, actions, error);
+ (item_flags, action_flags,
+ actions, error);
if (ret < 0)
return ret;
/* Count VID with push_vlan command. */
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
MLX5_FLOW_ACTION_INC_TCP_ACK :
MLX5_FLOW_ACTION_DEC_TCP_ACK;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
meta_m = &rte_flow_item_meta_mask;
meta_v = (const void *)item->spec;
if (meta_v) {
- MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
- rte_be_to_cpu_32(meta_m->data));
- MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
- rte_be_to_cpu_32(meta_v->data & meta_m->data));
+ MLX5_SET(fte_match_set_misc2, misc2_m,
+ metadata_reg_a, meta_m->data);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ metadata_reg_a, meta_v->data & meta_m->data);
}
}
MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, value);
}
+/**
+ * Add tag item to matcher
+ *
+ * Writes the tag value/mask into the misc_parameters_2 metadata register
+ * field selected by the item's register id.
+ *
+ * NOTE(review): item->spec and item->mask are dereferenced without NULL
+ * checks — presumably the caller guarantees both are present for this
+ * internal item type; verify against the callers.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate (internal MLX5_RTE_FLOW_ITEM_TYPE_TAG).
+ */
+static void
+flow_dv_translate_item_tag(void *matcher, void *key,
+			   const struct rte_flow_item *item)
+{
+	void *misc2_m =
+		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
+	void *misc2_v =
+		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
+	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
+	enum modify_reg reg = tag_v->id;
+	/* Value and mask are carried big-endian; MLX5_SET takes CPU order. */
+	rte_be32_t value = tag_v->data;
+	rte_be32_t mask = tag_m->data;
+
+	/* MLX5_SET needs a literal field name, hence one case per register. */
+	switch (reg) {
+	case REG_A:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_B:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_0:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_1:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_2:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_3:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_4:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_5:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_6:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6,
+			 rte_be_to_cpu_32(value));
+		break;
+	case REG_C_7:
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7,
+			 rte_be_to_cpu_32(mask));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7,
+			 rte_be_to_cpu_32(value));
+		break;
+	/* NOTE(review): no default case — an unknown register id is
+	 * silently ignored; consider an assert for internal misuse.
+	 */
+	}
+}
+
/**
* Add source vport match to the specified matcher.
*
return 0;
}
+/**
+ * Add Tx queue matcher
+ *
+ * Translates the internal TX_QUEUE item into a match on the hardware
+ * send-queue number (source_sqn) of the given Tx queue. Silently adds
+ * no match if the item has no mask/spec or the queue lookup fails.
+ *
+ * @param[in] dev
+ * Pointer to the dev struct.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
+				void *matcher, void *key,
+				const struct rte_flow_item *item)
+{
+	const struct mlx5_rte_flow_item_tx_queue *queue_m;
+	const struct mlx5_rte_flow_item_tx_queue *queue_v;
+	void *misc_m =
+		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+	void *misc_v =
+		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+	struct mlx5_txq_ctrl *txq;
+	uint32_t queue;
+
+
+	queue_m = (const void *)item->mask;
+	if (!queue_m)
+		return;
+	queue_v = (const void *)item->spec;
+	if (!queue_v)
+		return;
+	/* Resolve the port-level queue index to the underlying SQ object;
+	 * the reference taken here is released below.
+	 */
+	txq = mlx5_txq_get(dev, queue_v->queue);
+	if (!txq)
+		return;
+	queue = txq->obj->sq->id;
+	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
+	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
+		 queue & queue_m->queue);
+	mlx5_txq_release(dev, queue_v->queue);
+}
+
/**
* Fill the flow with DV spec.
*
void *match_value = dev_flow->dv.value.buf;
uint8_t next_protocol = 0xff;
struct rte_vlan_hdr vlan = { 0 };
- bool vlan_inherited = false;
- uint16_t vlan_tci;
uint32_t table;
int ret = 0;
struct mlx5_flow_tbl_resource *tbl;
uint32_t port_id = 0;
struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+ int action_type = actions->type;
+ const struct rte_flow_action *found_action = NULL;
- switch (actions->type) {
+ switch (action_type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- if (!vlan_inherited) {
- flow_dev_get_vlan_info_from_items(items, &vlan);
- vlan_inherited = true;
- }
+ flow_dev_get_vlan_info_from_items(items, &vlan);
vlan.eth_proto = rte_be_to_cpu_16
((((const struct rte_flow_action_of_push_vlan *)
actions->conf)->ethertype));
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
if (flow_dv_create_action_push_vlan
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
- /* Push VLAN command is also handling this VLAN_VID */
- action_flags &= ~MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
- if (!vlan_inherited) {
- flow_dev_get_vlan_info_from_items(items, &vlan);
- vlan_inherited = true;
- }
- vlan_tci =
- ((const struct rte_flow_action_of_set_vlan_pcp *)
- actions->conf)->vlan_pcp;
- vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
- vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
- vlan.vlan_tci |= vlan_tci;
- /* Push VLAN command will use this value */
+ /* of_vlan_push action handled this action */
+ assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
- if (!vlan_inherited) {
- flow_dev_get_vlan_info_from_items(items, &vlan);
- vlan_inherited = true;
- }
- vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
- vlan.vlan_tci |= rte_be_to_cpu_16
- (((const struct rte_flow_action_of_set_vlan_vid *)
- actions->conf)->vlan_vid);
- /* Push VLAN command will use this value */
- if (mlx5_flow_find_action
- (actions,
- RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
break;
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ mlx5_update_vlan_vid_pcp(actions, &vlan);
/* If no VLAN push - this is a modify header action */
if (flow_dv_convert_action_modify_vlan_vid
(&res, actions, error))
MLX5_FLOW_ACTION_INC_TCP_ACK :
MLX5_FLOW_ACTION_DEC_TCP_ACK;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+ if (flow_dv_convert_action_set_reg(&res, actions,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
- flow->actions = action_flags;
+ dev_flow->actions = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int item_type = items->type;
- switch (items->type) {
+ switch (item_type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id(dev, match_mask,
match_value, items);
items, tunnel);
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+ flow_dv_translate_item_tag(match_mask, match_value,
+ items);
+ last_item = MLX5_FLOW_ITEM_TAG;
+ break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ flow_dv_translate_item_tx_queue(dev, match_mask,
+ match_value,
+ items);
+ last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+ break;
default:
break;
}
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
dv = &dev_flow->dv;
n = dv->actions_n;
- if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
if (flow->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
}
dv->actions[n++] = dv->hrxq->action;
}
- } else if (flow->actions &
+ } else if (dev_flow->actions &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
struct mlx5_flow_dv *dv = &dev_flow->dv;
if (dv->hrxq) {
- if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, dv->hrxq);
dv->flow = NULL;
}
if (dv->hrxq) {
- if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, dv->hrxq);