}
/**
- * Check if the flow should be splited due to hairpin.
+ * Check if the flow should be split due to hairpin.
* The reason for the split is that in current HW we can't
- * support encap on Rx, so if a flow have encap we move it
- * to Tx.
+ * support encap and push-vlan on Rx, so if a flow contains
+ * these actions, we move them to Tx.
*
* @param dev
* Pointer to Ethernet device.
{
int queue_action = 0;
int action_n = 0;
- int encap = 0;
+ int split = 0;
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action_raw_encap *raw_encap;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- encap = 1;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ split++;
action_n++;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
if (raw_encap->size >
(sizeof(struct rte_flow_item_eth) +
sizeof(struct rte_flow_item_ipv4)))
- encap = 1;
+ split++;
action_n++;
break;
default:
break;
}
}
- if (encap == 1 && queue_action)
+ if (split && queue_action)
return action_n;
return 0;
}
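As a rough illustration of the check above, the sketch below (a hypothetical helper, not the PMD function itself, and with the hairpin-queue validation of the QUEUE/RSS actions omitted) walks an rte_flow action list, counts the actions that current HW cannot apply on Rx (encap, push-vlan and VLAN set actions, or a raw encap larger than plain L2/L3 headers), and reports the total action count only when such an action coexists with a queue/RSS fan-out:

#include <rte_flow.h>

/* Illustrative sketch of the hairpin split check. */
static int
hairpin_split_needed(const struct rte_flow_action actions[])
{
	const struct rte_flow_action_raw_encap *raw;
	int fanout = 0;   /* a QUEUE or RSS action was seen */
	int split = 0;    /* actions that must move to the Tx flow */
	int action_n = 0; /* total number of actions scanned */

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_RSS:
			fanout = 1;
			action_n++;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			split++;
			action_n++;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			raw = actions->conf;
			if (raw->size > sizeof(struct rte_flow_item_eth) +
					sizeof(struct rte_flow_item_ipv4))
				split++;
			action_n++;
			break;
		default:
			action_n++;
			break;
		}
	}
	/* Mirror the patch: return the action count only when a split is due. */
	return (split && fanout) ? action_n : 0;
}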
};
struct mlx5_flow_action_copy_mreg cp_mreg = {
.dst = REG_B,
- .src = 0,
+ .src = REG_NONE,
};
struct rte_flow_action_jump jump = {
.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
/**
* Split the hairpin flow.
- * Since HW can't support encap on Rx we move the encap to Tx.
+ * Since HW can't support encap and push-vlan on Rx, we move these
+ * actions to Tx.
 * If the count action is after the encap, then we also
 * move the count action. In this case, the count will also
 * measure the outer bytes.
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
rte_memcpy(actions_tx, actions,
sizeof(struct rte_flow_action));
actions_tx++;
tag_item->data = UINT32_MAX;
tag_item->id = UINT16_MAX;
item->mask = tag_item;
- addr += sizeof(struct mlx5_rte_flow_item_tag);
item->last = NULL;
item++;
item->type = RTE_FLOW_ITEM_TYPE_END;
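To make the Rx/Tx partition easier to follow, here is a simplified, schematic sketch (a hypothetical helper, not the driver code; the internal tag item, metadata register handling and raw-encap case shown in the hunk above are left out) of splitting the original action list into an Rx list and a Tx list, with a count action that follows the encap moving to Tx as well:

#include <rte_flow.h>

static void
hairpin_split_actions(const struct rte_flow_action *actions,
		      struct rte_flow_action *actions_rx,
		      struct rte_flow_action *actions_tx)
{
	int encap_seen = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			/* HW can't apply these on Rx: move them to Tx. */
			*actions_tx++ = *actions;
			encap_seen = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			/* A count placed after the encap follows it to Tx
			 * so that the outer bytes are measured too.
			 */
			if (encap_seen)
				*actions_tx++ = *actions;
			else
				*actions_rx++ = *actions;
			break;
		default:
			*actions_rx++ = *actions;
			break;
		}
	}
	*actions_rx = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END };
	*actions_tx = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END };
}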
/* Internal PMD action to set register. */
struct mlx5_rte_flow_item_tag q_tag_spec = {
.data = qrss_id,
- .id = 0,
+ .id = REG_NONE,
};
struct rte_flow_item q_items[] = {
{
goto set_alarm;
dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
(&pool->a64_dcs);
+ if (dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) {
+ /* Pool without valid counter. */
+ pool->raw_hw = NULL;
+ goto next_pool;
+ }
offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
/*
* Identify the counters released between query trigger and query
pool->raw_hw->min_dcs_id = dcs->id;
LIST_REMOVE(pool->raw_hw, next);
sh->cmng.pending_queries++;
+next_pool:
pool_index++;
if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
batch ^= 0x1;
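If I read the added check correctly, it relies on the standard power-of-two alignment test: a batch query needs a base counter ID aligned to MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT, so a pool whose first DevX counter ID has any of the low bits set cannot be queried that way and is skipped. A tiny standalone illustration of the bit test (the alignment value of 4 and the IDs are assumed for the example, not taken from the driver):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint32_t align = 4;               /* assumed alignment value */
	assert((0x7000u & (align - 1)) == 0);   /* aligned ID: pool is queried */
	assert((0x7002u & (align - 1)) != 0);   /* unaligned ID: pool is skipped */
	return 0;
}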