/* Bit-mask of all actions that add an encapsulation header (incl. VLAN push). */
#define MLX5_FLOW_ENCAP_ACTIONS	(MLX5_FLOW_ACTION_VXLAN_ENCAP | \
				 MLX5_FLOW_ACTION_NVGRE_ENCAP | \
				 MLX5_FLOW_ACTION_RAW_ENCAP | \
				 MLX5_FLOW_ACTION_OF_PUSH_VLAN)

/* Bit-mask of all actions that strip an encapsulation header (incl. VLAN pop). */
#define MLX5_FLOW_DECAP_ACTIONS	(MLX5_FLOW_ACTION_VXLAN_DECAP | \
				 MLX5_FLOW_ACTION_NVGRE_DECAP | \
				 MLX5_FLOW_ACTION_RAW_DECAP | \
				 MLX5_FLOW_ACTION_OF_POP_VLAN)
/* Bit-mask of all header-modify actions (incl. setting the VLAN VID). */
#define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID)

/* Bit-mask of the VLAN push/pop actions, used to detect VLAN manipulation. */
#define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
				MLX5_FLOW_ACTION_OF_PUSH_VLAN)
#ifndef IPPROTO_MPLS
#define IPPROTO_MPLS 137
uint32_t port_id; /**< Port ID value. */
};
+/* Push VLAN action resource structure */
+struct mlx5_flow_dv_push_vlan_action_resource {
+ LIST_ENTRY(mlx5_flow_dv_push_vlan_action_resource) next;
+ /* Pointer to next element. */
+ rte_atomic32_t refcnt; /**< Reference counter. */
+ void *action; /**< Direct verbs action object. */
+ uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
+ rte_be32_t vlan_tag; /**< VLAN tag value. */
+};
+
/*
* Max number of actions per DV flow.
* See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
/**< Pointer to the jump action resource. */
struct mlx5_flow_dv_port_id_action_resource *port_id_action;
/**< Pointer to port ID action resource. */
+ struct mlx5_vf_vlan vf_vlan;
+ /**< Structure for VF VLAN workaround. */
+ struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
+ /**< Pointer to push VLAN action resource in cache. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
/**< Action list. */
struct ibv_flow *flow; /**< Verbs flow pointer. */
struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+ struct mlx5_vf_vlan vf_vlan;
+ /**< Structure for VF VLAN workaround. */
};
/** Device flow structure. */
#endif
struct mlx5_flow_verbs verbs;
};
+ bool external; /**< true if the flow is created external to PMD. */
};
/* Flow structure. */
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
+ bool external,
struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
/* mlx5_flow.c */
/* Translate a rte_flow group number to a HW flow-table index; 'external'
 * flags flows created outside the PMD. Returns 0 on success, negative on
 * error with *error filled in.
 */
int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
			     bool external, uint32_t group, uint32_t *table,
			     struct rte_flow_error *error);
uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
uint64_t layer_types,
uint64_t hash_fields);
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
uint32_t subpriority);
/* Find the first action of the given type in an action array; returns NULL
 * if the array holds no such action.
 */
const struct rte_flow_action *mlx5_flow_find_action
					(const struct rte_flow_action *actions,
					 enum rte_flow_action_type action);
int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
struct rte_flow_error *error);
struct rte_flow_error *error);
/* Validate a VLAN pattern item against the layers already matched; 'dev' is
 * needed to check device/port-specific VLAN constraints.
 */
int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 struct rte_eth_dev *dev,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
uint64_t item_flags,