#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif
+#ifndef HAVE_MLX5DV_DR_ESWITCH
+#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
+#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
+#endif
+#endif
+
+#ifndef HAVE_MLX5DV_DR
+#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
+#endif
+
union flow_dv_attr {
struct {
uint32_t valid:1;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct rte_flow *flow = dev_flow->flow;
- struct mlx5dv_dr_ns *ns;
+ struct mlx5dv_dr_domain *domain;
resource->flags = flow->group ? 0 : 1;
- if (flow->ingress)
- ns = sh->rx_ns;
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ domain = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ domain = sh->rx_domain;
else
- ns = sh->tx_ns;
+ domain = sh->tx_domain;
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_packet_reformat
(sh->ctx, cache_resource->reformat_type,
- cache_resource->ft_type, ns, cache_resource->flags,
+ cache_resource->ft_type, domain, cache_resource->flags,
cache_resource->size,
(cache_resource->size ? cache_resource->buf : NULL));
if (!cache_resource->verbs_action) {
"cannot allocate resource memory");
*cache_resource = *resource;
cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_vport(priv->sh->fdb_ns,
- resource->port_id);
+ mlx5_glue->dr_create_flow_action_dest_vport
+ (priv->sh->fdb_domain, resource->port_id);
if (!cache_resource->action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
size_t *size, struct rte_flow_error *error)
{
- struct ether_hdr *eth = NULL;
- struct vlan_hdr *vlan = NULL;
+ struct rte_ether_hdr *eth = NULL;
+ struct rte_vlan_hdr *vlan = NULL;
struct ipv4_hdr *ipv4 = NULL;
struct ipv6_hdr *ipv6 = NULL;
struct udp_hdr *udp = NULL;
- struct vxlan_hdr *vxlan = NULL;
- struct vxlan_gpe_hdr *vxlan_gpe = NULL;
+ struct rte_vxlan_hdr *vxlan = NULL;
+ struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
struct gre_hdr *gre = NULL;
size_t len;
size_t temp_size = 0;
rte_memcpy((void *)&buf[temp_size], items->spec, len);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth = (struct ether_hdr *)&buf[temp_size];
+ eth = (struct rte_ether_hdr *)&buf[temp_size];
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan = (struct vlan_hdr *)&buf[temp_size];
+ vlan = (struct rte_vlan_hdr *)&buf[temp_size];
if (!eth)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
ipv6->proto = IPPROTO_UDP;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan = (struct vxlan_hdr *)&buf[temp_size];
+ vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
if (!udp)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
+ vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
if (!udp)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
* Pointer to action structure.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
+ * @param[in] transfer
+ * Mark whether the flow is an E-Switch flow.
* @param[out] error
* Pointer to the error structure.
*
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
struct mlx5_flow *dev_flow,
+ uint8_t transfer,
struct rte_flow_error *error)
{
const struct rte_flow_item *encap_data;
struct mlx5_flow_dv_encap_decap_resource res = {
.reformat_type =
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
- .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
+ .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
};
if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
* Pointer to rte_eth_dev structure.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
+ * @param[in] transfer
+ * Mark whether the flow is an E-Switch flow.
* @param[out] error
* Pointer to the error structure.
*
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
+ uint8_t transfer,
struct rte_flow_error *error)
{
struct mlx5_flow_dv_encap_decap_resource res = {
.size = 0,
.reformat_type =
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
- .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
+ .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
};
if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
res.reformat_type = attr->egress ?
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
- res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
- MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ if (attr->transfer)
+ res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ else
+ res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+ struct mlx5dv_dr_domain *ns;
- struct mlx5dv_dr_ns *ns =
- resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
- sh->tx_ns : sh->rx_ns;
-
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ ns = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+ ns = sh->tx_domain;
+ else
+ ns = sh->rx_domain;
+ resource->flags =
+ dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
if (resource->ft_type == cache_resource->ft_type &&
resource->actions_num == cache_resource->actions_num &&
+ resource->flags == cache_resource->flags &&
!memcmp((const void *)resource->actions,
(const void *)cache_resource->actions,
(resource->actions_num *
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_modify_header
(sh->ctx, cache_resource->ft_type,
- ns, 0,
+ ns, cache_resource->flags,
cache_resource->actions_num *
sizeof(cache_resource->actions[0]),
(uint64_t *)cache_resource->actions);
tbl = &sh->fdb_tbl[table_id];
if (!tbl->obj)
tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->fdb_ns, table_id);
+ (sh->fdb_domain, table_id);
} else if (egress) {
tbl = &sh->tx_tbl[table_id];
if (!tbl->obj)
tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->tx_ns, table_id);
+ (sh->tx_domain, table_id);
} else {
tbl = &sh->rx_tbl[table_id];
if (!tbl->obj)
tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->rx_ns, table_id);
+ (sh->rx_domain, table_id);
}
if (!tbl->obj) {
rte_flow_error_set(error, ENOMEM,
union flow_dv_attr flow_attr = { .attr = 0 };
struct mlx5_flow_dv_tag_resource tag_resource;
uint32_t modify_action_position = UINT32_MAX;
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+ flow->group = attr->group;
+ if (attr->transfer)
+ res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
for (; !actions_end ; actions++) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
if (flow_dv_create_action_l2_encap(dev, actions,
- dev_flow, error))
+ dev_flow,
+ attr->transfer,
+ error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
- if (flow_dv_create_action_l2_encap(dev, actions,
- dev_flow,
- error))
+ if (flow_dv_create_action_l2_encap
+ (dev, actions, dev_flow, attr->transfer,
+ error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
}
/* Handle decap only if it isn't followed by encap. */
if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
- if (flow_dv_create_action_l2_decap(dev,
- dev_flow,
- error))
+ if (flow_dv_create_action_l2_decap
+ (dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
}
dev_flow->dv.actions_n = actions_n;
flow->actions = action_flags;
- if (attr->ingress && !attr->transfer &&
- (priv->representor || priv->master)) {
- /* It was validated - we support unidirection flows only. */
- assert(!attr->egress);
- /*
- * Add matching on source vport index only
- * for ingress rules in E-Switch configurations.
- */
- flow_dv_translate_item_source_vport(matcher.mask.buf,
- dev_flow->dv.value.buf,
- priv->vport_id,
- 0xffff);
- }
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- void *match_mask = matcher.mask.buf;
- void *match_value = dev_flow->dv.value.buf;
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
}
item_flags |= last_item;
}
+ /*
+ * In case of ingress traffic when E-Switch mode is enabled,
+ * we have two cases where we need to set the source port manually.
+ * The first one is in case of a NIC steering rule, and the second is an
+ * E-Switch rule where no port_id item was found. In both cases
+ * the source port is set according to the current port in use.
+ */
+ if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
+ (priv->representor || priv->master)) {
+ if (flow_dv_translate_item_port_id(dev, match_mask,
+ match_value, NULL))
+ return -rte_errno;
+ }
assert(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
dev_flow->layers = item_flags;
{
struct mlx5_flow_dv *dv;
struct mlx5_flow *dev_flow;
+ struct mlx5_priv *priv = dev->data->dev_private;
int n;
int err;
dv = &dev_flow->dv;
n = dv->actions_n;
if (flow->actions & MLX5_FLOW_ACTION_DROP) {
- dv->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dv->hrxq) {
- rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot get drop hash queue");
- goto error;
+ if (flow->transfer) {
+ dv->actions[n++] = priv->sh->esw_drop_action;
+ } else {
+ dv->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dv->hrxq) {
+ rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot get drop hash queue");
+ goto error;
+ }
+ dv->actions[n++] = dv->hrxq->action;
}
- dv->actions[n++] = dv->hrxq->action;
} else if (flow->actions &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;