.next = RTE_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV4_TCP,
- MLX5_EXPANSION_GRE),
+ MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
ETH_RSS_NONFRAG_IPV4_OTHER,
[MLX5_EXPANSION_OUTER_IPV6] = {
.next = RTE_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV6_UDP,
- MLX5_EXPANSION_OUTER_IPV6_TCP),
+ MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
ETH_RSS_NONFRAG_IPV6_OTHER,
.tunnel = MLX5_FLOW_LAYER_NVGRE,
.ptype = RTE_PTYPE_TUNNEL_NVGRE,
},
+ {
+ .tunnel = MLX5_FLOW_LAYER_IPIP,
+ .ptype = RTE_PTYPE_TUNNEL_IP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
+ .ptype = RTE_PTYPE_TUNNEL_IP,
+ },
};
/**
}
}
+/**
+ * Return a pointer to the desired action in the list of actions.
+ *
+ * @param[in] actions
+ * The list of actions to search the action in.
+ * @param[in] action
+ * The action to find.
+ *
+ * @return
+ * Pointer to the action in the list, if found. NULL otherwise.
+ */
+const struct rte_flow_action *
+mlx5_flow_find_action(const struct rte_flow_action *actions,
+ enum rte_flow_action_type action)
+{
+ if (actions == NULL)
+ return NULL;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
+ if (actions->type == action)
+ return actions;
+ return NULL;
+}
+
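A hypothetical usage sketch (illustrative, not part of this patch): a caller
can probe a rule's action list for a QUEUE fate action and read its
configuration through the returned pointer.

	const struct rte_flow_action *qa;

	qa = mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_QUEUE);
	if (qa != NULL) {
		/* For a QUEUE action, conf points to rte_flow_action_queue. */
		const struct rte_flow_action_queue *queue = qa->conf;
		uint16_t rxq = queue->index; /* target Rx queue index */
	}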
/*
* Validate the flag action.
*
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't drop and mark in same flow");
- if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+ MLX5_FLOW_FATE_ESWITCH_ACTIONS))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have 2 fate actions in"
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"multiple L2 layers not supported");
+ if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "inner L2 layer should not "
+ "follow inner L3 layers");
if (!mask)
mask = &rte_flow_item_eth_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
+ * @param[in] dev
+ * Ethernet device the flow is being created on.
* @param[out] error
* Pointer to error structure.
*
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
uint64_t item_flags,
+ struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
const struct rte_flow_item_vlan nic_mask = {
- .tci = RTE_BE16(0x0fff),
- .inner_type = RTE_BE16(0xffff),
+ .tci = RTE_BE16(UINT16_MAX),
+ .inner_type = RTE_BE16(UINT16_MAX),
};
uint16_t vlan_tag = 0;
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
MLX5_FLOW_LAYER_OUTER_VLAN;
+ const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
if (item_flags & vlanm)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L2 layer cannot follow L3/L4 layer");
+ else if ((item_flags & l2m) == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no L2 layer before VLAN");
if (!mask)
mask = &rte_flow_item_vlan_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
error);
if (ret)
return ret;
+ if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->vmwa_context) {
+ /*
+ * A non-NULL context means we are running in a virtual
+ * machine with SR-IOV enabled, and must create a VLAN
+ * interface so the hypervisor sets up the E-Switch vport
+ * context correctly. We avoid creating multiple VLAN
+ * interfaces, so we cannot support a VLAN tag mask.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN tag mask is not"
+ " supported in virtual"
+ " environment");
+ }
+ }
if (spec) {
vlan_tag = spec->tci;
vlan_tag &= mask->tci;
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an NVGRE layer.");
+ else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no L2 layer before IPV4");
if (!mask)
mask = &rte_flow_item_ipv4_mask;
else if (mask->hdr.next_proto_id != 0 &&
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an NVGRE layer.");
+ else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no L2 layer before IPV6");
if (!mask)
mask = &rte_flow_item_ipv6_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
return 0;
}
+/**
+ * Validate Geneve item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_geneve *spec = item->spec;
+ const struct rte_flow_item_geneve *mask = item->mask;
+ int ret;
+ uint16_t gbhdr;
+ uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
+ const struct rte_flow_item_geneve nic_mask = {
+ .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
+ .vni = "\xff\xff\xff",
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
+
+ if (!(priv->config.hca_attr.flex_parser_protocols &
+ MLX5_HCA_FLEX_GENEVE_ENABLED) ||
+ !priv->config.hca_attr.tunnel_stateless_geneve_rx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 Geneve is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple tunnel layers not"
+ " supported");
+ /*
+ * Verify an outer UDP layer is present, as required by the
+ * Geneve specification:
+ * https://tools.ietf.org/html/rfc8926
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_geneve_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_geneve), error);
+ if (ret)
+ return ret;
+ if (spec) {
+ gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
+ if (MLX5_GENEVE_VER_VAL(gbhdr) ||
+ MLX5_GENEVE_CRITO_VAL(gbhdr) ||
+ MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Geneve protocol unsupported"
+ " fields are being used");
+ if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported Geneve options length");
+ }
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Geneve tunnel must be fully defined");
+ return 0;
+}
+
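For reference, a minimal sketch of a Geneve pattern item that would pass the
checks above (the VNI value is illustrative; version, OAM/critical and
reserved bits are zero, and no options are carried, so the options-length
check is satisfied):

	/* Hypothetical spec: VNI 0x123456, Transparent Ethernet Bridging. */
	struct rte_flow_item_geneve geneve_spec = {
		.ver_opt_len_o_c_rsvd0 = RTE_BE16(0),
		.vni = "\x12\x34\x56",
		.protocol = RTE_BE16(0x6558),
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.spec = &geneve_spec,
		.mask = &rte_flow_item_geneve_mask,
	};

In a complete pattern this item must follow outer Ethernet/IP/UDP items,
since the function requires MLX5_FLOW_LAYER_OUTER_L4_UDP and a fully
defined outer layer.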
/**
* Validate MPLS item.
*
const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
+ bool external __rte_unused,
struct rte_flow_error *error)
{
return rte_flow_error_set(error, ENOTSUP,
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
+ * @param[in] external
+ * This flow rule is created by a request external to the PMD.
* @param[out] error
* Pointer to the error structure.
*
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+ bool external, struct rte_flow_error *error)
{
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
fops = flow_get_drv_ops(type);
- return fops->validate(dev, attr, items, actions, error);
+ return fops->validate(dev, attr, items, actions, external, error);
}
/**
{
int ret;
- ret = flow_drv_validate(dev, attr, items, actions, error);
+ ret = flow_drv_validate(dev, attr, items, actions, true, error);
if (ret < 0)
return ret;
return 0;
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by a request external to the PMD.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+ bool external, struct rte_flow_error *error)
{
struct rte_flow *flow = NULL;
struct mlx5_flow *dev_flow;
uint32_t i;
uint32_t flow_size;
- ret = flow_drv_validate(dev, attr, items, actions, error);
+ ret = flow_drv_validate(dev, attr, items, actions, external, error);
if (ret < 0)
return NULL;
flow_size = sizeof(struct rte_flow);
if (!dev_flow)
goto error;
dev_flow->flow = flow;
+ dev_flow->external = external;
LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
ret = flow_drv_translate(dev, dev_flow, attr,
buf->entry[i].pattern,
return NULL;
}
+/**
+ * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
+ * incoming packets to table 1.
+ *
+ * Other flow rules, requested for group n, will be created in
+ * e-switch table n+1.
+ * A jump action to e-switch group n will be created as a jump to
+ * table n+1.
+ *
+ * Used when working in switchdev mode, to utilise the advantages of
+ * table 1 and above.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to flow on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow *
+mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
+{
+ const struct rte_flow_attr attr = {
+ .group = 0,
+ .priority = 0,
+ .ingress = 1,
+ .egress = 0,
+ .transfer = 1,
+ };
+ const struct rte_flow_item pattern = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ };
+ struct rte_flow_action_jump jump = {
+ .group = 1,
+ };
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+
+ return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
+ actions, false, &error);
+}
+
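A hypothetical call-site sketch (the dv_esw_en switchdev flag is an
assumption here, not shown in this patch): the rule would be installed once
when enabling traffic in switchdev mode.

	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.dv_esw_en) {
		/* NULL means failure; rte_errno is set by flow_list_create(). */
		if (!mlx5_flow_create_esw_table_zero_flow(dev))
			return -rte_errno;
	}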
/**
* Create a flow.
*
struct mlx5_priv *priv = dev->data->dev_private;
return flow_list_create(dev, &priv->flows,
- attr, items, actions, error);
+ attr, items, actions, true, error);
}
/**
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
flow = flow_list_create(dev, &priv->ctrl_flows,
- &attr, items, actions, &error);
+ &attr, items, actions, false, &error);
if (!flow)
return -rte_errno;
return 0;
goto error;
}
flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
- fdir_flow->items, fdir_flow->actions, NULL);
+ fdir_flow->items, fdir_flow->actions, true,
+ NULL);
if (!flow)
goto error;
assert(!flow->fdir);
pool->raw_hw = NULL;
sh->cmng.pending_queries--;
}
+
+/**
+ * Translate the rte_flow group index to HW table value.
+ *
+ * @param[in] attributes
+ * Pointer to flow attributes.
+ * @param[in] external
+ * Whether the group index belongs to a flow rule created by a request
+ * external to the PMD.
+ * @param[in] group
+ * rte_flow group index value.
+ * @param[out] table
+ * HW table value.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
+ uint32_t group, uint32_t *table,
+ struct rte_flow_error *error)
+{
+ if (attributes->transfer && external) {
+ if (group == UINT32_MAX)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "group index not supported");
+ *table = group + 1;
+ } else {
+ *table = group;
+ }
+ return 0;
+}
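A minimal worked example (hypothetical caller, not part of this patch): an
external transfer rule requesting group 2 is placed in HW table 3, while
internal or non-transfer rules keep the 1:1 group-to-table mapping.

	uint32_t table = 0;
	struct rte_flow_error err;
	const struct rte_flow_attr attr = { .group = 2, .transfer = 1 };

	/* external == true and attr.transfer set: table = group + 1. */
	if (mlx5_flow_group_to_table(&attr, true, attr.group, &table, &err))
		return -rte_errno;
	/* table == 3 here; with external == false it would stay 2. */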