From: Xueming Li <xuemingl@mellanox.com>
Date: Mon, 23 Apr 2018 12:33:02 +0000 (+0800)
Subject: net/mlx5: support L3 VXLAN flow
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=78a54648ffe3d3174af0daf6f276abec22832bde;p=dpdk.git

net/mlx5: support L3 VXLAN flow

This patch supports L3 VXLAN, which has no inner L2 header compared to the
standard VXLAN protocol. L3 VXLAN uses a specific overlay UDP destination
port to discriminate it from standard VXLAN; a device parameter and FW have
to be configured to support it:

  sudo mlxconfig -d <device> -y s IP_OVER_VXLAN_EN=1
  sudo mlxconfig -d <device> -y s IP_OVER_VXLAN_PORT=<port>

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index c28c83278f..db2af188f3 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -327,6 +327,32 @@ Run-time configuration
 
   Enabled by default, valid only on VF devices ignored otherwise.
 
+- ``l3_vxlan_en`` parameter [int]
+
+  A nonzero value allows L3 VXLAN flow creation. To enable L3 VXLAN, users
+  have to configure firmware and enable this parameter. This is a prerequisite
+  to receive this kind of traffic.
+
+  Disabled by default.
+
+Firmware configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+- L3 VXLAN destination UDP port
+
+  .. code-block:: console
+
+     mlxconfig -d <device> set IP_OVER_VXLAN_EN=1
+     mlxconfig -d <device> set IP_OVER_VXLAN_PORT=<udp dport>
+
+  Verify configurations are set:
+
+  .. code-block:: console
+
+     mlxconfig -d <device> query | grep IP_OVER_VXLAN
+     IP_OVER_VXLAN_EN                    True(1)
+     IP_OVER_VXLAN_PORT                  <udp dport>
+
 Prerequisites
 -------------
 
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 24edcdab49..8f983061a0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -69,6 +69,9 @@
 /* Device parameter to enable hardware Rx vector. */
 #define MLX5_RX_VEC_EN "rx_vec_en"
 
+/* Allow L3 VXLAN flow creation. */
+#define MLX5_L3_VXLAN_EN "l3_vxlan_en"
+
 /* Activate Netlink support in VF mode.
*/ #define MLX5_VF_NL_EN "vf_nl_en" @@ -418,6 +421,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->tx_vec_en = !!tmp; } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { config->rx_vec_en = !!tmp; + } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { + config->l3_vxlan_en = !!tmp; } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { config->vf_nl_en = !!tmp; } else { @@ -451,6 +456,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) MLX5_TXQ_MAX_INLINE_LEN, MLX5_TX_VEC_EN, MLX5_RX_VEC_EN, + MLX5_L3_VXLAN_EN, MLX5_VF_NL_EN, NULL, }; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index a0c393ef9c..874978baa8 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -88,6 +88,7 @@ struct mlx5_dev_config { unsigned int tx_vec_en:1; /* Tx vector is enabled. */ unsigned int rx_vec_en:1; /* Rx vector is enabled. */ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ + unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */ unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */ unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */ unsigned int max_verbs_prio; /* Number of Verb flow priorities. */ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 2a4727b709..f1811c5c93 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -51,6 +51,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate; /** Structure give to the conversion functions. */ struct mlx5_flow_data { + struct rte_eth_dev *dev; /** Ethernet device. */ struct mlx5_flow_parse *parser; /** Parser context. */ struct rte_flow_error *error; /** Error context. */ }; @@ -116,6 +117,7 @@ enum hash_rxq_type { HASH_RXQ_UDPV6, HASH_RXQ_IPV6, HASH_RXQ_ETH, + HASH_RXQ_TUNNEL, }; /* Initialization data for hash RX queue. 
*/ @@ -241,6 +243,14 @@ struct rte_flow { (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \ (type) == RTE_FLOW_ITEM_TYPE_GRE) +#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12) + +const uint32_t ptype_ext[] = { + [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_L4_UDP, + [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE, +}; + /** Structure to generate a simple graph of layers supported by the NIC. */ struct mlx5_flow_items { /** List of possible actions for these items. */ @@ -413,7 +423,9 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .dst_sz = sizeof(struct ibv_flow_spec_tunnel), }, [RTE_FLOW_ITEM_TYPE_VXLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH), + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, /* For L3 VXLAN. */ + RTE_FLOW_ITEM_TYPE_IPV6), /* For L3 VXLAN. */ .actions = valid_actions, .mask = &(const struct rte_flow_item_vxlan){ .vni = "\xff\xff\xff", @@ -439,6 +451,7 @@ struct mlx5_flow_parse { uint8_t rss_key[40]; /**< copy of the RSS key. */ enum hash_rxq_type layer; /**< Last pattern layer detected. */ enum hash_rxq_type out_layer; /**< Last outer pattern layer detected. */ + uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. 
*/ struct ibv_counter_set *cs; /**< Holds the counter set for the rule */ struct { struct ibv_flow_attr *ibv_attr; @@ -1186,6 +1199,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, parser->inner = 0; for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { struct mlx5_flow_data data = { + .dev = dev, .parser = parser, .error = error, }; @@ -1407,6 +1421,7 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, const void *default_mask, struct mlx5_flow_data *data) { + struct priv *priv = data->dev->data->dev_private; const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; struct mlx5_flow_parse *parser = data->parser; @@ -1416,6 +1431,15 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, .size = ipv4_size, }; + if (parser->layer == HASH_RXQ_TUNNEL && + parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && + !priv->config.l3_vxlan_en) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 VXLAN not enabled by device" + " parameter and/or not configured" + " in firmware"); /* Don't update layer for the inner pattern. */ if (!parser->inner) parser->layer = HASH_RXQ_IPV4; @@ -1462,6 +1486,7 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, const void *default_mask, struct mlx5_flow_data *data) { + struct priv *priv = data->dev->data->dev_private; const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; struct mlx5_flow_parse *parser = data->parser; @@ -1471,6 +1496,15 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, .size = ipv6_size, }; + if (parser->layer == HASH_RXQ_TUNNEL && + parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && + !priv->config.l3_vxlan_en) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 VXLAN not enabled by device" + " parameter and/or not configured" + " in firmware"); /* Don't update layer for the inner pattern. 
*/ if (!parser->inner) parser->layer = HASH_RXQ_IPV6;