#define TCA_TUNNEL_KEY_NO_CSUM 10
#endif
+#ifndef HAVE_TCA_TUNNEL_KEY_ENC_TOS
+#define TCA_TUNNEL_KEY_ENC_TOS 12
+#endif
+
+#ifndef HAVE_TCA_TUNNEL_KEY_ENC_TTL
+#define TCA_TUNNEL_KEY_ENC_TTL 13
+#endif
+
#else /* HAVE_TC_ACT_TUNNEL_KEY */
#define TCA_ACT_TUNNEL_KEY 17
#define TCA_TUNNEL_KEY_ENC_KEY_ID 7
#define TCA_TUNNEL_KEY_ENC_DST_PORT 9
#define TCA_TUNNEL_KEY_NO_CSUM 10
+#define TCA_TUNNEL_KEY_ENC_TOS 12
+#define TCA_TUNNEL_KEY_ENC_TTL 13
struct tc_tunnel_key {
tc_gen;
#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK
#define TCA_FLOWER_KEY_TCP_FLAGS_MASK 72
#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS
+#define TCA_FLOWER_KEY_IP_TOS 73
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS_MASK
+#define TCA_FLOWER_KEY_IP_TOS_MASK 74
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL
+#define TCA_FLOWER_KEY_IP_TTL 75
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL_MASK
+#define TCA_FLOWER_KEY_IP_TTL_MASK 76
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TOS
+#define TCA_FLOWER_KEY_ENC_IP_TOS 80
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK
+#define TCA_FLOWER_KEY_ENC_IP_TOS_MASK 81
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TTL
+#define TCA_FLOWER_KEY_ENC_IP_TTL 82
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK
+#define TCA_FLOWER_KEY_ENC_IP_TTL_MASK 83
+#endif
+
#ifndef HAVE_TC_ACT_GOTO_CHAIN
#define TC_ACT_GOTO_CHAIN 0x20000000
#endif
#define TCA_ACT_MAX_PRIO 32
#endif
-/** UDP port range of VXLAN devices created by driver. */
-#define MLX5_VXLAN_PORT_MIN 30000
-#define MLX5_VXLAN_PORT_MAX 60000
+/** Parameters of VXLAN devices created by driver. */
+#define MLX5_VXLAN_DEFAULT_VNI 1
#define MLX5_VXLAN_DEVICE_PFX "vmlx_"
/** Tunnel action type, used for @p type in header structure. */
#define FLOW_TCF_ENCAP_UDP_SRC (1u << 6)
#define FLOW_TCF_ENCAP_UDP_DST (1u << 7)
#define FLOW_TCF_ENCAP_VXLAN_VNI (1u << 8)
+#define FLOW_TCF_ENCAP_IP_TTL (1u << 9)
+#define FLOW_TCF_ENCAP_IP_TOS (1u << 10)
/**
* Structure for holding netlink context.
/** VXLAN virtual netdev. */
struct tcf_vtep {
LIST_ENTRY(tcf_vtep) next;
- LIST_HEAD(, tcf_neigh_rule) neigh;
- LIST_HEAD(, tcf_local_rule) local;
uint32_t refcnt;
unsigned int ifindex; /**< Own interface index. */
- unsigned int ifouter; /**< Index of device attached to. */
uint16_t port;
uint8_t created;
};
struct flow_tcf_tunnel_hdr hdr;
struct tcf_irule *iface;
uint32_t mask;
+ uint8_t ip_tos;
+ uint8_t ip_ttl_hop;
struct {
struct ether_addr dst;
struct ether_addr src;
},
.ipv4.hdr = {
.next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ .type_of_service = 0xff,
.src_addr = RTE_BE32(0xffffffff),
.dst_addr = RTE_BE32(0xffffffff),
},
.ipv6.hdr = {
.proto = 0xff,
+ .vtc_flow = RTE_BE32(0xfful << IPV6_HDR_FL_SHIFT),
+ .hop_limits = 0xff,
.src_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
" must be specified for"
" vxlan encapsulation");
}
+ if (mask->hdr.type_of_service &&
+ mask->hdr.type_of_service != 0xff)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv4.hdr.type_of_service\" field"
+ " for vxlan encapsulation");
+ if (mask->hdr.time_to_live &&
+ mask->hdr.time_to_live != 0xff)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv4.hdr.time_to_live\" field"
+ " for vxlan encapsulation");
return 0;
}
{
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
+ uint8_t msk6;
if (!spec) {
/*
" must be specified for"
" vxlan encapsulation");
}
+ msk6 = (rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
+ IPV6_HDR_TC_SHIFT) & 0xff;
+ if (msk6 && msk6 != 0xff)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv6.hdr.vtc_flow.tos\" field"
+ " for vxlan encapsulation");
+ if (mask->hdr.hop_limits && mask->hdr.hop_limits != 0xff)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "no support for partial mask on"
+ " \"ipv6.hdr.hop_limits\" field"
+ " for vxlan encapsulation");
return 0;
}
break;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv4
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv4, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv6
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv6, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
vlan_etype = spec.vlan->inner_type;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv4
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv4, error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
}
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ ret = mlx5_flow_validate_item_ipv6
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv6, error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
SZ_NLATTR_TYPE_OF(uint8_t) + /* VLAN prio. */
SZ_NLATTR_TYPE_OF(uint16_t); /* VLAN ID. */
break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV4: {
+ const struct rte_flow_item_ipv4 *ipv4 = items->mask;
+
size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
SZ_NLATTR_TYPE_OF(uint32_t) * 4;
/* dst/src IP addr and mask. */
+ if (ipv4 && ipv4->hdr.time_to_live)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv4 && ipv4->hdr.type_of_service)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
+ }
+ case RTE_FLOW_ITEM_TYPE_IPV6: {
+ const struct rte_flow_item_ipv6 *ipv6 = items->mask;
+
size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 4;
/* dst/src IP addr and mask. */
+ if (ipv6 && ipv6->hdr.hop_limits)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
+ (0xfful << IPV6_HDR_TC_SHIFT)))
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
+ }
case RTE_FLOW_ITEM_TYPE_UDP:
size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
SZ_NLATTR_TYPE_OF(uint16_t) * 4;
case RTE_FLOW_ITEM_TYPE_ETH:
/* This item does not require message buffer. */
break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV4: {
+ const struct rte_flow_item_ipv4 *ipv4 = items->mask;
+
size += SZ_NLATTR_DATA_OF(IPV4_ADDR_LEN) * 2;
+ if (ipv4 && ipv4->hdr.time_to_live)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv4 && ipv4->hdr.type_of_service)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
+ }
+ case RTE_FLOW_ITEM_TYPE_IPV6: {
+ const struct rte_flow_item_ipv6 *ipv6 = items->mask;
+
size += SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 2;
+ if (ipv6 && ipv6->hdr.hop_limits)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
+ (0xfful << IPV6_HDR_TC_SHIFT)))
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
+ }
case RTE_FLOW_ITEM_TYPE_UDP: {
const struct rte_flow_item_udp *udp = items->mask;
*
* @param[in] spec
* RTE_FLOW_ITEM_TYPE_IPV4 entry specification.
+ * @param[in] mask
+ * RTE_FLOW_ITEM_TYPE_IPV4 entry mask.
* @param[out] encap
* Structure to fill the gathered IPV4 address data.
*/
static void
flow_tcf_parse_vxlan_encap_ipv4(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
struct flow_tcf_vxlan_encap *encap)
{
/* Item must be validated before. No redundant checks. */
encap->ipv4.src = spec->hdr.src_addr;
encap->mask |= FLOW_TCF_ENCAP_IPV4_SRC |
FLOW_TCF_ENCAP_IPV4_DST;
+ if (mask && mask->hdr.type_of_service) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
+ encap->ip_tos = spec->hdr.type_of_service;
+ }
+ if (mask && mask->hdr.time_to_live) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
+ encap->ip_ttl_hop = spec->hdr.time_to_live;
+ }
}
/**
*
* @param[in] spec
* RTE_FLOW_ITEM_TYPE_IPV6 entry specification.
+ * @param[in] mask
+ * RTE_FLOW_ITEM_TYPE_IPV6 entry mask.
* @param[out] encap
* Structure to fill the gathered IPV6 address data.
*/
static void
flow_tcf_parse_vxlan_encap_ipv6(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
struct flow_tcf_vxlan_encap *encap)
{
/* Item must be validated before. No redundant checks. */
memcpy(encap->ipv6.src, spec->hdr.src_addr, IPV6_ADDR_LEN);
encap->mask |= FLOW_TCF_ENCAP_IPV6_SRC |
FLOW_TCF_ENCAP_IPV6_DST;
+ if (mask) {
+ if ((rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
+ IPV6_HDR_TC_SHIFT) & 0xff) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
+ encap->ip_tos = (rte_be_to_cpu_32
+ (spec->hdr.vtc_flow) >>
+ IPV6_HDR_TC_SHIFT) & 0xff;
+ }
+ if (mask->hdr.hop_limits) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
+ encap->ip_ttl_hop = spec->hdr.hop_limits;
+ }
+ }
}
/**
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
spec.ipv4 = items->spec;
- flow_tcf_parse_vxlan_encap_ipv4(spec.ipv4, encap);
+ mask.ipv4 = items->mask;
+ flow_tcf_parse_vxlan_encap_ipv4(spec.ipv4, mask.ipv4,
+ encap);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
spec.ipv6 = items->spec;
- flow_tcf_parse_vxlan_encap_ipv6(spec.ipv6, encap);
+ mask.ipv6 = items->mask;
+ flow_tcf_parse_vxlan_encap_ipv6(spec.ipv6, mask.ipv6,
+ encap);
break;
case RTE_FLOW_ITEM_TYPE_UDP:
mask.udp = items->mask;
TCA_FLOWER_KEY_IPV4_DST_MASK,
mask.ipv4->hdr.dst_addr);
}
+ if (mask.ipv4->hdr.time_to_live) {
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL :
+ TCA_FLOWER_KEY_IP_TTL,
+ spec.ipv4->hdr.time_to_live);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK :
+ TCA_FLOWER_KEY_IP_TTL_MASK,
+ mask.ipv4->hdr.time_to_live);
+ }
+ if (mask.ipv4->hdr.type_of_service) {
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS :
+ TCA_FLOWER_KEY_IP_TOS,
+ spec.ipv4->hdr.type_of_service);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK :
+ TCA_FLOWER_KEY_IP_TOS_MASK,
+ mask.ipv4->hdr.type_of_service);
+ }
assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
break;
case RTE_FLOW_ITEM_TYPE_IPV6: {
bool ipv6_src, ipv6_dst;
+ uint8_t msk6, tos6;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
MLX5_FLOW_LAYER_INNER_L3_IPV6 :
IPV6_ADDR_LEN,
mask.ipv6->hdr.dst_addr);
}
+ if (mask.ipv6->hdr.hop_limits) {
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL :
+ TCA_FLOWER_KEY_IP_TTL,
+ spec.ipv6->hdr.hop_limits);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK :
+ TCA_FLOWER_KEY_IP_TTL_MASK,
+ mask.ipv6->hdr.hop_limits);
+ }
+ msk6 = (rte_be_to_cpu_32(mask.ipv6->hdr.vtc_flow) >>
+ IPV6_HDR_TC_SHIFT) & 0xff;
+ if (msk6) {
+ tos6 = (rte_be_to_cpu_32
+ (spec.ipv6->hdr.vtc_flow) >>
+ IPV6_HDR_TC_SHIFT) & 0xff;
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS :
+ TCA_FLOWER_KEY_IP_TOS, tos6);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK :
+ TCA_FLOWER_KEY_IP_TOS_MASK, msk6);
+ }
assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
break;
}
TCA_TUNNEL_KEY_ENC_IPV6_DST,
sizeof(encap.vxlan->ipv6.dst),
&encap.vxlan->ipv6.dst);
+ if (encap.vxlan->mask & FLOW_TCF_ENCAP_IP_TTL)
+ mnl_attr_put_u8(nlh,
+ TCA_TUNNEL_KEY_ENC_TTL,
+ encap.vxlan->ip_ttl_hop);
+ if (encap.vxlan->mask & FLOW_TCF_ENCAP_IP_TOS)
+ mnl_attr_put_u8(nlh,
+ TCA_TUNNEL_KEY_ENC_TOS,
+ encap.vxlan->ip_tos);
if (encap.vxlan->mask & FLOW_TCF_ENCAP_VXLAN_VNI)
mnl_attr_put_u32(nlh,
TCA_TUNNEL_KEY_ENC_KEY_ID,
*
* @param[in] tcf
* Context object initialized by mlx5_flow_tcf_context_create().
- * @param[in] ifouter
- * Outer interface to attach new-created VXLAN device
- * If zero the VXLAN device will not be attached to any device.
- * These VTEPs are used for decapsulation and can be precreated
- * and shared between processes.
* @param[in] port
* UDP port of created VTEP device.
* @param[out] error
* Pointer to created device structure on success,
* NULL otherwise and rte_errno is set.
*/
-#ifdef HAVE_IFLA_VXLAN_COLLECT_METADATA
static struct tcf_vtep*
flow_tcf_vtep_create(struct mlx5_flow_tcf_context *tcf,
- unsigned int ifouter,
uint16_t port, struct rte_flow_error *error)
{
struct tcf_vtep *vtep;
}
*vtep = (struct tcf_vtep){
.port = port,
- .local = LIST_HEAD_INITIALIZER(),
- .neigh = LIST_HEAD_INITIALIZER(),
};
memset(buf, 0, sizeof(buf));
nlh = mnl_nlmsg_put_header(buf);
assert(na_info);
mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "vxlan");
na_vxlan = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
- if (ifouter)
- mnl_attr_put_u32(nlh, IFLA_VXLAN_LINK, ifouter);
assert(na_vxlan);
+#ifdef HAVE_IFLA_VXLAN_COLLECT_METADATA
+	/*
+	 * RH 7.2 does not support metadata for the tunnel device.
+	 * It does not matter because we are going to use the
+	 * hardware offload by the mlx5 driver.
+	 */
mnl_attr_put_u8(nlh, IFLA_VXLAN_COLLECT_METADATA, 1);
+#endif
mnl_attr_put_u8(nlh, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 1);
mnl_attr_put_u8(nlh, IFLA_VXLAN_LEARNING, 0);
mnl_attr_put_u16(nlh, IFLA_VXLAN_PORT, vxlan_port);
+#ifndef HAVE_IFLA_VXLAN_COLLECT_METADATA
+	/*
+	 * We must specify VNI explicitly if metadata is not supported.
+	 * Note, VNI is transferred with native endianness format.
+	 * NOTE(review): the kernel declares IFLA_VXLAN_ID as NLA_U32 —
+	 * confirm a u16-sized attribute is accepted by the vxlan driver.
+	 */
+ mnl_attr_put_u16(nlh, IFLA_VXLAN_ID, MLX5_VXLAN_DEFAULT_VNI);
+#endif
mnl_attr_nest_end(nlh, na_vxlan);
mnl_attr_nest_end(nlh, na_info);
assert(sizeof(buf) >= nlh->nlmsg_len);
DRV_LOG(WARNING,
"netlink: VTEP %s create failure (%d)",
name, rte_errno);
- if (rte_errno != EEXIST || ifouter)
+ if (rte_errno != EEXIST)
/*
* Some unhandled error occurred or device is
* for encapsulation and cannot be shared.
goto error;
}
vtep->ifindex = ret;
- vtep->ifouter = ifouter;
memset(buf, 0, sizeof(buf));
nlh = mnl_nlmsg_put_header(buf);
nlh->nlmsg_type = RTM_NEWLINK;
rte_free(vtep);
return NULL;
}
-#else
-static struct tcf_vtep*
-flow_tcf_vtep_create(struct mlx5_flow_tcf_context *tcf __rte_unused,
- unsigned int ifouter __rte_unused,
- uint16_t port __rte_unused,
- struct rte_flow_error *error)
-{
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "netlink: failed to create VTEP, "
- "vxlan metadata are not supported by kernel");
- return NULL;
-}
-#endif /* HAVE_IFLA_VXLAN_COLLECT_METADATA */
/**
* Acquire target interface index for VXLAN tunneling decapsulation.
if (vtep->port == port)
break;
}
- if (vtep && vtep->ifouter) {
- rte_flow_error_set(error, -errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to create decap VTEP with specified"
- " UDP port, atatched device exists");
- return NULL;
- }
if (vtep) {
/* Device exists, just increment the reference counter. */
vtep->refcnt++;
return vtep;
}
/* No decapsulation device exists, try to create the new one. */
- vtep = flow_tcf_vtep_create(tcf, 0, port, error);
+ vtep = flow_tcf_vtep_create(tcf, port, error);
if (vtep)
LIST_INSERT_HEAD(&vtep_list_vxlan, vtep, next);
return vtep;
static struct tcf_vtep*
flow_tcf_encap_vtep_acquire(struct mlx5_flow_tcf_context *tcf,
unsigned int ifouter,
- struct mlx5_flow *dev_flow __rte_unused,
+ struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- static uint16_t encap_port = MLX5_VXLAN_PORT_MIN - 1;
+ static uint16_t port;
struct tcf_vtep *vtep;
struct tcf_irule *iface;
int ret;
assert(ifouter);
- /* Look whether the attached VTEP for encap is created. */
+	/* Look whether the VTEP for the specified port is created. */
+ port = rte_be_to_cpu_16(dev_flow->tcf.vxlan_encap->udp.dst);
LIST_FOREACH(vtep, &vtep_list_vxlan, next) {
- if (vtep->ifouter == ifouter)
+ if (vtep->port == port)
break;
}
if (vtep) {
/* VTEP already exists, just increment the reference. */
vtep->refcnt++;
} else {
- uint16_t pcnt;
-
- /* Not found, we should create the new attached VTEP. */
- flow_tcf_encap_iface_cleanup(tcf, ifouter);
- flow_tcf_encap_local_cleanup(tcf, ifouter);
- flow_tcf_encap_neigh_cleanup(tcf, ifouter);
- for (pcnt = 0; pcnt <= (MLX5_VXLAN_PORT_MAX
- - MLX5_VXLAN_PORT_MIN); pcnt++) {
- encap_port++;
- /* Wraparound the UDP port index. */
- if (encap_port < MLX5_VXLAN_PORT_MIN ||
- encap_port > MLX5_VXLAN_PORT_MAX)
- encap_port = MLX5_VXLAN_PORT_MIN;
- /* Check whether UDP port is in already in use. */
- LIST_FOREACH(vtep, &vtep_list_vxlan, next) {
- if (vtep->port == encap_port)
- break;
- }
- if (vtep) {
- /* Port is in use, try the next one. */
- vtep = NULL;
- continue;
- }
- vtep = flow_tcf_vtep_create(tcf, ifouter,
- encap_port, error);
- if (vtep) {
- LIST_INSERT_HEAD(&vtep_list_vxlan, vtep, next);
- break;
- }
- if (rte_errno != EEXIST)
- break;
- }
+ /* Not found, we should create the new VTEP. */
+ vtep = flow_tcf_vtep_create(tcf, port, error);
if (!vtep)
return NULL;
+ LIST_INSERT_HEAD(&vtep_list_vxlan, vtep, next);
}
- assert(vtep->ifouter == ifouter);
assert(vtep->ifindex);
iface = flow_tcf_encap_irule_acquire(tcf, ifouter, error);
if (!iface) {
* @param[in] tcf
* Context object initialized by mlx5_flow_tcf_context_create().
* @param[in] ifouter
- * Network interface index to attach VXLAN encap device to.
+ * Network interface index to create VXLAN encap rules on.
* @param[in] dev_flow
* Flow tcf object with tunnel structure pointer set.
* @param[out] error