Normal flows do not currently provide IPv6 support. Switch to experimental flows and add the IPv6 hash Rx queue types when struct ibv_exp_flow_spec_ipv6 is available (HAVE_FLOW_SPEC_IPV6).
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
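For context, below is a minimal sketch (illustration only, not part of the patch) of the experimental flow API this change switches to: an attribute immediately followed by one Ethernet spec, passed to the experimental create entry point. The helper name and reduced field set are invented for the example; the real code fills the attribute through hash_rxq_flow_attr(), also programs the spec mask, and handles errors as shown in the hunks that follow.

/* Illustration only: attribute + single Ethernet spec for the
 * experimental flow API. */
#include <string.h>
#include <stdint.h>
#include <infiniband/verbs.h>

struct eth_flow_example {
	struct ibv_exp_flow_attr attr;
	struct ibv_exp_flow_spec_eth spec;
} __attribute__((packed));

static struct ibv_exp_flow *
eth_flow_example_create(struct ibv_qp *qp, uint8_t port,
			const uint8_t dst_mac[6])
{
	struct eth_flow_example data;

	memset(&data, 0, sizeof(data));
	data.attr.type = IBV_EXP_FLOW_ATTR_NORMAL;
	data.attr.priority = 0;
	data.attr.num_of_specs = 1;
	data.attr.port = port;
	data.spec.type = IBV_EXP_FLOW_SPEC_ETH;
	data.spec.size = sizeof(data.spec);
	memcpy(data.spec.val.dst_mac, dst_mac, 6);
	/* The real code also fills the spec mask; omitted here. */
	/* Returns NULL on failure; errno may or may not be set. */
	return ibv_exp_create_flow(qp, &data.attr);
}

/* Teardown mirrors creation: ibv_exp_destroy_flow(flow) returns 0 on
 * success, which the driver checks with claim_zero(). */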
HAVE_EXP_QUERY_DEVICE \
infiniband/verbs.h \
type 'struct ibv_exp_device_attr' $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_FLOW_SPEC_IPV6 \
+ infiniband/verbs.h \
+ type 'struct ibv_exp_flow_spec_ipv6' $(AUTOCONF_OUTPUT)
mlx5.o: mlx5_autoconf.h
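When the added type check succeeds, the generated mlx5_autoconf.h is expected to define the macro tested by the #ifdef HAVE_FLOW_SPEC_IPV6 guards below, roughly as follows (assuming the usual output of the auto-config helper script; only whether the macro is defined matters, not its value):

/* mlx5_autoconf.h (generated), sketch of the relevant line: */
#define HAVE_FLOW_SPEC_IPV6 1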
(*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
mac_index,
vlan_index);
- claim_zero(ibv_destroy_flow(hash_rxq->mac_flow
- [mac_index][vlan_index]));
+ claim_zero(ibv_exp_destroy_flow(hash_rxq->mac_flow
+ [mac_index][vlan_index]));
hash_rxq->mac_flow[mac_index][vlan_index] = NULL;
}
hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,
unsigned int vlan_index)
{
- struct ibv_flow *flow;
+ struct ibv_exp_flow *flow;
struct priv *priv = hash_rxq->priv;
const uint8_t (*mac)[ETHER_ADDR_LEN] =
(const uint8_t (*)[ETHER_ADDR_LEN])
priv->mac[mac_index].addr_bytes;
FLOW_ATTR_SPEC_ETH(data, hash_rxq_flow_attr(hash_rxq, NULL, 0));
- struct ibv_flow_attr *attr = &data->attr;
- struct ibv_flow_spec_eth *spec = &data->spec;
+ struct ibv_exp_flow_attr *attr = &data->attr;
+ struct ibv_exp_flow_spec_eth *spec = &data->spec;
unsigned int vlan_enabled = !!priv->vlan_filter_n;
unsigned int vlan_id = priv->vlan_filter[vlan_index];
assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
hash_rxq_flow_attr(hash_rxq, attr, sizeof(data));
/* The first specification must be Ethernet. */
- assert(spec->type == IBV_FLOW_SPEC_ETH);
+ assert(spec->type == IBV_EXP_FLOW_SPEC_ETH);
assert(spec->size == sizeof(*spec));
- *spec = (struct ibv_flow_spec_eth){
- .type = IBV_FLOW_SPEC_ETH,
+ *spec = (struct ibv_exp_flow_spec_eth){
+ .type = IBV_EXP_FLOW_SPEC_ETH,
.size = sizeof(*spec),
.val = {
.dst_mac = {
vlan_id);
/* Create related flow. */
errno = 0;
- flow = ibv_create_flow(hash_rxq->qp, attr);
+ flow = ibv_exp_create_flow(hash_rxq->qp, attr);
if (flow == NULL) {
/* It's not clear whether errno is always set in this case. */
ERROR("%p: flow configuration failed, errno=%d: %s",
static int
hash_rxq_promiscuous_enable(struct hash_rxq *hash_rxq)
{
- struct ibv_flow *flow;
+ struct ibv_exp_flow *flow;
FLOW_ATTR_SPEC_ETH(data, hash_rxq_flow_attr(hash_rxq, NULL, 0));
- struct ibv_flow_attr *attr = &data->attr;
+ struct ibv_exp_flow_attr *attr = &data->attr;
if (hash_rxq->promisc_flow != NULL)
return 0;
* on specific MAC addresses. */
hash_rxq_flow_attr(hash_rxq, attr, sizeof(data));
errno = 0;
- flow = ibv_create_flow(hash_rxq->qp, attr);
+ flow = ibv_exp_create_flow(hash_rxq->qp, attr);
if (flow == NULL) {
/* It's not clear whether errno is always set in this case. */
ERROR("%p: flow configuration failed, errno=%d: %s",
if (hash_rxq->promisc_flow == NULL)
return;
DEBUG("%p: disabling promiscuous mode", (void *)hash_rxq);
- claim_zero(ibv_destroy_flow(hash_rxq->promisc_flow));
+ claim_zero(ibv_exp_destroy_flow(hash_rxq->promisc_flow));
hash_rxq->promisc_flow = NULL;
DEBUG("%p: promiscuous mode disabled", (void *)hash_rxq);
}
static int
hash_rxq_allmulticast_enable(struct hash_rxq *hash_rxq)
{
- struct ibv_flow *flow;
+ struct ibv_exp_flow *flow;
FLOW_ATTR_SPEC_ETH(data, hash_rxq_flow_attr(hash_rxq, NULL, 0));
- struct ibv_flow_attr *attr = &data->attr;
- struct ibv_flow_spec_eth *spec = &data->spec;
+ struct ibv_exp_flow_attr *attr = &data->attr;
+ struct ibv_exp_flow_spec_eth *spec = &data->spec;
if (hash_rxq->allmulti_flow != NULL)
return 0;
*/
assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
hash_rxq_flow_attr(hash_rxq, attr, sizeof(data));
- *spec = (struct ibv_flow_spec_eth){
- .type = IBV_FLOW_SPEC_ETH,
+ *spec = (struct ibv_exp_flow_spec_eth){
+ .type = IBV_EXP_FLOW_SPEC_ETH,
.size = sizeof(*spec),
.val = {
.dst_mac = "\x01\x00\x00\x00\x00\x00",
},
};
errno = 0;
- flow = ibv_create_flow(hash_rxq->qp, attr);
+ flow = ibv_exp_create_flow(hash_rxq->qp, attr);
if (flow == NULL) {
/* It's not clear whether errno is always set in this case. */
ERROR("%p: flow configuration failed, errno=%d: %s",
if (hash_rxq->allmulti_flow == NULL)
return;
DEBUG("%p: disabling allmulticast mode", (void *)hash_rxq);
- claim_zero(ibv_destroy_flow(hash_rxq->allmulti_flow));
+ claim_zero(ibv_exp_destroy_flow(hash_rxq->allmulti_flow));
hash_rxq->allmulti_flow = NULL;
DEBUG("%p: allmulticast mode disabled", (void *)hash_rxq);
}
.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
.flow_priority = 0,
.flow_spec.tcp_udp = {
- .type = IBV_FLOW_SPEC_TCP,
+ .type = IBV_EXP_FLOW_SPEC_TCP,
.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
},
.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
.flow_priority = 0,
.flow_spec.tcp_udp = {
- .type = IBV_FLOW_SPEC_UDP,
+ .type = IBV_EXP_FLOW_SPEC_UDP,
.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
},
.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
ETH_RSS_FRAG_IPV4),
.flow_priority = 1,
.flow_spec.ipv4 = {
- .type = IBV_FLOW_SPEC_IPV4,
+ .type = IBV_EXP_FLOW_SPEC_IPV4,
.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
},
+#ifdef HAVE_FLOW_SPEC_IPV6
+ [HASH_RXQ_TCPV6] = {
+ .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+ IBV_EXP_RX_HASH_DST_IPV6 |
+ IBV_EXP_RX_HASH_SRC_PORT_TCP |
+ IBV_EXP_RX_HASH_DST_PORT_TCP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+ .flow_priority = 0,
+ .flow_spec.tcp_udp = {
+ .type = IBV_EXP_FLOW_SPEC_TCP,
+ .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+ },
+ .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+ },
+ [HASH_RXQ_UDPV6] = {
+ .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+ IBV_EXP_RX_HASH_DST_IPV6 |
+ IBV_EXP_RX_HASH_SRC_PORT_UDP |
+ IBV_EXP_RX_HASH_DST_PORT_UDP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+ .flow_priority = 0,
+ .flow_spec.tcp_udp = {
+ .type = IBV_EXP_FLOW_SPEC_UDP,
+ .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+ },
+ .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+ },
+ [HASH_RXQ_IPV6] = {
+ .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+ IBV_EXP_RX_HASH_DST_IPV6),
+ .dpdk_rss_hf = (ETH_RSS_IPV6 |
+ ETH_RSS_FRAG_IPV6),
+ .flow_priority = 1,
+ .flow_spec.ipv6 = {
+ .type = IBV_EXP_FLOW_SPEC_IPV6,
+ .size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
+ },
+ .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
+ },
+#endif /* HAVE_FLOW_SPEC_IPV6 */
[HASH_RXQ_ETH] = {
.hash_fields = 0,
.dpdk_rss_hf = 0,
.flow_priority = 2,
.flow_spec.eth = {
- .type = IBV_FLOW_SPEC_ETH,
+ .type = IBV_EXP_FLOW_SPEC_ETH,
.size = sizeof(hash_rxq_init[0].flow_spec.eth),
},
.underlayer = NULL,
1 << HASH_RXQ_TCPV4 |
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
+#ifdef HAVE_FLOW_SPEC_IPV6
+ 1 << HASH_RXQ_TCPV6 |
+ 1 << HASH_RXQ_UDPV6 |
+ 1 << HASH_RXQ_IPV6 |
+#endif /* HAVE_FLOW_SPEC_IPV6 */
0,
+#ifdef HAVE_FLOW_SPEC_IPV6
+ .hash_types_n = 6,
+#else /* HAVE_FLOW_SPEC_IPV6 */
.hash_types_n = 3,
+#endif /* HAVE_FLOW_SPEC_IPV6 */
},
{
.max_size = 1,
*/
size_t
hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
- struct ibv_flow_attr *flow_attr,
+ struct ibv_exp_flow_attr *flow_attr,
size_t flow_attr_size)
{
size_t offset = sizeof(*flow_attr);
return offset;
flow_attr_size = offset;
init = &hash_rxq_init[type];
- *flow_attr = (struct ibv_flow_attr){
- .type = IBV_FLOW_ATTR_NORMAL,
+ *flow_attr = (struct ibv_exp_flow_attr){
+ .type = IBV_EXP_FLOW_ATTR_NORMAL,
.priority = init->flow_priority,
.num_of_specs = 0,
.port = hash_rxq->priv->port,
#include "mlx5_utils.h"
#include "mlx5.h"
+#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
struct mlx5_rxq_stats {
HASH_RXQ_TCPV4,
HASH_RXQ_UDPV4,
HASH_RXQ_IPV4,
+#ifdef HAVE_FLOW_SPEC_IPV6
+ HASH_RXQ_TCPV6,
+ HASH_RXQ_UDPV6,
+ HASH_RXQ_IPV6,
+#endif /* HAVE_FLOW_SPEC_IPV6 */
HASH_RXQ_ETH,
};
/* Flow structure with Ethernet specification. It is packed to prevent padding
* between attr and spec as this layout is expected by libibverbs. */
struct flow_attr_spec_eth {
- struct ibv_flow_attr attr;
- struct ibv_flow_spec_eth spec;
+ struct ibv_exp_flow_attr attr;
+ struct ibv_exp_flow_spec_eth spec;
} __attribute__((packed));
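Since libibverbs expects the spec to follow the attribute with no padding, the layout can also be checked at compile time. The following standalone sketch (not in the patch; C11 _Static_assert assumed available) mirrors the runtime assert()s used before hash_rxq_flow_attr() is called:

#include <stddef.h>
#include <infiniband/verbs.h>

struct flow_attr_spec_eth_check {
	struct ibv_exp_flow_attr attr;
	struct ibv_exp_flow_spec_eth spec;
} __attribute__((packed));

_Static_assert(offsetof(struct flow_attr_spec_eth_check, spec) ==
	       sizeof(struct ibv_exp_flow_attr),
	       "spec must immediately follow attr");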
/* Define a struct flow_attr_spec_eth object as an array of at least
uint64_t hash_fields; /* Fields that participate in the hash. */
uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
unsigned int flow_priority; /* Flow priority to use. */
- struct ibv_flow_spec flow_spec; /* Flow specification template. */
+ struct ibv_exp_flow_spec flow_spec; /* Flow specification template. */
const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
};
struct ibv_qp *qp; /* Hash RX QP. */
enum hash_rxq_type type; /* Hash RX queue type. */
/* MAC flow steering rules, one per VLAN ID. */
- struct ibv_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
- struct ibv_flow *promisc_flow; /* Promiscuous flow. */
- struct ibv_flow *allmulti_flow; /* Multicast flow. */
+ struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
+ struct ibv_exp_flow *promisc_flow; /* Promiscuous flow. */
+ struct ibv_exp_flow *allmulti_flow; /* Multicast flow. */
};
/* TX element. */
extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;
-size_t hash_rxq_flow_attr(const struct hash_rxq *, struct ibv_flow_attr *,
+size_t hash_rxq_flow_attr(const struct hash_rxq *, struct ibv_exp_flow_attr *,
size_t);
int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);