From 591cbbb1d7588513d5f6c36f2e62603f170dea26 Mon Sep 17 00:00:00 2001
From: Andrew Rybchenko
Date: Sun, 24 Dec 2017 10:46:41 +0000
Subject: [PATCH] net/sfc: support VXLAN and NVGRE packet types classification

Signed-off-by: Andrew Rybchenko
Reviewed-by: Ivan Malov
Reviewed-by: Andy Moreton
---
 doc/guides/nics/sfc_efx.rst   |  11 ++++
 drivers/net/sfc/sfc_dp_rx.h   |   4 +-
 drivers/net/sfc/sfc_ef10_rx.c | 102 +++++++++++++++++++++++++++++-----
 drivers/net/sfc/sfc_ethdev.c  |   8 ++-
 drivers/net/sfc/sfc_rx.c      |   6 +-
 5 files changed, 114 insertions(+), 17 deletions(-)

diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index bde3cc8fd9..994e1110b8 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -124,6 +124,17 @@
 with full-feature firmware variant running.
 **sfboot** should be used to configure NIC to run full-feature firmware
 variant. See Solarflare Server Adapter User's Guide for details.
 
+SFN8xxx family adapters provide either inner or outer packet classes.
+If adapter firmware advertises support for tunnels then the PMD
+configures the hardware to report inner classes, and outer classes are
+not reported in received packets.
+However, for VXLAN and GENEVE tunnels the PMD does report UDP as the
+outer layer 4 packet type.
+
+SFN8xxx family adapters report GENEVE packets as VXLAN.
+If UDP ports are configured for only one tunnel type then it is safe to
+treat VXLAN packet type indication as the corresponding UDP tunnel type.
+
 Flow API support
 ----------------
 
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index 3f6a604ba5..33e06ac961 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -150,7 +150,8 @@ typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
 typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
 
 /** Get packet types recognized/classified */
-typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(
+				uint32_t tunnel_encaps);
 
 /** Get number of pending Rx descriptors */
 typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
@@ -166,6 +167,7 @@ struct sfc_dp_rx {
 	unsigned int		features;
 #define SFC_DP_RX_FEAT_SCATTER		0x1
 #define SFC_DP_RX_FEAT_MULTI_PROCESS	0x2
+#define SFC_DP_RX_FEAT_TUNNELS		0x4
 	sfc_dp_rx_qcreate_t	*qcreate;
 	sfc_dp_rx_qdestroy_t	*qdestroy;
 	sfc_dp_rx_qstart_t	*qstart;
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 4c76f74757..41c2885856 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -251,6 +251,7 @@ static void
 sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 			   struct rte_mbuf *m)
 {
+	uint32_t tun_ptype = 0;
 	uint32_t l2_ptype = 0;
 	uint32_t l3_ptype = 0;
 	uint32_t l4_ptype = 0;
@@ -259,15 +260,40 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 	if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
 		goto done;
 
+	switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
+	default:
+		/* Unexpected encapsulation tag class */
+		SFC_ASSERT(false);
+		/* FALLTHROUGH */
+	case ESE_EZ_ENCAP_HDR_NONE:
+		break;
+	case ESE_EZ_ENCAP_HDR_VXLAN:
+		/*
+		 * It is definitely UDP, but we have no information
+		 * about IPv4 vs IPv6 and VLAN tagging.
+		 */
+		tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+		break;
+	case ESE_EZ_ENCAP_HDR_GRE:
+		/*
+		 * We have no information about IPv4 vs IPv6 and VLAN tagging.
+		 */
+		tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
+		break;
+	}
+
 	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
 	case ESE_DZ_ETH_TAG_CLASS_NONE:
-		l2_ptype = RTE_PTYPE_L2_ETHER;
+		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
+			RTE_PTYPE_INNER_L2_ETHER;
 		break;
 	case ESE_DZ_ETH_TAG_CLASS_VLAN1:
-		l2_ptype = RTE_PTYPE_L2_ETHER_VLAN;
+		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
+			RTE_PTYPE_INNER_L2_ETHER_VLAN;
 		break;
 	case ESE_DZ_ETH_TAG_CLASS_VLAN2:
-		l2_ptype = RTE_PTYPE_L2_ETHER_QINQ;
+		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
+			RTE_PTYPE_INNER_L2_ETHER_QINQ;
 		break;
 	default:
 		/* Unexpected Eth tag class */
@@ -276,25 +302,31 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 
 	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
 	case ESE_DZ_L3_CLASS_IP4_FRAG:
-		l4_ptype = RTE_PTYPE_L4_FRAG;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+			RTE_PTYPE_INNER_L4_FRAG;
 		/* FALLTHROUGH */
 	case ESE_DZ_L3_CLASS_IP4:
-		l3_ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+		l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
+			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
 		ol_flags |= PKT_RX_RSS_HASH |
 			((EFX_TEST_QWORD_BIT(rx_ev,
 					     ESF_DZ_RX_IPCKSUM_ERR_LBN)) ?
 			 PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
 		break;
 	case ESE_DZ_L3_CLASS_IP6_FRAG:
-		l4_ptype = RTE_PTYPE_L4_FRAG;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+			RTE_PTYPE_INNER_L4_FRAG;
 		/* FALLTHROUGH */
 	case ESE_DZ_L3_CLASS_IP6:
-		l3_ptype = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+		l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
 		ol_flags |= PKT_RX_RSS_HASH;
 		break;
 	case ESE_DZ_L3_CLASS_ARP:
 		/* Override Layer 2 packet type */
-		l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+		/* There is no ARP classification for inner packets */
+		if (tun_ptype == 0)
+			l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
 		break;
 	default:
 		/* Unexpected Layer 3 class */
@@ -303,14 +335,16 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 
 	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
 	case ESE_DZ_L4_CLASS_TCP:
-		l4_ptype = RTE_PTYPE_L4_TCP;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
+			RTE_PTYPE_INNER_L4_TCP;
 		ol_flags |=
 			(EFX_TEST_QWORD_BIT(rx_ev,
 					    ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
 			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
 		break;
 	case ESE_DZ_L4_CLASS_UDP:
-		l4_ptype = RTE_PTYPE_L4_UDP;
+		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
+			RTE_PTYPE_INNER_L4_UDP;
 		ol_flags |=
 			(EFX_TEST_QWORD_BIT(rx_ev,
 					    ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
@@ -329,7 +363,7 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
 
 done:
 	m->ol_flags = ol_flags;
-	m->packet_type = l2_ptype | l3_ptype | l4_ptype;
+	m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
 }
 
 static uint16_t
@@ -515,7 +549,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 }
 
 static const uint32_t *
-sfc_ef10_supported_ptypes_get(void)
+sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
 {
 	static const uint32_t ef10_native_ptypes[] = {
 		RTE_PTYPE_L2_ETHER,
@@ -529,8 +563,47 @@ sfc_ef10_supported_ptypes_get(void)
 		RTE_PTYPE_L4_UDP,
 		RTE_PTYPE_UNKNOWN
 	};
+	static const uint32_t ef10_overlay_ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L2_ETHER_ARP,
+		RTE_PTYPE_L2_ETHER_VLAN,
+		RTE_PTYPE_L2_ETHER_QINQ,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_TUNNEL_VXLAN,
+		RTE_PTYPE_TUNNEL_NVGRE,
+		RTE_PTYPE_INNER_L2_ETHER,
+		RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		RTE_PTYPE_INNER_L2_ETHER_QINQ,
+		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L4_FRAG,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
 
-	return ef10_native_ptypes;
+	/*
+	 * The function returns static set of supported packet types,
+	 * so we can't build it dynamically based on supported tunnel
+	 * encapsulations and should limit to known sets.
+	 */
+	switch (tunnel_encaps) {
+	case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
+	      1u << EFX_TUNNEL_PROTOCOL_GENEVE |
+	      1u << EFX_TUNNEL_PROTOCOL_NVGRE):
+		return ef10_overlay_ptypes;
+	default:
+		RTE_LOG(ERR, PMD,
+			"Unexpected set of supported tunnel encapsulations: %#x\n",
+			tunnel_encaps);
+		/* FALLTHROUGH */
+	case 0:
+		return ef10_native_ptypes;
+	}
 }
 
 static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
@@ -707,7 +780,8 @@ struct sfc_dp_rx sfc_ef10_rx = {
 		.type		= SFC_DP_RX,
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
-	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS,
+	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
+				  SFC_DP_RX_FEAT_TUNNELS,
 	.qcreate		= sfc_ef10_rx_qcreate,
 	.qdestroy		= sfc_ef10_rx_qdestroy,
 	.qstart			= sfc_ef10_rx_qstart,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 837fd5555e..0fea997887 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -131,6 +131,10 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM;
 
+	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
+	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
 		DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -183,8 +187,10 @@ static const uint32_t *
 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
 
-	return sa->dp_rx->supported_ptypes_get();
+	return sa->dp_rx->supported_ptypes_get(tunnel_encaps);
 }
 
 static int
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 22bf3727c8..70a72b348f 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -193,7 +193,8 @@ sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
 }
 
 static const uint32_t *
-sfc_efx_supported_ptypes_get(void)
+sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
 {
 	static const uint32_t ptypes[] = {
 		RTE_PTYPE_L2_ETHER,
@@ -947,6 +947,10 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
+	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
+	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
+
 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index, rxq_info->entries,
 			  socket_id, &evq);
 	if (rc != 0)
-- 
2.20.1
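
Editor's note (not part of the patch): the sketch below shows how an application
might consume the classification this patch reports: it queries the tunnel packet
types advertised by the port and then reads mbuf->packet_type, including the
documented caveat that GENEVE is reported as VXLAN when only GENEVE UDP ports are
configured. It assumes the DPDK 17.11-era rte_ethdev/rte_mbuf APIs used in the
patch; the app_*() helpers are hypothetical names, not part of the sfc PMD.

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Check once at startup whether the port can report tunnel packet types. */
static int
app_port_reports_tunnel_ptypes(uint16_t port_id)
{
	uint32_t ptypes[32];
	int num, i;

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_TUNNEL_MASK,
					       ptypes, RTE_DIM(ptypes));
	/* Only the first RTE_DIM(ptypes) entries are inspected here. */
	for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++) {
		if (ptypes[i] == RTE_PTYPE_TUNNEL_VXLAN ||
		    ptypes[i] == RTE_PTYPE_TUNNEL_NVGRE)
			return 1;
	}
	return 0;
}

/* Per-packet handling of the classification filled in by the Rx datapath. */
static void
app_classify_mbuf(const struct rte_mbuf *m, int geneve_ports_only)
{
	uint32_t tun = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

	if (tun == 0) {
		/* No encapsulation reported: the L2/L3/L4 packet types
		 * describe the only (outer) headers. */
		return;
	}

	/*
	 * As documented above, SFN8xxx firmware reports GENEVE as VXLAN.
	 * If UDP ports were configured for GENEVE only, the application
	 * may safely reinterpret the indication accordingly.
	 */
	if (tun == RTE_PTYPE_TUNNEL_VXLAN && geneve_ports_only)
		printf("GENEVE packet (reported as VXLAN)\n");

	/* With a tunnel type present, the inner headers are described by
	 * the RTE_PTYPE_INNER_* fields of packet_type. */
	if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_TCP)
		printf("inner TCP segment\n");
}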