From: Ivan Malov
Date: Tue, 2 Apr 2019 09:28:44 +0000 (+0100)
Subject: net/sfc: support tunnel TSO on EF10 native Tx datapath
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=c1ce2ba218f83ff3a3d8304b99e75e40c44bf899;p=dpdk.git

net/sfc: support tunnel TSO on EF10 native Tx datapath

Handle VXLAN and GENEVE TSO on EF10 native Tx datapath.

Signed-off-by: Ivan Malov
Signed-off-by: Andrew Rybchenko
---

diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index 028c92cc3f..eb47f25e3a 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -66,7 +66,7 @@ SFC EFX PMD has support for:
 
 - Allmulticast mode
 
-- TCP segmentation offload (TSO)
+- TCP segmentation offload (TSO) including VXLAN and GENEVE encapsulated
 
 - Multicast MAC filter
 
diff --git a/doc/guides/rel_notes/release_19_05.rst b/doc/guides/rel_notes/release_19_05.rst
index 1a81fbc6c5..ec45908920 100644
--- a/doc/guides/rel_notes/release_19_05.rst
+++ b/doc/guides/rel_notes/release_19_05.rst
@@ -88,6 +88,7 @@ New Features
     process.
   * Added support for Rx packet types list in a secondary process.
   * Added Tx prepare to do Tx offloads checks.
+  * Added support for VXLAN and GENEVE encapsulated TSO.
 
 * **Updated Mellanox drivers.**
 
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index dee468f89e..406386a8ca 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -750,6 +750,12 @@ sfc_attach(struct sfc_adapter *sa)
 			sfc_info(sa, "TSO support isn't available on this adapter");
 	}
 
+	if (sa->tso && sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO_ENCAP) {
+		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
+		if (!sa->tso_encap)
+			sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
+	}
+
 	sfc_log_init(sa, "estimate resource limits");
 	rc = sfc_estimate_resource_limits(sa);
 	if (rc != 0)
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index a4b9a3f331..ecd20e546e 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -286,6 +286,7 @@ struct sfc_adapter {
 	struct sfc_txq			*txq_ctrl;
 
 	boolean_t			tso;
+	boolean_t			tso_encap;
 
 	uint32_t			rxd_wait_timeout_ns;
 };
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index ae5524f24f..72a69149b8 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -163,6 +163,7 @@ struct sfc_dp_tx {
 #define SFC_DP_TX_FEAT_MULTI_PROCESS	0x8
 #define SFC_DP_TX_FEAT_MULTI_POOL	0x10
 #define SFC_DP_TX_FEAT_REFCNT		0x20
+#define SFC_DP_TX_FEAT_TSO_ENCAP	0x40
 	sfc_dp_tx_get_dev_info_t	*get_dev_info;
 	sfc_dp_tx_qsize_up_rings_t	*qsize_up_rings;
 	sfc_dp_tx_qcreate_t		*qcreate;
@@ -220,7 +221,22 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
 
 	if (m->ol_flags & PKT_TX_TCP_SEG) {
 		unsigned int tcph_off = m->l2_len + m->l3_len;
-		unsigned int header_len = tcph_off + m->l4_len;
+		unsigned int header_len;
+
+		switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+		case 0:
+			break;
+		case PKT_TX_TUNNEL_VXLAN:
+			/* FALLTHROUGH */
+		case PKT_TX_TUNNEL_GENEVE:
+			if (!(m->ol_flags &
+			      (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+				return EINVAL;
+
+			tcph_off += m->outer_l2_len + m->outer_l3_len;
+		}
+
+		header_len = tcph_off + m->l4_len;
 
 		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
 			return EINVAL;
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index bcbd15d559..055389efea 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -366,13 +366,16 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 		      unsigned int *added, unsigned int *dma_desc_space,
 		      bool *reap_done)
 {
-	size_t iph_off = m_seg->l2_len;
-	size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
-	size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
+	size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
+			  m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
+			 m_seg->l2_len;
+	size_t tcph_off = iph_off + m_seg->l3_len;
+	size_t header_len = tcph_off + m_seg->l4_len;
 	/* Offset of the payload in the last segment that contains the header */
 	size_t in_off = 0;
 	const struct tcp_hdr *th;
 	uint16_t packet_id = 0;
+	uint16_t outer_packet_id = 0;
 	uint32_t sent_seq;
 	uint8_t *hdr_addr;
 	rte_iova_t hdr_iova;
@@ -482,12 +485,16 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	if (first_m_seg->ol_flags & PKT_TX_IPV4)
 		packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
 
+	if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
+		outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
+						first_m_seg->outer_l2_len);
+
 	th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
 	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
 	sent_seq = rte_be_to_cpu_32(sent_seq);
 
-	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
-			first_m_seg->tso_segsz);
+	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
+			sent_seq, first_m_seg->tso_segsz);
 	(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
 
 	sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
@@ -927,7 +934,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1090,6 +1099,7 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_TSO |
+				  SFC_DP_TX_FEAT_TSO_ENCAP |
 				  SFC_DP_TX_FEAT_MULTI_SEG |
 				  SFC_DP_TX_FEAT_MULTI_POOL |
 				  SFC_DP_TX_FEAT_REFCNT |
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 4037802e6a..e1ef00cc7c 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -70,6 +70,10 @@ sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
 	if (sa->tso)
 		caps |= DEV_TX_OFFLOAD_TCP_TSO;
 
+	if (sa->tso_encap)
+		caps |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+			 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+
 	return caps;
 }
 
@@ -469,7 +473,9 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -588,18 +594,25 @@ int
 sfc_tx_start(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int sw_index;
 	int rc = 0;
 
 	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
 
 	if (sa->tso) {
-		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+		if (!encp->enc_fw_assisted_tso_v2_enabled) {
 			sfc_warn(sa, "TSO support was unable to be restored");
 			sa->tso = B_FALSE;
+			sa->tso_encap = B_FALSE;
 		}
 	}
 
+	if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled) {
+		sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
+		sa->tso_encap = B_FALSE;
+	}
+
 	rc = efx_tx_init(sa->nic);
 	if (rc != 0)
 		goto fail_efx_tx_init;
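
---

Usage illustration (not part of the commit): a minimal sketch of how an
application might request and use the new offloads, assuming the standard
DPDK 19.05 ethdev/mbuf API. The helpers enable_tunnel_tso() and
request_vxlan_tso() are hypothetical names introduced here for illustration.

#include <errno.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

/* Request tunnel TSO at device configuration time. The sfc PMD advertises
 * these capability bits only when sa->tso_encap is set, i.e. the firmware
 * has FATSOv2-encap and the datapath reports SFC_DP_TX_FEAT_TSO_ENCAP. */
static int
enable_tunnel_tso(uint16_t port_id, struct rte_eth_conf *port_conf)
{
	struct rte_eth_dev_info dev_info;
	const uint64_t wanted = DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	rte_eth_dev_info_get(port_id, &dev_info);
	if ((dev_info.tx_offload_capa & wanted) != wanted)
		return -ENOTSUP;

	port_conf->txmode.offloads |= wanted;
	return 0;
}

/* Per-packet setup for TSO of a VXLAN-encapsulated inner IPv4/TCP packet.
 * Per mbuf API convention, l2_len covers the outer UDP + VXLAN headers plus
 * the inner Ethernet header: the driver locates the inner IP header at
 * outer_l2_len + outer_l3_len + l2_len (see sfc_ef10_xmit_tso_pkt() above). */
static void
request_vxlan_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_OUTER_IPV4 | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
	m->outer_l2_len = sizeof(struct ether_hdr);
	m->outer_l3_len = sizeof(struct ipv4_hdr);
	m->l2_len = sizeof(struct udp_hdr) + 8 /* VXLAN header */ +
		    sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->l4_len = sizeof(struct tcp_hdr);
	m->tso_segsz = mss;
}

Passing such mbufs through rte_eth_tx_prepare() before rte_eth_tx_burst()
exercises the sfc_dp_tx_prepare_pkt() checks added above: a tunnel flag
without PKT_TX_OUTER_IPV4/IPV6 is rejected with EINVAL.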