Handle VXLAN and GENEVE TSO on EF10 native Tx datapath.
Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
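For context (not part of the patch), a minimal sketch of how an application would request the new offloads through the standard ethdev API; the port id and queue counts are illustrative assumptions:

    #include <string.h>
    #include <rte_ethdev.h>

    /* Hedged sketch: enable TCP TSO plus VXLAN/GENEVE tunnel TSO
     * on one Tx queue of a hypothetical port.
     */
    static int
    enable_tunnel_tso(uint16_t port_id)
    {
            struct rte_eth_conf conf;

            memset(&conf, 0, sizeof(conf));
            conf.txmode.offloads = DEV_TX_OFFLOAD_TCP_TSO |
                                   DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                   DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

            return rte_eth_dev_configure(port_id, 1 /* Rx */, 1 /* Tx */, &conf);
    }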
- Allmulticast mode
-- TCP segmentation offload (TSO)
+- TCP segmentation offload (TSO) including VXLAN and GENEVE encapsulated traffic
- Multicast MAC filter
process.
* Added support for Rx packet types list in a secondary process.
* Added Tx prepare to do Tx offloads checks.
+ * Added support for VXLAN and GENEVE encapsulated TSO.
* **Updated Mellanox drivers.**
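Related to the "Tx prepare" item above, a hedged sketch of the intended call pattern (generic names, assuming <rte_ethdev.h>): rte_eth_tx_prepare() runs the driver's offload checks, such as the TSO header offset validation added below, before the burst is transmitted:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static inline uint16_t
    send_burst(uint16_t port_id, uint16_t queue_id,
               struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            /* Run driver offload checks before the actual transmit;
             * on a check failure nb_prep < nb_pkts and rte_errno is set.
             */
            uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
                                                  pkts, nb_pkts);

            return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
    }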
sfc_info(sa, "TSO support isn't available on this adapter");
}
+ if (sa->tso && (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO_ENCAP)) {
+ sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
+ if (!sa->tso_encap)
+ sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
+ }
+
sfc_log_init(sa, "estimate resource limits");
rc = sfc_estimate_resource_limits(sa);
if (rc != 0)
struct sfc_txq *txq_ctrl;
boolean_t tso;
+ boolean_t tso_encap;
uint32_t rxd_wait_timeout_ns;
};
#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
#define SFC_DP_TX_FEAT_MULTI_POOL 0x10
#define SFC_DP_TX_FEAT_REFCNT 0x20
+#define SFC_DP_TX_FEAT_TSO_ENCAP 0x40
sfc_dp_tx_get_dev_info_t *get_dev_info;
sfc_dp_tx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_tx_qcreate_t *qcreate;
if (m->ol_flags & PKT_TX_TCP_SEG) {
unsigned int tcph_off = m->l2_len + m->l3_len;
- unsigned int header_len = tcph_off + m->l4_len;
+ unsigned int header_len;
+
+ switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+ case 0:
+ /* Plain (non-tunneled) packet: no offset adjustment needed */
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ /* FALLTHROUGH */
+ case PKT_TX_TUNNEL_GENEVE:
+ if (!(m->ol_flags &
+ (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+ return EINVAL;
+
+ /* Move past the outer headers to the inner TCP header */
+ tcph_off += m->outer_l2_len + m->outer_l3_len;
+ }
+
+ header_len = tcph_off + m->l4_len;
if (unlikely(tcph_off > tso_tcp_header_offset_limit))
return EINVAL;
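As a worked example (an aside, not part of the patch): for a VXLAN-in-IPv4 packet with fixed-size headers, the mbuf metadata and the offsets computed above would look as follows; all header sizes are assumptions of this sketch:

    #include <rte_mbuf.h>

    /* Hedged sketch: fill Tx offload metadata for a VXLAN-in-IPv4
     * TSO packet with option-free headers.
     */
    static void
    fill_vxlan_tso_meta(struct rte_mbuf *m, uint16_t mss)
    {
            m->outer_l2_len = 14;      /* outer Ethernet */
            m->outer_l3_len = 20;      /* outer IPv4, no options */
            m->l2_len = 8 + 8 + 14;    /* UDP + VXLAN + inner Ethernet */
            m->l3_len = 20;            /* inner IPv4, no options */
            m->l4_len = 20;            /* inner TCP, no options */
            m->tso_segsz = mss;
            m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_TUNNEL_VXLAN |
                           PKT_TX_OUTER_IPV4 | PKT_TX_IPV4 |
                           PKT_TX_IP_CKSUM;
            /* The check above then sees tcph_off = 14 + 20 + 30 + 20 = 84
             * and header_len = 84 + 20 = 104.
             */
    }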
unsigned int *added, unsigned int *dma_desc_space,
bool *reap_done)
{
- size_t iph_off = m_seg->l2_len;
- size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
- size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
+ size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
+ m_seg->l2_len;
+ size_t tcph_off = iph_off + m_seg->l3_len;
+ size_t header_len = tcph_off + m_seg->l4_len;
/* Offset of the payload in the last segment that contains the header */
size_t in_off = 0;
const struct tcp_hdr *th;
uint16_t packet_id = 0;
+ uint16_t outer_packet_id = 0;
uint32_t sent_seq;
uint8_t *hdr_addr;
rte_iova_t hdr_iova;
if (first_m_seg->ol_flags & PKT_TX_IPV4)
packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
+ if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
+ outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
+ first_m_seg->outer_l2_len);
+
th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
sent_seq = rte_be_to_cpu_32(sent_seq);
- sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
- first_m_seg->tso_segsz);
+ sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
+ sent_seq, first_m_seg->tso_segsz);
(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
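For context on the IP IDs passed to the TSOv2 option descriptor: the hardware increments both the inner and (for tunnels) outer IPv4 identification per generated segment, so the driver must extract the initial values from the prepared header. A hedged sketch of what a helper like sfc_tso_ip4_get_ipid() has to do; the driver's actual implementation may differ:

    #include <rte_byteorder.h>
    #include <rte_ip.h>
    #include <rte_memcpy.h>

    static inline uint16_t
    tso_ip4_get_ipid(const uint8_t *pkt_hdr, size_t ip_hdr_off)
    {
            const struct ipv4_hdr *ip_hdr =
                    (const struct ipv4_hdr *)(pkt_hdr + ip_hdr_off);
            uint16_t ipid;

            /* The header copy may be unaligned; avoid a direct load */
            rte_memcpy(&ipid, &ip_hdr->packet_id, sizeof(ipid));

            return rte_be_to_cpu_16(ipid);
    }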
if (txq->sw_ring == NULL)
goto fail_sw_ring_alloc;
- if (info->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
info->txq_entries,
SFC_TSOH_STD_LEN,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
.features = SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_TSO_ENCAP |
SFC_DP_TX_FEAT_MULTI_SEG |
SFC_DP_TX_FEAT_MULTI_POOL |
SFC_DP_TX_FEAT_REFCNT |
if (sa->tso)
caps |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (sa->tso_encap)
+ caps |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+
return caps;
}
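Illustrative usage (an assumption-laden sketch, not from the patch): an application can confirm the capability reported here before enabling the offload:

    #include <rte_ethdev.h>

    static int
    port_supports_tunnel_tso(uint16_t port_id)
    {
            struct rte_eth_dev_info dev_info;
            uint64_t need = DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                            DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

            rte_eth_dev_info_get(port_id, &dev_info);

            return (dev_info.tx_offload_capa & need) == need;
    }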
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
- if (txq_info->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
flags |= EFX_TXQ_FATSOV2;
rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
sfc_tx_start(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
unsigned int sw_index;
int rc = 0;
sfc_log_init(sa, "txq_count = %u", sas->txq_count);
if (sa->tso) {
- if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+ if (!encp->enc_fw_assisted_tso_v2_enabled) {
sfc_warn(sa, "TSO support was unable to be restored");
sa->tso = B_FALSE;
+ sa->tso_encap = B_FALSE;
}
}
+ if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled) {
+ sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
+ sa->tso_encap = B_FALSE;
+ }
+
rc = efx_tx_init(sa->nic);
if (rc != 0)
goto fail_efx_tx_init;