X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_tx.c;h=28d696de61f2af2567bf3c9e51cb0f794dd49e09;hb=7d5cfaa7508de0fd248b05effbf421a98317006a;hp=515f4afdc23156e38014ea48511ba1f560fc0ec0;hpb=113a14a63e64ee042898a10d2262f1fa8beb198f;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 515f4afdc2..28d696de61 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2016-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2016-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
@@ -34,43 +34,44 @@
  */
 #define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
 
-uint64_t
-sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
+static uint64_t
+sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
-	uint64_t caps = 0;
+	uint64_t no_caps = 0;
+
+	if (!encp->enc_hw_tx_insert_vlan_enabled)
+		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+	if (!encp->enc_tunnel_encapsulations_supported)
+		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-	if ((sa->priv.dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
-	    encp->enc_hw_tx_insert_vlan_enabled)
-		caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+	if (!sa->tso)
+		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;
 
-	if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
-		caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
+	if (!sa->tso_encap ||
+	    (encp->enc_tunnel_encapsulations_supported &
+	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
+		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
 
-	if ((~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
-	    (~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
-		caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	if (!sa->tso_encap ||
+	    (encp->enc_tunnel_encapsulations_supported &
+	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
+		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
 
-	return caps;
+	return ~no_caps;
 }
 
 uint64_t
-sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
 {
-	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
-	uint64_t caps = 0;
-
-	caps |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-	caps |= DEV_TX_OFFLOAD_UDP_CKSUM;
-	caps |= DEV_TX_OFFLOAD_TCP_CKSUM;
-
-	if (encp->enc_tunnel_encapsulations_supported)
-		caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-	if (sa->tso)
-		caps |= DEV_TX_OFFLOAD_TCP_TSO;
+	return sa->priv.dp_tx->dev_offload_capa & sfc_tx_get_offload_mask(sa);
+}
 
-	return caps;
+uint64_t
+sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+	return sa->priv.dp_tx->queue_offload_capa & sfc_tx_get_offload_mask(sa);
 }
 
 static int
@@ -131,14 +132,20 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	int rc = 0;
 	struct sfc_dp_tx_qcreate_info info;
 	uint64_t offloads;
+	struct sfc_dp_tx_hw_limits hw_limits;
 
 	sfc_log_init(sa, "TxQ = %u", sw_index);
 
-	rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries,
-					    &evq_entries, &txq_max_fill_level);
+	memset(&hw_limits, 0, sizeof(hw_limits));
+	hw_limits.txq_max_entries = sa->txq_max_entries;
+	hw_limits.txq_min_entries = sa->txq_min_entries;
+
+	rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &hw_limits,
+					    &txq_entries, &evq_entries,
+					    &txq_max_fill_level);
 	if (rc != 0)
 		goto fail_size_up_rings;
 
-	SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
+	SFC_ASSERT(txq_entries >= sa->txq_min_entries);
 	SFC_ASSERT(txq_entries <= sa->txq_max_entries);
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
@@ -167,7 +174,8 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 						     SFC_TX_DEFAULT_FREE_THRESH;
 	txq_info->offloads = offloads;
 
-	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
+	rc = sfc_dma_alloc(sa, "txq", sw_index,
+			   efx_txq_size(sa->nic, txq_info->entries),
 			   socket_id, &txq->mem);
 	if (rc != 0)
 		goto fail_dma_alloc;
@@ -186,6 +194,17 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.vi_window_shift = encp->enc_vi_window_shift;
 	info.tso_tcp_header_offset_limit =
 		encp->enc_tx_tso_tcp_header_offset_limit;
+	info.tso_max_nb_header_descs =
+		RTE_MIN(encp->enc_tx_tso_max_header_ndescs,
+			(uint32_t)UINT16_MAX);
+	info.tso_max_header_len =
+		RTE_MIN(encp->enc_tx_tso_max_header_length,
+			(uint32_t)UINT16_MAX);
+	info.tso_max_nb_payload_descs =
+		RTE_MIN(encp->enc_tx_tso_max_payload_ndescs,
+			(uint32_t)UINT16_MAX);
+	info.tso_max_payload_len = encp->enc_tx_tso_max_payload_length;
+	info.tso_max_nb_outgoing_frames = encp->enc_tx_tso_max_nframes;
 
 	rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
 				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
@@ -462,7 +481,9 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -581,18 +602,27 @@ int
 sfc_tx_start(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int sw_index;
 	int rc = 0;
 
 	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
 
 	if (sa->tso) {
-		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+		if (!encp->enc_fw_assisted_tso_v2_enabled &&
+		    !encp->enc_tso_v3_enabled) {
 			sfc_warn(sa, "TSO support was unable to be restored");
 			sa->tso = B_FALSE;
+			sa->tso_encap = B_FALSE;
 		}
 	}
 
+	if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled &&
+	    !encp->enc_tso_v3_enabled) {
+		sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
+		sa->tso_encap = B_FALSE;
+	}
+
 	rc = efx_tx_init(sa->nic);
 	if (rc != 0)
 		goto fail_efx_tx_init;
@@ -690,6 +720,36 @@ sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
 	return 1;
 }
 
+static uint16_t
+sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		     uint16_t nb_pkts)
+{
+	struct sfc_dp_txq *dp_txq = tx_queue;
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
+	uint16_t i;
+
+	for (i = 0; i < nb_pkts; i++) {
+		int ret;
+
+		/*
+		 * The EFX Tx datapath may require an extra VLAN descriptor
+		 * if VLAN insertion offload is requested, regardless of the
+		 * offloads requested/supported.
+		 */
+		ret = sfc_dp_tx_prepare_pkt(tx_pkts[i], 0, SFC_TSOH_STD_LEN,
+				encp->enc_tx_tso_tcp_header_offset_limit,
+				txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS,
+				1);
+		if (unlikely(ret != 0)) {
+			rte_errno = ret;
+			break;
+		}
+	}
+
+	return i;
+}
+
 static uint16_t
 sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -750,13 +810,10 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			 */
 			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
 					   &pkt_descs, &pkt_len) != 0) {
-				/* We may have reached this place for
-				 * one of the following reasons:
-				 *
-				 * 1) Packet header length is greater
-				 *    than SFC_TSOH_STD_LEN
-				 * 2) TCP header starts at more then
-				 *    208 bytes into the frame
+				/* We may have reached this place if packet
+				 * header linearization is needed but the
+				 * header length is greater than
+				 * SFC_TSOH_STD_LEN
 				 *
 				 * We will deceive RTE saying that we have sent
 				 * the packet, but we will actually drop it.
@@ -911,7 +968,7 @@ sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
 
 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
 	eth_dev = &rte_eth_devices[dpq->port_id];
-	sa = eth_dev->data->dev_private;
+	sa = sfc_adapter_by_eth_dev(eth_dev);
 
 	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->txq_count);
 	return &sa->txq_ctrl[dpq->queue_id];
@@ -920,6 +977,7 @@ sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
 static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;
 static int
 sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc,
+			  __rte_unused struct sfc_dp_tx_hw_limits *limits,
 			  unsigned int *txq_entries,
 			  unsigned int *evq_entries,
 			  unsigned int *txq_max_fill_level)
@@ -1099,13 +1157,16 @@ struct sfc_dp_tx sfc_efx_tx = {
 	.dp = {
 		.name		= SFC_KVARG_DATAPATH_EFX,
 		.type		= SFC_DP_TX,
-		.hw_fw_caps	= 0,
+		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
 	},
-	.features		= SFC_DP_TX_FEAT_VLAN_INSERT |
-				  SFC_DP_TX_FEAT_TSO |
-				  SFC_DP_TX_FEAT_MULTI_POOL |
-				  SFC_DP_TX_FEAT_REFCNT |
-				  SFC_DP_TX_FEAT_MULTI_SEG,
+	.features		= 0,
+	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
+				  DEV_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
+				  DEV_TX_OFFLOAD_UDP_CKSUM |
+				  DEV_TX_OFFLOAD_TCP_CKSUM |
+				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  DEV_TX_OFFLOAD_TCP_TSO,
 	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
 	.qcreate		= sfc_efx_tx_qcreate,
 	.qdestroy		= sfc_efx_tx_qdestroy,
@@ -1113,5 +1174,6 @@ struct sfc_dp_tx sfc_efx_tx = {
 	.qstop			= sfc_efx_tx_qstop,
 	.qreap			= sfc_efx_tx_qreap,
 	.qdesc_status		= sfc_efx_tx_qdesc_status,
+	.pkt_prepare		= sfc_efx_prepare_pkts,
 	.pkt_burst		= sfc_efx_xmit_pkts,
 };