X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fbase%2Fef10_tx.c;h=69c757009feca8b10ae6c42a3730ed18f138548f;hb=2b38e7b7b7e14c7fd60b7d621f977bd26be6d2a4;hp=59343a329c594205a4e949e4fd8fea779910449c;hpb=e7cd430c864f9bd1ba76026f7b5ffe8fbb020c7a;p=dpdk.git diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c index 59343a329c..69c757009f 100644 --- a/drivers/net/sfc/base/ef10_tx.c +++ b/drivers/net/sfc/base/ef10_tx.c @@ -1,31 +1,7 @@ -/* - * Copyright (c) 2012-2016 Solarflare Communications Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. +/* SPDX-License-Identifier: BSD-3-Clause * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of the FreeBSD Project. + * Copyright (c) 2012-2018 Solarflare Communications Inc. + * All rights reserved. 
*/ #include "efx.h" @@ -34,12 +10,20 @@ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_QSTATS +#define EFX_TX_QSTAT_INCR(_etp, _stat) \ + do { \ + (_etp)->et_stat[_stat]++; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) +#else #define EFX_TX_QSTAT_INCR(_etp, _stat) +#endif static __checkReturn efx_rc_t efx_mcdi_init_txq( __in efx_nic_t *enp, - __in uint32_t size, + __in uint32_t ndescs, __in uint32_t target_evq, __in uint32_t label, __in uint32_t instance, @@ -58,8 +42,8 @@ efx_mcdi_init_txq( EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >= EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs)); - npages = EFX_TXQ_NBUFS(size); - if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) { + npages = EFX_TXQ_NBUFS(ndescs); + if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { rc = EINVAL; goto fail1; } @@ -71,17 +55,21 @@ efx_mcdi_init_txq( req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; - MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size); + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); - MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS, + MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, INIT_TXQ_IN_FLAG_BUFF_MODE, 0, INIT_TXQ_IN_FLAG_IP_CSUM_DIS, (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, + INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, + (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, + INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, + (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0, INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, INIT_TXQ_IN_CRC_MODE, 0, @@ -140,7 +128,7 @@ efx_mcdi_fini_txq( efx_mcdi_execute_quiet(enp, &req); - if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) { + if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } @@ -148,7 +136,12 @@ efx_mcdi_fini_txq( return (0); fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the TXQ has already been destroyed. + */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } @@ -174,21 +167,30 @@ ef10_tx_qcreate( __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp) { + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + uint16_t inner_csum; efx_qword_t desc; efx_rc_t rc; _NOTE(ARGUNUSED(id)) - if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags, - esmp)) != 0) + inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP; + if (((flags & inner_csum) != 0) && + (encp->enc_tunnel_encapsulations_supported == 0)) { + rc = EINVAL; goto fail1; + } + + if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index, + flags, esmp)) != 0) + goto fail2; /* * A previous user of this TX queue may have written a descriptor to the @@ -199,19 +201,25 @@ ef10_tx_qcreate( * a no-op TX option descriptor. See bug29981 for details. */ *addedp = 1; - EFX_POPULATE_QWORD_4(desc, + EFX_POPULATE_QWORD_6(desc, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, ESF_DZ_TX_OPTION_UDP_TCP_CSUM, (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, ESF_DZ_TX_OPTION_IP_CSUM, - (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0); + (flags & EFX_TXQ_CKSUM_IPV4) ? 
1 : 0, + ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, + ESF_DZ_TX_OPTION_INNER_IP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0); EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc); ef10_tx_qpush(etp, *addedp, 0); return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -274,9 +282,9 @@ ef10_tx_qpio_enable( fail3: EFSYS_PROBE(fail3); ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); - etp->et_pio_size = 0; fail2: EFSYS_PROBE(fail2); + etp->et_pio_size = 0; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -401,24 +409,24 @@ fail1: return (rc); } - __checkReturn efx_rc_t + __checkReturn efx_rc_t ef10_tx_qpost( - __in efx_txq_t *etp, - __in_ecount(n) efx_buffer_t *eb, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; efx_rc_t rc; - if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { efx_buffer_t *ebp = &eb[i]; efsys_dma_addr_t addr = ebp->eb_addr; size_t size = ebp->eb_size; @@ -427,8 +435,9 @@ ef10_tx_qpost( size_t offset; efx_qword_t qword; - /* Fragments must not span 4k boundaries. */ - EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size)); + /* No limitations on boundary crossing */ + EFSYS_ASSERT(size <= + etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); id = added++ & etp->et_mask; offset = id * sizeof (efx_qword_t); @@ -459,9 +468,9 @@ fail1: } /* - * This improves performance by pushing a TX descriptor at the same time as the - * doorbell. The descriptor must be added to the TXQ, so that can be used if the - * hardware decides not to use the pushed descriptor. + * This improves performance by, when possible, pushing a TX descriptor at the + * same time as the doorbell. The descriptor must be added to the TXQ, so that + * can be used if the hardware decides not to use the pushed descriptor. */ void ef10_tx_qpush( @@ -481,36 +490,66 @@ ef10_tx_qpush( offset = id * sizeof (efx_qword_t); EFSYS_MEM_READQ(etp->et_esmp, offset, &desc); - EFX_POPULATE_OWORD_3(oword, - ERF_DZ_TX_DESC_WPTR, wptr, - ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1), - ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0)); - - /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ - EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); - EFSYS_PIO_WRITE_BARRIER(); - EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index, - &oword); + + /* + * Bug 65776: TSO option descriptors cannot be pushed if pacer bypass is + * enabled on the event queue this transmit queue is attached to. + * + * To ensure the code is safe, it is easiest to simply test the type of + * the descriptor to push, and only push it is if it not a TSO option + * descriptor. + */ + if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) || + (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) != + ESE_DZ_TX_OPTION_DESC_TSO)) { + /* Push the descriptor and update the wptr. 
*/ + EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr, + ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1), + ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0)); + + /* Ensure ordering of memory (descriptors) and PIO (doorbell) */ + EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, + wptr, id); + EFSYS_PIO_WRITE_BARRIER(); + EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &oword); + } else { + efx_dword_t dword; + + /* + * Only update the wptr. This is signalled to the hardware by + * only writing one DWORD of the doorbell register. + */ + EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr); + dword = oword.eo_dword[2]; + + /* Ensure ordering of memory (descriptors) and PIO (doorbell) */ + EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, + wptr, id); + EFSYS_PIO_WRITE_BARRIER(); + EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &dword, B_FALSE); + } } - __checkReturn efx_rc_t + __checkReturn efx_rc_t ef10_tx_qdesc_post( - __in efx_txq_t *etp, - __in_ecount(n) efx_desc_t *ed, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; efx_rc_t rc; - if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { efx_desc_t *edp = &ed[i]; unsigned int id; size_t offset; @@ -522,7 +561,7 @@ ef10_tx_qdesc_post( } EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, - unsigned int, added, unsigned int, n); + unsigned int, added, unsigned int, ndescs); EFX_TX_QSTAT_INCR(etp, TX_POST); @@ -543,8 +582,10 @@ ef10_tx_qdesc_dma_create( __in boolean_t eop, __out efx_desc_t *edp) { - /* Fragments must not span 4k boundaries. */ - EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size); + _NOTE(ARGUNUSED(etp)) + + /* No limitations on boundary crossing */ + EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, efsys_dma_addr_t, addr, @@ -566,6 +607,8 @@ ef10_tx_qdesc_tso_create( __in uint8_t tcp_flags, __out efx_desc_t *edp) { + _NOTE(ARGUNUSED(etp)) + EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index, uint16_t, ipv4_id, uint32_t, tcp_seq, uint8_t, tcp_flags); @@ -588,6 +631,8 @@ ef10_tx_qdesc_tso2_create( __out_ecount(count) efx_desc_t *edp, __in int count) { + _NOTE(ARGUNUSED(etp, count)) + EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index, uint16_t, ipv4_id, uint32_t, tcp_seq, uint16_t, tcp_mss); @@ -617,6 +662,8 @@ ef10_tx_qdesc_vlantci_create( __in uint16_t tci, __out efx_desc_t *edp) { + _NOTE(ARGUNUSED(etp)) + EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index, uint16_t, tci); @@ -666,7 +713,14 @@ ef10_tx_qflush( return (0); fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the TXQ has already been destroyed. Callers need to know that + * the TXQ flush has completed to avoid waiting until timeout for a + * flush done event that will not be delivered. 
+ */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } @@ -680,4 +734,22 @@ ef10_tx_qenable( /* FIXME */ } +#if EFSYS_OPT_QSTATS + void +ef10_tx_qstats_update( + __in efx_txq_t *etp, + __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) +{ + unsigned int id; + + for (id = 0; id < TX_NQSTATS; id++) { + efsys_stat_t *essp = &stat[id]; + + EFSYS_STAT_INCR(essp, etp->et_stat[id]); + etp->et_stat[id] = 0; + } +} + +#endif /* EFSYS_OPT_QSTATS */ + #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
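
A note on the EALREADY changes above: efx_mcdi_fini_txq() used to swallow MC_CMD_ERR_EALREADY, whereas the patch lets EALREADY propagate from both efx_mcdi_fini_txq() and ef10_tx_qflush() while suppressing the failure probe, since it only indicates that the MC has rebooted and the TXQ is already destroyed. The comments stress that callers must treat this as "flush already complete" and must not wait for a flush-done event that will never be delivered. The standalone sketch below is a hypothetical illustration of that caller-side decision; flush_request() merely stands in for a call such as ef10_tx_qflush() and is not part of the library.

#include <errno.h>
#include <stdio.h>

/* Stand-in for ef10_tx_qflush(): models its return codes only. */
static int
flush_request(int mc_rebooted)
{
	return (mc_rebooted ? EALREADY : 0);
}

/* Returns 0 on success; *waitp tells the caller whether a flush-done
 * event is still expected on the event queue. */
static int
txq_begin_flush(int mc_rebooted, int *waitp)
{
	int rc = flush_request(mc_rebooted);

	if (rc == EALREADY) {
		/* Not an error: the MC rebooted and the TXQ is already
		 * gone, so no flush-done event will ever arrive. */
		*waitp = 0;
		return (0);
	}
	if (rc != 0)
		return (rc);

	*waitp = 1;	/* Normal case: wait for the flush-done event. */
	return (0);
}

int
main(void)
{
	int wait;

	if (txq_begin_flush(1, &wait) == 0)
		printf("flush complete or pending, wait for event: %d\n", wait);
	return (0);
}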
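
The reworked ef10_tx_qpush() distinguishes two doorbell formats: the normal full write, which pushes the descriptor together with the new write pointer, and a single-DWORD write that only updates the pointer. The latter is used when the descriptor at the doorbell is a TSO option descriptor, since such descriptors must not be pushed while pacer bypass is enabled on the attached event queue, and the simplest safe policy is never to push them (bug 65776). The sketch below models only that control-flow decision with hypothetical names; the real code reads the descriptor back from queue memory and tests it with the ESF_DZ_TX_DESC_IS_OPT and ESF_DZ_TX_OPTION_TYPE field macros.

#include <stdio.h>

/* Hypothetical stand-ins for ESE_DZ_TX_OPTION_DESC_* type codes. */
#define OPT_TYPE_CRC_CSUM	0
#define OPT_TYPE_TSO		1

/* desc_is_opt and opt_type stand in for the ESF_DZ_TX_DESC_IS_OPT and
 * ESF_DZ_TX_OPTION_TYPE fields of the descriptor at the doorbell. */
static void
tx_doorbell(unsigned int wptr, int desc_is_opt, int opt_type)
{
	if (!desc_is_opt || opt_type != OPT_TYPE_TSO) {
		/* Safe to push: descriptor and write pointer go out in
		 * one full doorbell write. */
		printf("full doorbell: push descriptor, wptr=%u\n", wptr);
	} else {
		/* TSO option descriptor: write only the pointer DWORD. */
		printf("pointer-only doorbell: wptr=%u\n", wptr);
	}
}

int
main(void)
{
	tx_doorbell(1, 0, 0);			/* DMA descriptor: pushed */
	tx_doorbell(2, 1, OPT_TYPE_TSO);	/* TSO option: not pushed */
	return (0);
}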
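
Finally, the EFSYS_OPT_QSTATS hunks rely on two common idioms: EFX_TX_QSTAT_INCR is a statement-like macro (a do { ... } while (B_FALSE) block, with _NOTE(CONSTANTCONDITION) to silence lint) that compiles away entirely when the option is disabled, and ef10_tx_qstats_update() drains each per-queue counter into a caller-supplied efsys_stat_t array and resets it. The standalone sketch below illustrates both idioms with hypothetical names (struct txq, TXQ_STAT_INCR, txq_stats_update); it is not driver code.

#include <stdio.h>

#define TXQ_NSTATS 2
enum { TXQ_STAT_POST, TXQ_STAT_POST_PIO };

struct txq {
	unsigned long stat[TXQ_NSTATS];
};

#if 1	/* stand-in for EFSYS_OPT_QSTATS */
/* Statement-like macro: safe to use after an un-braced "if". */
#define TXQ_STAT_INCR(_txq, _id)		\
	do {					\
		(_txq)->stat[_id]++;		\
	} while (0)
#else
#define TXQ_STAT_INCR(_txq, _id)
#endif

/* Drain the queue-local counters into 'sums' and zero them, in the same
 * way ef10_tx_qstats_update() accumulates with EFSYS_STAT_INCR. */
static void
txq_stats_update(struct txq *txq, unsigned long *sums)
{
	unsigned int id;

	for (id = 0; id < TXQ_NSTATS; id++) {
		sums[id] += txq->stat[id];
		txq->stat[id] = 0;
	}
}

int
main(void)
{
	struct txq txq = { { 0 } };
	unsigned long sums[TXQ_NSTATS] = { 0 };

	TXQ_STAT_INCR(&txq, TXQ_STAT_POST);
	TXQ_STAT_INCR(&txq, TXQ_STAT_POST_PIO);

	txq_stats_update(&txq, sums);
	printf("post=%lu post_pio=%lu\n",
	    sums[TXQ_STAT_POST], sums[TXQ_STAT_POST_PIO]);
	return (0);
}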