X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_tx.c;h=aa73d264267cd5d9e652a75c7f6729e16c3bcf0d;hb=649885c08e51bcb8680ed2aa522d0f5160dd8fb1;hp=7504037bd600626ab62e297da967aa76367ae077;hpb=85069adea7e9b5520eff8dd1e5d890ee26dfab11;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 7504037bd6..aa73d26426 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -40,6 +40,26 @@ sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	uint64_t caps = 0;
 
+	if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
+	    encp->enc_hw_tx_insert_vlan_enabled)
+		caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+	if (sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+		caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
+	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
+		caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	return caps;
+}
+
+uint64_t
+sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	uint64_t caps = 0;
+
 	caps |= DEV_TX_OFFLOAD_IPV4_CKSUM;
 	caps |= DEV_TX_OFFLOAD_UDP_CKSUM;
 	caps |= DEV_TX_OFFLOAD_TCP_CKSUM;
@@ -47,10 +67,6 @@ sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
 	if (encp->enc_tunnel_encapsulations_supported)
 		caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-	if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
-	    encp->enc_hw_tx_insert_vlan_enabled)
-		caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
-
 	if (sa->tso)
 		caps |= DEV_TX_OFFLOAD_TCP_TSO;
 
@@ -59,10 +75,9 @@
 
 static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
-		   const struct rte_eth_txconf *tx_conf)
+		   const struct rte_eth_txconf *tx_conf,
+		   uint64_t offloads)
 {
-	unsigned int flags = tx_conf->txq_flags;
-	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	int rc = 0;
 
 	if (tx_conf->tx_rs_thresh != 0) {
@@ -84,48 +99,9 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 			"prefetch/host/writeback thresholds are not supported");
 	}
 
-	if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
-	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
-		sfc_err(sa, "Multi-segment is not supported by %s datapath",
-			sa->dp_tx->dp.name);
-		rc = EINVAL;
-	}
-
-	if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) &&
-	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) {
-		sfc_err(sa, "multi-mempool is not supported by %s datapath",
-			sa->dp_tx->dp.name);
-		rc = EINVAL;
-	}
-
-	if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) &&
-	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) {
-		sfc_err(sa,
-			"mbuf reference counters are neglected by %s datapath",
-			sa->dp_tx->dp.name);
-		rc = EINVAL;
-	}
-
-	if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
-		if (!encp->enc_hw_tx_insert_vlan_enabled) {
-			sfc_err(sa, "VLAN offload is not supported");
-			rc = EINVAL;
-		} else if (~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) {
-			sfc_err(sa,
-				"VLAN offload is not supported by %s datapath",
-				sa->dp_tx->dp.name);
-			rc = EINVAL;
-		}
-	}
-
-	if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
-		sfc_err(sa, "SCTP offload is not supported");
-		rc = EINVAL;
-	}
-
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
-	    ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
+	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
@@ -154,6 +130,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	struct sfc_txq *txq;
 	int rc = 0;
 	struct sfc_dp_tx_qcreate_info info;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u", sw_index);
 
@@ -166,7 +143,9 @@
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+	offloads = tx_conf->offloads |
+		   sa->eth_dev->data->dev_conf.txmode.offloads;
+	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -192,7 +171,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	txq->free_thresh =
 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 		SFC_TX_DEFAULT_FREE_THRESH;
-	txq->flags = tx_conf->txq_flags;
+	txq->offloads = offloads;
 
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
@@ -202,7 +181,7 @@
 	memset(&info, 0, sizeof(info));
 	info.max_fill_level = txq_max_fill_level;
 	info.free_thresh = txq->free_thresh;
-	info.flags = tx_conf->txq_flags;
+	info.offloads = offloads;
 	info.txq_entries = txq_info->entries;
 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	info.txq_hw_ring = txq->mem.esm_base;
@@ -210,6 +189,9 @@
 	info.evq_hw_ring = evq->mem.esm_base;
 	info.hw_index = txq->hw_index;
 	info.mem_bar = sa->mem_bar.esb_base;
+	info.vi_window_shift = encp->enc_vi_window_shift;
+	info.tso_tcp_header_offset_limit =
+		encp->enc_tx_tso_tcp_header_offset_limit;
 
 	rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
 				&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
@@ -253,6 +235,8 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
 	sfc_log_init(sa, "TxQ = %u", sw_index);
 
 	SFC_ASSERT(sw_index < sa->txq_count);
+	sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
 	txq_info = &sa->txq_info[sw_index];
 
 	txq = txq_info->txq;
@@ -424,12 +408,13 @@ sfc_tx_close(struct sfc_adapter *sa)
 int
 sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 {
-	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
+				      sfc_tx_get_queue_offload_caps(sa);
 	struct rte_eth_dev_data *dev_data;
 	struct sfc_txq_info *txq_info;
 	struct sfc_txq *txq;
 	struct sfc_evq *evq;
-	uint16_t flags;
+	uint16_t flags = 0;
 	unsigned int desc_index;
 	int rc = 0;
 
@@ -440,6 +425,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 
 	txq = txq_info->txq;
 
+	SFC_ASSERT(txq != NULL);
 	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
 
 	evq = txq->evq;
@@ -448,28 +434,24 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 	if (rc != 0)
 		goto fail_ev_qstart;
 
-	/*
-	 * It seems that DPDK has no controls regarding IPv4 offloads,
-	 * hence, we always enable it here
-	 */
-	if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
-	    (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
-		flags = EFX_TXQ_CKSUM_IPV4;
+	if (txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		flags |= EFX_TXQ_CKSUM_IPV4;
 
-		if (encp->enc_tunnel_encapsulations_supported != 0)
-			flags |= EFX_TXQ_CKSUM_INNER_IPV4;
-	} else {
-		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
+	if (txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
 
-		if (encp->enc_tunnel_encapsulations_supported != 0)
-			flags |= EFX_TXQ_CKSUM_INNER_IPV4 |
-				 EFX_TXQ_CKSUM_INNER_TCPUDP;
+	if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+		flags |= EFX_TXQ_CKSUM_TCPUDP;
 
-		if (sa->tso)
-			flags |= EFX_TXQ_FATSOV2;
+		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
+	if (txq->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+		flags |= EFX_TXQ_FATSOV2;
+
+	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
 			    txq_info->entries, 0 /* not used on EF10 */,
 			    flags, evq->common,
 			    &txq->common, &desc_index);
@@ -524,7 +506,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 
 	txq = txq_info->txq;
 
-	if (txq->state == SFC_TXQ_INITIALIZED)
+	if (txq == NULL || txq->state == SFC_TXQ_INITIALIZED)
 		return;
 
 	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
@@ -563,7 +545,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 			sfc_err(sa, "TxQ %u flush timed out", sw_index);
 
 		if (txq->state & SFC_TXQ_FLUSHED)
-			sfc_info(sa, "TxQ %u flushed", sw_index);
+			sfc_notice(sa, "TxQ %u flushed", sw_index);
 	}
 
 	sa->dp_tx->qreap(txq->dp);
@@ -601,8 +583,9 @@ sfc_tx_start(struct sfc_adapter *sa)
 		goto fail_efx_tx_init;
 
 	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
-		if (!(sa->txq_info[sw_index].deferred_start) ||
-		    sa->txq_info[sw_index].deferred_started) {
+		if (sa->txq_info[sw_index].txq != NULL &&
+		    (!(sa->txq_info[sw_index].deferred_start) ||
+		     sa->txq_info[sw_index].deferred_started)) {
 			rc = sfc_tx_qstart(sa, sw_index);
 			if (rc != 0)
 				goto fail_tx_qstart;
@@ -729,6 +712,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	for (pkts_sent = 0, pktp = &tx_pkts[0];
 	     (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
 	     pkts_sent++, pktp++) {
+		uint16_t hw_vlan_tci_prev = txq->hw_vlan_tci;
 		struct rte_mbuf *m_seg = *pktp;
 		size_t pkt_len = m_seg->pkt_len;
 		unsigned int pkt_descs = 0;
@@ -736,9 +720,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		/*
 		 * Here VLAN TCI is expected to be zero in case if no
-		 * DEV_TX_VLAN_OFFLOAD capability is advertised;
+		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
 		 * if the calling app ignores the absence of
-		 * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
+		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
 		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
 
@@ -767,6 +751,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				 * mbuf shouldn't be orphaned
 				 */
 				pend -= pkt_descs;
+				txq->hw_vlan_tci = hw_vlan_tci_prev;
 
 				rte_pktmbuf_free(*pktp);
 
@@ -836,10 +821,12 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			fill_level = added - txq->completed;
 			if (fill_level > hard_max_fill) {
 				pend -= pkt_descs;
+				txq->hw_vlan_tci = hw_vlan_tci_prev;
 				break;
 			}
 		} else {
 			pend -= pkt_descs;
+			txq->hw_vlan_tci = hw_vlan_tci_prev;
 			break;
 		}
 	}
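
---

The patch above replaces the old ETH_TXQ_FLAGS_* checks with the DEV_TX_OFFLOAD_* capability model: the driver now reports device-level and queue-level Tx capabilities separately and derives the effective queue offloads as tx_conf->offloads OR'ed with the port-level txmode.offloads. A minimal application-side sketch of requesting these offloads under that API (as it looked in the DPDK 18.02/18.05 ethdev) follows; configure_tx_offloads() and the port/queue/descriptor values are hypothetical, not part of this patch.

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
configure_tx_offloads(uint16_t port_id, uint16_t queue_id,
		      unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_txconf txconf;
	/* sfc_tx_qcheck_conf() rejects TCP checksum without UDP checksum
	 * (and vice versa), so request them together.
	 */
	uint64_t wanted = DEV_TX_OFFLOAD_IPV4_CKSUM |
			  DEV_TX_OFFLOAD_TCP_CKSUM |
			  DEV_TX_OFFLOAD_UDP_CKSUM;
	int rc;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Make sure the port advertises the requested offloads */
	if ((dev_info.tx_offload_capa & wanted) != wanted)
		return -ENOTSUP;

	memset(&port_conf, 0, sizeof(port_conf));
	port_conf.txmode.offloads = wanted;

	rc = rte_eth_dev_configure(port_id, 1 /* Rx queues */,
				   1 /* Tx queues */, &port_conf);
	if (rc != 0)
		return rc;

	/*
	 * Per-queue offloads; the driver ORs these with the port-level
	 * txmode.offloads (see sfc_tx_qinit() in the patch above).
	 */
	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;	/* opt in to the new API */
	txconf.offloads = wanted;

	return rte_eth_tx_queue_setup(port_id, queue_id,
				      512 /* Tx descriptors */,
				      socket_id, &txconf);
}

During the transition period, setting txq_flags to ETH_TXQ_FLAGS_IGNORE told the ethdev layer to honor the offloads field rather than the legacy flags this patch removes.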