*/
#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
+uint64_t
+sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
+ encp->enc_hw_tx_insert_vlan_enabled)
+ caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
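+	/*
+	 * Fast free assumes that all mbufs on the queue come from one
+	 * mempool and carry a reference count of 1, which is exactly the
+	 * model a datapath without multi-pool and refcount support uses.
+	 */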
+ if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
+ caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ return caps;
+}
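For context, a minimal sketch (not part of this patch; the callback body is
assumed, the rte_eth_dev_info fields are the standard ones) of how the two
helpers above feed the ethdev capability report:

    /* Sketch: queue-level caps are reported separately from port-level
     * caps; the port-level mask is a superset of the queue-level one */
    static void
    example_infos_get(struct sfc_adapter *sa, struct rte_eth_dev_info *dev_info)
    {
    	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
    	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
    				    sfc_tx_get_dev_offload_caps(sa);
    }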
+
+uint64_t
+sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ caps |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (encp->enc_tunnel_encapsulations_supported)
+ caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ if (sa->tso)
+ caps |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ return caps;
+}
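An application-side sketch (function name hypothetical) of validating a
requested per-queue offload mask against the capabilities advertised above
before calling rte_eth_tx_queue_setup():

    #include <errno.h>
    #include <rte_ethdev.h>

    /* Sketch: reject any requested bit the queue capabilities lack */
    static int
    example_check_txq_offloads(uint16_t port_id, uint64_t requested)
    {
    	struct rte_eth_dev_info dev_info;

    	rte_eth_dev_info_get(port_id, &dev_info);
    	if ((requested & dev_info.tx_queue_offload_capa) != requested)
    		return -EINVAL;
    	return 0;
    }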
+
static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads)
{
- unsigned int flags = tx_conf->txq_flags;
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
int rc = 0;
	if (tx_conf->tx_rs_thresh != 0) {
		sfc_err(sa, "RS bit in transmit descriptor is not supported");
		rc = EINVAL;
	}

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfc_warn(sa,
			 "prefetch/host/writeback thresholds are not supported");
	}
- if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
- sfc_err(sa, "Multi-segment is not supported by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
-
- if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) {
- sfc_err(sa, "multi-mempool is not supported by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
-
- if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) {
- sfc_err(sa,
- "mbuf reference counters are neglected by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
-
- if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
- if (!encp->enc_hw_tx_insert_vlan_enabled) {
- sfc_err(sa, "VLAN offload is not supported");
- rc = EINVAL;
- } else if (~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) {
- sfc_err(sa,
- "VLAN offload is not supported by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
- }
-
- if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
- sfc_err(sa, "SCTP offload is not supported");
- rc = EINVAL;
- }
-
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
- ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
+ if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
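To make the coupling concrete, two illustrative per-queue offload masks
(values are examples only, not from this patch):

    /* Sketch: TCP and UDP checksum offloads must be requested together */
    static const uint64_t example_accepted =
    	DEV_TX_OFFLOAD_IPV4_CKSUM |
    	DEV_TX_OFFLOAD_TCP_CKSUM |
    	DEV_TX_OFFLOAD_UDP_CKSUM;	/* passes sfc_tx_qcheck_conf() */

    static const uint64_t example_rejected =
    	DEV_TX_OFFLOAD_IPV4_CKSUM |
    	DEV_TX_OFFLOAD_TCP_CKSUM;	/* EINVAL: UDP checksum missing */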
struct sfc_txq *txq;
int rc = 0;
struct sfc_dp_tx_qcreate_info info;
+ uint64_t offloads;
sfc_log_init(sa, "TxQ = %u", sw_index);
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
- rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+ offloads = tx_conf->offloads |
+ sa->eth_dev->data->dev_conf.txmode.offloads;
+ rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
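The union computed above follows the new offload API contract: offloads set
port-wide via rte_eth_dev_configure() apply to every queue, and
tx_conf->offloads adds queue-local ones. An application-side sketch (port,
queue and descriptor numbers illustrative; ETH_TXQ_FLAGS_IGNORE opts in to
the offload-based path during the API transition):

    #include <rte_ethdev.h>

    static int
    example_txq_setup(uint16_t port_id)
    {
    	struct rte_eth_conf port_conf = {
    		/* Port-wide Tx offloads, applied to all queues */
    		.txmode.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
    	};
    	struct rte_eth_txconf tx_conf = {
    		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
    		/* Queue-local extras, OR-ed in by the PMD as above */
    		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
    			    DEV_TX_OFFLOAD_TCP_CKSUM |
    			    DEV_TX_OFFLOAD_UDP_CKSUM,
    	};
    	int rc;

    	rc = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
    	if (rc != 0)
    		return rc;
    	return rte_eth_tx_queue_setup(port_id, 0, 512,
    				      rte_eth_dev_socket_id(port_id),
    				      &tx_conf);
    }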
txq->free_thresh =
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
- txq->flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
socket_id, &txq->mem);
memset(&info, 0, sizeof(info));
info.max_fill_level = txq_max_fill_level;
info.free_thresh = txq->free_thresh;
- info.flags = tx_conf->txq_flags;
+ info.offloads = offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
info.txq_hw_ring = txq->mem.esm_base;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = txq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
+ info.tso_tcp_header_offset_limit =
+ encp->enc_tx_tso_tcp_header_offset_limit;
rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
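The new tso_tcp_header_offset_limit field lets the datapath refuse TSO mbufs
whose TCP header starts deeper than the controller supports. A sketch of
such a check (function name hypothetical; the mbuf fields and PKT_TX_TCP_SEG
are standard DPDK):

    #include <errno.h>
    #include <rte_mbuf.h>

    /* Sketch: reject TSO mbufs the NIC cannot describe */
    static int
    example_tso_check(const struct rte_mbuf *m, unsigned int tcph_off_limit)
    {
    	/* Offset of the TCP header from the start of the frame */
    	unsigned int tcph_off = m->l2_len + m->l3_len;

    	if ((m->ol_flags & PKT_TX_TCP_SEG) && tcph_off > tcph_off_limit)
    		return EINVAL;	/* TCP header starts too deep */
    	return 0;
    }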
sfc_log_init(sa, "TxQ = %u", sw_index);
SFC_ASSERT(sw_index < sa->txq_count);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
txq_info = &sa->txq_info[sw_index];
txq = txq_info->txq;
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
+ sfc_tx_get_queue_offload_caps(sa);
struct rte_eth_dev_data *dev_data;
struct sfc_txq_info *txq_info;
struct sfc_txq *txq;
struct sfc_evq *evq;
- uint16_t flags;
+ uint16_t flags = 0;
unsigned int desc_index;
int rc = 0;
txq = txq_info->txq;
+ SFC_ASSERT(txq != NULL);
SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
evq = txq->evq;
if (rc != 0)
goto fail_ev_qstart;
-	/*
-	 * It seems that DPDK has no controls regarding IPv4 offloads,
-	 * hence, we always enable it here
-	 */
-	if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
-	    (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
-		flags = EFX_TXQ_CKSUM_IPV4;
-
-		if (encp->enc_tunnel_encapsulations_supported != 0)
-			flags |= EFX_TXQ_CKSUM_INNER_IPV4;
-	} else {
-		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
-
-		if (encp->enc_tunnel_encapsulations_supported != 0)
-			flags |= EFX_TXQ_CKSUM_INNER_IPV4 |
-				 EFX_TXQ_CKSUM_INNER_TCPUDP;
-
-		if (sa->tso)
-			flags |= EFX_TXQ_FATSOV2;
-	}
+	if (txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		flags |= EFX_TXQ_CKSUM_IPV4;
+
+	if (txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
+
+	if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+	    (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+		flags |= EFX_TXQ_CKSUM_TCPUDP;
+
+		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
+	}
+
+	if (txq->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+		flags |= EFX_TXQ_FATSOV2;
+
rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
txq_info->entries, 0 /* not used on EF10 */,
flags, evq->common,
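For reference, the mapping above produces, for a typical tunnel-capable
configuration (values illustrative, not from this patch):

    uint64_t offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
    		    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
    		    DEV_TX_OFFLOAD_TCP_CKSUM |
    		    DEV_TX_OFFLOAD_UDP_CKSUM;
    /*
     * flags becomes EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_INNER_IPV4 |
     * EFX_TXQ_CKSUM_TCPUDP, plus EFX_TXQ_CKSUM_INNER_TCPUDP when the
     * adapter also advertises DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM support.
     */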
txq = txq_info->txq;
- if (txq->state == SFC_TXQ_INITIALIZED)
+ if (txq == NULL || txq->state == SFC_TXQ_INITIALIZED)
return;
SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
sfc_err(sa, "TxQ %u flush timed out", sw_index);
if (txq->state & SFC_TXQ_FLUSHED)
- sfc_info(sa, "TxQ %u flushed", sw_index);
+ sfc_notice(sa, "TxQ %u flushed", sw_index);
}
sa->dp_tx->qreap(txq->dp);
goto fail_efx_tx_init;
for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
- if (!(sa->txq_info[sw_index].deferred_start) ||
- sa->txq_info[sw_index].deferred_started) {
+ if (sa->txq_info[sw_index].txq != NULL &&
+ (!(sa->txq_info[sw_index].deferred_start) ||
+ sa->txq_info[sw_index].deferred_started)) {
rc = sfc_tx_qstart(sa, sw_index);
if (rc != 0)
goto fail_tx_qstart;
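The check above pairs with the application-side deferred start API: a queue
configured with tx_deferred_start set is skipped by device start and must be
started explicitly later. A sketch (function name hypothetical):

    #include <rte_ethdev.h>

    /* Sketch: start a Tx queue that was configured with
     * tx_conf.tx_deferred_start = 1 and thus skipped by
     * rte_eth_dev_start() */
    static int
    example_start_deferred_txq(uint16_t port_id, uint16_t queue_id)
    {
    	return rte_eth_dev_tx_queue_start(port_id, queue_id);
    }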
	/*
-	 * Here VLAN TCI is expected to be zero in case if no
-	 * DEV_TX_VLAN_OFFLOAD capability is advertised;
-	 * if the calling app ignores the absence of
-	 * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
-	 * TX_ERROR will occur
+	 * Here VLAN TCI is expected to be zero if the
+	 * DEV_TX_OFFLOAD_VLAN_INSERT capability is not advertised;
+	 * if the calling app ignores the absence of
+	 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
+	 * TX_ERROR will occur
	 */
pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
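The application-side counterpart of the comment above (sketch; function name
hypothetical, the mbuf fields and PKT_TX_VLAN_PKT are standard DPDK):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Sketch: request HW VLAN insertion only when the port advertises
     * DEV_TX_OFFLOAD_VLAN_INSERT; otherwise the TCI must stay zero */
    static void
    example_request_vlan(struct rte_mbuf *m, uint64_t tx_offload_capa,
    			 uint16_t tci)
    {
    	if (tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
    		m->ol_flags |= PKT_TX_VLAN_PKT;
    		m->vlan_tci = tci;	/* NIC inserts the tag on transmit */
    	}
    	/* else: insert the tag in software or send untagged */
    }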