const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
uint64_t caps = 0;
- if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
+ if ((sa->priv.dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
encp->enc_hw_tx_insert_vlan_enabled)
caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
- if (sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
- if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
+ if ((~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
+ (~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
return caps;
}
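Note: DEV_TX_OFFLOAD_MBUF_FAST_FREE above is advertised only when the datapath declares neither multi-pool nor refcount support, because fast free returns every mbuf to a single pool without touching reference counts. A minimal stand-alone sketch of that eligibility test (the FEAT_* names are illustrative, not the driver's):

/* Illustrative feature bits, not the driver's definitions. */
#define FEAT_MULTI_POOL	0x1u
#define FEAT_REFCNT	0x2u

/*
 * Nonzero iff the datapath handles neither feature, i.e. it already
 * operates under the assumptions DEV_TX_OFFLOAD_MBUF_FAST_FREE makes.
 */
static int
fast_free_allowed(unsigned int features)
{
	return (~features & FEAT_MULTI_POOL) && (~features & FEAT_REFCNT);
}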
-static void
-sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
- const char *verdict, uint64_t offloads)
-{
- unsigned long long bit;
-
- while ((bit = __builtin_ffsll(offloads)) != 0) {
- uint64_t flag = (1ULL << --bit);
-
- sfc_err(sa, "Tx %s offload %s %s", offload_group,
- rte_eth_dev_tx_offload_name(flag), verdict);
-
- offloads &= ~flag;
- }
-}
-
-static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
- uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
- sfc_tx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_tx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads)
{
int rc = 0;
}
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
- ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+ if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
- if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
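Note: the `(x == 0) != (y == 0)` test above flags the case where exactly one of the two checksum offloads is requested. A self-contained sketch of the same "both or neither" rule (the helper name is hypothetical):

#include <stdint.h>
#include <rte_ethdev.h>

/*
 * Returns nonzero when the TCP/UDP checksum requests are consistent,
 * i.e. both offloads are set or both are clear.
 */
static int
tcp_udp_cksum_consistent(uint64_t offloads)
{
	int tcp = (offloads & DEV_TX_OFFLOAD_TCP_CKSUM) != 0;
	int udp = (offloads & DEV_TX_OFFLOAD_UDP_CKSUM) != 0;

	return tcp == udp;
}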
void
-sfc_tx_qflush_done(struct sfc_txq *txq)
+sfc_tx_qflush_done(struct sfc_txq_info *txq_info)
{
- txq->state |= SFC_TXQ_FLUSHED;
- txq->state &= ~SFC_TXQ_FLUSHING;
+ txq_info->state |= SFC_TXQ_FLUSHED;
+ txq_info->state &= ~SFC_TXQ_FLUSHING;
}
int
struct sfc_txq *txq;
int rc = 0;
struct sfc_dp_tx_qcreate_info info;
+ uint64_t offloads;
sfc_log_init(sa, "TxQ = %u", sw_index);
- rc = sa->dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries, &evq_entries,
- &txq_max_fill_level);
+ rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries,
+ &evq_entries, &txq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
- rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+ offloads = tx_conf->offloads |
+ sa->eth_dev->data->dev_conf.txmode.offloads;
+ rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
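Note: under the DPDK offload API, port-level txmode offloads apply to every queue, so the effective per-queue set is the union computed above. Restated as a sketch (the helper name is hypothetical):

#include <stdint.h>
#include <rte_ethdev.h>

/* Effective Tx offloads for one queue: queue-level ORed with port-level. */
static uint64_t
effective_tx_offloads(const struct rte_eth_dev *dev,
		      const struct rte_eth_txconf *tx_conf)
{
	return tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
}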
if (rc != 0)
goto fail_ev_qinit;
- rc = ENOMEM;
- txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
- if (txq == NULL)
- goto fail_txq_alloc;
-
- txq_info->txq = txq;
-
+ txq = &sa->txq_ctrl[sw_index];
txq->hw_index = sw_index;
txq->evq = evq;
- txq->free_thresh =
+ txq_info->free_thresh =
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
- txq->flags = tx_conf->txq_flags;
- txq->offloads = tx_conf->offloads;
+ txq_info->offloads = offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
socket_id, &txq->mem);
memset(&info, 0, sizeof(info));
info.max_fill_level = txq_max_fill_level;
- info.free_thresh = txq->free_thresh;
- info.flags = tx_conf->txq_flags;
- info.offloads = tx_conf->offloads;
+ info.free_thresh = txq_info->free_thresh;
+ info.offloads = offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
info.txq_hw_ring = txq->mem.esm_base;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = txq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
+ info.tso_tcp_header_offset_limit =
+ encp->enc_tx_tso_tcp_header_offset_limit;
- rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
- socket_id, &info, &txq->dp);
+ rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &txq_info->dp);
if (rc != 0)
goto fail_dp_tx_qinit;
- evq->dp_txq = txq->dp;
+ evq->dp_txq = txq_info->dp;
- txq->state = SFC_TXQ_INITIALIZED;
+ txq_info->state = SFC_TXQ_INITIALIZED;
txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
sfc_dma_free(sa, &txq->mem);
fail_dma_alloc:
- txq_info->txq = NULL;
- rte_free(txq);
-
-fail_txq_alloc:
sfc_ev_qfini(evq);
fail_ev_qinit:
sfc_log_init(sa, "TxQ = %u", sw_index);
SFC_ASSERT(sw_index < sa->txq_count);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
txq_info = &sa->txq_info[sw_index];
- txq = txq_info->txq;
- SFC_ASSERT(txq != NULL);
- SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+ SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED);
- sa->dp_tx->qdestroy(txq->dp);
- txq->dp = NULL;
+ sa->priv.dp_tx->qdestroy(txq_info->dp);
+ txq_info->dp = NULL;
- txq_info->txq = NULL;
+ txq_info->state &= ~SFC_TXQ_INITIALIZED;
txq_info->entries = 0;
+ txq = &sa->txq_ctrl[sw_index];
+
sfc_dma_free(sa, &txq->mem);
sfc_ev_qfini(txq->evq);
txq->evq = NULL;
-
- rte_free(txq);
}
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
- uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
- sfc_tx_get_queue_offload_caps(sa);
- uint64_t offloads_rejected = txmode->offloads & ~offloads_supported;
int rc = 0;
switch (txmode->mq_mode) {
rc = EINVAL;
}
- if (offloads_rejected) {
- sfc_tx_log_offloads(sa, "device", "is unsupported",
- offloads_rejected);
- rc = EINVAL;
- }
-
return rc;
}
sw_index = sa->txq_count;
while (--sw_index >= (int)nb_tx_queues) {
- if (sa->txq_info[sw_index].txq != NULL)
+ if (sa->txq_info[sw_index].state & SFC_TXQ_INITIALIZED)
sfc_tx_qfini(sa, sw_index);
}
sa->socket_id);
if (sa->txq_info == NULL)
goto fail_txqs_alloc;
+
+ /*
+ * Allocate primary-process-only TxQ control from the heap
+ * since it must not be shared with secondary processes.
+ */
+ rc = ENOMEM;
+ sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0]));
+ if (sa->txq_ctrl == NULL)
+ goto fail_txqs_ctrl_alloc;
} else {
struct sfc_txq_info *new_txq_info;
+ struct sfc_txq *new_txq_ctrl;
if (nb_tx_queues < sa->txq_count)
sfc_tx_fini_queues(sa, nb_tx_queues);
if (new_txq_info == NULL && nb_tx_queues > 0)
goto fail_txqs_realloc;
+ new_txq_ctrl = realloc(sa->txq_ctrl,
+ nb_tx_queues * sizeof(sa->txq_ctrl[0]));
+ if (new_txq_ctrl == NULL && nb_tx_queues > 0)
+ goto fail_txqs_ctrl_realloc;
+
sa->txq_info = new_txq_info;
- if (nb_tx_queues > sa->txq_count)
+ sa->txq_ctrl = new_txq_ctrl;
+ if (nb_tx_queues > sa->txq_count) {
memset(&sa->txq_info[sa->txq_count], 0,
(nb_tx_queues - sa->txq_count) *
sizeof(sa->txq_info[0]));
+ memset(&sa->txq_ctrl[sa->txq_count], 0,
+ (nb_tx_queues - sa->txq_count) *
+ sizeof(sa->txq_ctrl[0]));
+ }
}
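Note: the split above keeps per-queue info in DPDK memory (rte_*alloc, visible to secondary processes) while control state comes from the plain process heap (calloc/realloc). A self-contained sketch of the allocation pattern, with illustrative stand-in types rather than the driver's:

#include <errno.h>
#include <stdlib.h>
#include <rte_malloc.h>

/* Stand-ins for the shared info and primary-only control structures. */
struct q_shared { unsigned int state; };
struct q_ctrl { void *mem; };

static int
queues_alloc(struct q_shared **shared, struct q_ctrl **ctrl,
	     unsigned int n, int socket_id)
{
	/* Shared per-queue info: DPDK memory, visible to secondaries. */
	*shared = rte_calloc_socket("q-shared", n, sizeof(**shared), 0,
				    socket_id);
	if (*shared == NULL)
		return ENOMEM;

	/* Control state: process-local heap, never shared. */
	*ctrl = calloc(n, sizeof(**ctrl));
	if (*ctrl == NULL) {
		rte_free(*shared);
		*shared = NULL;
		return ENOMEM;
	}
	return 0;
}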
while (sa->txq_count < nb_tx_queues) {
return 0;
fail_tx_qinit_info:
+fail_txqs_ctrl_realloc:
fail_txqs_realloc:
+fail_txqs_ctrl_alloc:
fail_txqs_alloc:
sfc_tx_close(sa);
{
sfc_tx_fini_queues(sa, 0);
+ free(sa->txq_ctrl);
+ sa->txq_ctrl = NULL;
+
rte_free(sa->txq_info);
sa->txq_info = NULL;
}
SFC_ASSERT(sw_index < sa->txq_count);
txq_info = &sa->txq_info[sw_index];
- txq = txq_info->txq;
-
- SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+ SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED);
+ txq = &sa->txq_ctrl[sw_index];
evq = txq->evq;
rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application which expects that IPv4 checksum offload is enabled
- * all the time as there is no legacy flag to turn off the offload.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ||
- (~txq->flags & ETH_TXQ_FLAGS_IGNORE))
+ if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_IPV4;
- if ((txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)))
+ if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_IPV4;
- if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
- (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+ (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
flags |= EFX_TXQ_CKSUM_TCPUDP;
- if ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application. In turn, the absence of ETH_TXQ_FLAGS_NOXSUMTCP is
- * associated specifically with a legacy application which expects
- * both TCP checksum offload and TSO to be enabled because the legacy
- * API does not provide a dedicated mechanism to control TSO.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (~txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP)))
+ if (txq_info->offloads & DEV_TX_OFFLOAD_TCP_TSO)
flags |= EFX_TXQ_FATSOV2;
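Note: with the legacy ETH_TXQ_FLAGS paths removed, the libefx queue flags become a pure function of the offload bits. The mapping above, collected into one sketch (the helper name is hypothetical; the EFX_* and DEV_TX_OFFLOAD_* names are the ones used in the patch):

/* Derive libefx TxQ checksum/TSO flags from DPDK offload bits only. */
static unsigned int
txq_flags_from_offloads(uint64_t offloads, uint64_t offloads_supported)
{
	unsigned int flags = 0;

	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		flags |= EFX_TXQ_CKSUM_IPV4;
	if (offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
		flags |= EFX_TXQ_CKSUM_INNER_IPV4;
	if (offloads & (DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) {
		flags |= EFX_TXQ_CKSUM_TCPUDP;
		/* Inner L4 checksum requires tunnel-capable hardware. */
		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
	}
	if (offloads & DEV_TX_OFFLOAD_TCP_TSO)
		flags |= EFX_TXQ_FATSOV2;

	return flags;
}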
- rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
+ rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
txq_info->entries, 0 /* not used on EF10 */,
flags, evq->common,
&txq->common, &desc_index);
efx_tx_qenable(txq->common);
- txq->state |= SFC_TXQ_STARTED;
+ txq_info->state |= SFC_TXQ_STARTED;
- rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
+ rc = sa->priv.dp_tx->qstart(txq_info->dp, evq->read_ptr, desc_index);
if (rc != 0)
goto fail_dp_qstart;
return 0;
fail_dp_qstart:
- txq->state = SFC_TXQ_INITIALIZED;
+ txq_info->state = SFC_TXQ_INITIALIZED;
efx_tx_qdestroy(txq->common);
fail_tx_qcreate:
SFC_ASSERT(sw_index < sa->txq_count);
txq_info = &sa->txq_info[sw_index];
- txq = txq_info->txq;
-
- if (txq->state == SFC_TXQ_INITIALIZED)
+ if (txq_info->state == SFC_TXQ_INITIALIZED)
return;
- SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
+ SFC_ASSERT(txq_info->state & SFC_TXQ_STARTED);
- sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
+ txq = &sa->txq_ctrl[sw_index];
+ sa->priv.dp_tx->qstop(txq_info->dp, &txq->evq->read_ptr);
/*
* Retry TX queue flushing if the previous attempt failed or
* timed out; in the worst case this can delay for 6 seconds
*/
for (retry_count = 0;
- ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
+ ((txq_info->state & SFC_TXQ_FLUSHED) == 0) &&
(retry_count < SFC_TX_QFLUSH_ATTEMPTS);
++retry_count) {
rc = efx_tx_qflush(txq->common);
if (rc != 0) {
- txq->state |= (rc == EALREADY) ?
+ txq_info->state |= (rc == EALREADY) ?
SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
break;
}
do {
rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
sfc_ev_qpoll(txq->evq);
- } while ((txq->state & SFC_TXQ_FLUSHING) &&
+ } while ((txq_info->state & SFC_TXQ_FLUSHING) &&
wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
- if (txq->state & SFC_TXQ_FLUSHING)
+ if (txq_info->state & SFC_TXQ_FLUSHING)
sfc_err(sa, "TxQ %u flush timed out", sw_index);
- if (txq->state & SFC_TXQ_FLUSHED)
+ if (txq_info->state & SFC_TXQ_FLUSHED)
sfc_notice(sa, "TxQ %u flushed", sw_index);
}
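Note: the flush sequence above is a bounded retry: request a flush, then poll the event queue until FLUSHING clears or the poll budget runs out, for at most SFC_TX_QFLUSH_ATTEMPTS rounds. A condensed, self-contained sketch of the pattern (all names illustrative; the driver additionally treats EALREADY from efx_tx_qflush() as already flushed):

enum { Q_FLUSHING = 0x1, Q_FLUSHED = 0x2, Q_FLUSH_FAILED = 0x4 };

static unsigned int
flush_with_retry(unsigned int state, int (*flush)(void),
		 void (*poll)(unsigned int *statep),
		 unsigned int attempts, unsigned int poll_attempts)
{
	unsigned int retry;

	for (retry = 0; !(state & Q_FLUSHED) && retry < attempts; retry++) {
		if (flush() != 0) {
			state |= Q_FLUSH_FAILED;
			break;
		}
		state |= Q_FLUSHING;

		unsigned int waits = 0;
		do {
			poll(&state); /* a flush-done event clears FLUSHING */
		} while ((state & Q_FLUSHING) && waits++ < poll_attempts);
	}
	return state;
}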
- sa->dp_tx->qreap(txq->dp);
+ sa->priv.dp_tx->qreap(txq_info->dp);
- txq->state = SFC_TXQ_INITIALIZED;
+ txq_info->state = SFC_TXQ_INITIALIZED;
efx_tx_qdestroy(txq->common);
goto fail_efx_tx_init;
for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
- if (!(sa->txq_info[sw_index].deferred_start) ||
- sa->txq_info[sw_index].deferred_started) {
+ if (sa->txq_info[sw_index].state == SFC_TXQ_INITIALIZED &&
+ (!(sa->txq_info[sw_index].deferred_start) ||
+ sa->txq_info[sw_index].deferred_started)) {
rc = sfc_tx_qstart(sa, sw_index);
if (rc != 0)
goto fail_tx_qstart;
sw_index = sa->txq_count;
while (sw_index-- > 0) {
- if (sa->txq_info[sw_index].txq != NULL)
+ if (sa->txq_info[sw_index].state & SFC_TXQ_STARTED)
sfc_tx_qstop(sa, sw_index);
}
for (pkts_sent = 0, pktp = &tx_pkts[0];
(pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
pkts_sent++, pktp++) {
+ uint16_t hw_vlan_tci_prev = txq->hw_vlan_tci;
struct rte_mbuf *m_seg = *pktp;
size_t pkt_len = m_seg->pkt_len;
unsigned int pkt_descs = 0;
* mbuf shouldn't be orphaned
*/
pend -= pkt_descs;
+ txq->hw_vlan_tci = hw_vlan_tci_prev;
rte_pktmbuf_free(*pktp);
fill_level = added - txq->completed;
if (fill_level > hard_max_fill) {
pend -= pkt_descs;
+ txq->hw_vlan_tci = hw_vlan_tci_prev;
break;
}
} else {
pend -= pkt_descs;
+ txq->hw_vlan_tci = hw_vlan_tci_prev;
break;
}
}
return pkts_sent;
}
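Note: txq->hw_vlan_tci caches the VLAN tag most recently programmed into the ring, so every early-exit path that unwinds a packet's descriptors (pend -= pkt_descs) must also restore the cached tag; otherwise the next packet carrying the same TCI could wrongly skip its VLAN option descriptor. A minimal sketch of the snapshot/rollback idiom added above (names illustrative):

#include <stdint.h>

struct tx_ring_state {
	unsigned int pend;	/* descriptors built but not yet pushed */
	uint16_t hw_vlan_tci;	/* VLAN tag last written to the ring */
};

/*
 * Unwind one packet: drop its descriptors and restore the cached tag
 * snapshotted before the packet was built.
 */
static void
rollback_packet(struct tx_ring_state *r, unsigned int pkt_descs,
		uint16_t hw_vlan_tci_prev)
{
	r->pend -= pkt_descs;
	r->hw_vlan_tci = hw_vlan_tci_prev;
}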
+const struct sfc_dp_tx *
+sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+ const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter_priv *sap;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sap = sfc_adapter_priv_by_eth_dev(eth_dev);
+
+ return sap->dp_tx;
+}
+
+struct sfc_txq_info *
+sfc_txq_info_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+ const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->txq_count);
+ return &sa->txq_info[dpq->queue_id];
+}
+
struct sfc_txq *
sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
{
const struct sfc_dp_queue *dpq = &dp_txq->dpq;
struct rte_eth_dev *eth_dev;
struct sfc_adapter *sa;
- struct sfc_txq *txq;
SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
eth_dev = &rte_eth_devices[dpq->port_id];
sa = eth_dev->data->dev_private;
SFC_ASSERT(dpq->queue_id < sa->txq_count);
- txq = sa->txq_info[dpq->queue_id].txq;
-
- SFC_ASSERT(txq != NULL);
- return txq;
+ return &sa->txq_ctrl[dpq->queue_id];
}
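Note: all three lookup helpers recover driver state from nothing but the (port_id, queue_id) pair stored in sfc_dp_queue, which also works in secondary processes because rte_eth_devices lives in shared memory. A hedged usage sketch (the op and its body are hypothetical):

/* How a datapath op might combine the helpers defined above. */
static void
example_dp_op(struct sfc_dp_txq *dp_txq)
{
	const struct sfc_dp_tx *dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);
	struct sfc_txq_info *txq_info = sfc_txq_info_by_dp_txq(dp_txq);

	if (dp_tx != NULL && (txq_info->state & SFC_TXQ_STARTED)) {
		/* ... operate on the started queue ... */
	}
}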
static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;