rc = EINVAL;
}
+ if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
+ sfc_err(sa, "Multi-segment is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
+ if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) {
+ sfc_err(sa, "multi-mempool is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
+ if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) {
+ sfc_err(sa,
+ "mbuf reference counters are neglected by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
if (!encp->enc_hw_tx_insert_vlan_enabled) {
sfc_err(sa, "VLAN offload is not supported");
struct sfc_txq_info *txq_info;
struct sfc_evq *evq;
struct sfc_txq *txq;
- unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);
int rc = 0;
struct sfc_dp_tx_qcreate_info info;
SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
txq_info->entries = nb_tx_desc;
- rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id);
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
+ txq_info->entries, socket_id, &evq);
if (rc != 0)
goto fail_ev_qinit;
- evq = sa->evq_info[evq_index].evq;
-
rc = ENOMEM;
txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
if (txq == NULL)
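The event queue is now created by the Tx code itself and handed back through an out parameter. The contract implied by the call above, as a sketch inferred from the call site (the authoritative prototype lives in sfc_ev.h):

int sfc_ev_qinit(struct sfc_adapter *sa,
		 enum sfc_evq_type type,	/* SFC_EVQ_TYPE_TX here */
		 unsigned int type_index,	/* TxQ software index */
		 unsigned int entries,
		 int socket_id,
		 struct sfc_evq **evqp);	/* created EvQ returned here */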
info.flags = tx_conf->txq_flags;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
+ info.txq_hw_ring = txq->mem.esm_base;
+ info.evq_entries = txq_info->entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = txq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
socket_id, &info, &txq->dp);
if (rc != 0)
goto fail_dp_tx_qinit;
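The create-time info block now carries everything a datapath needs to drive the rings directly. A sketch of its field set, inferred purely from the assignments above (the authoritative definition is in sfc_dp_tx.h and may differ):

struct sfc_dp_tx_qcreate_info {
	unsigned int	flags;			/* ETH_TXQ_FLAGS_* from tx_conf */
	unsigned int	txq_entries;		/* TxQ ring size in descriptors */
	unsigned int	dma_desc_size_max;	/* max byte count of one DMA descriptor */
	void		*txq_hw_ring;		/* TxQ ring DMA memory */
	unsigned int	evq_entries;		/* paired EvQ ring size */
	void		*evq_hw_ring;		/* paired EvQ ring DMA memory */
	unsigned int	hw_index;		/* TxQ HW index for doorbell writes */
	volatile void	*mem_bar;		/* mapped PCI memory BAR */
};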
rte_free(txq);
fail_txq_alloc:
- sfc_ev_qfini(sa, evq_index);
+ sfc_ev_qfini(evq);
fail_ev_qinit:
txq_info->entries = 0;
txq_info->entries = 0;
sfc_dma_free(sa, &txq->mem);
+
+ sfc_ev_qfini(txq->evq);
+ txq->evq = NULL;
+
rte_free(txq);
}
return rc;
}
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_tx_queues <= sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (--sw_index >= (int)nb_tx_queues) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qfini(sa, sw_index);
+ }
+
+ sa->txq_count = nb_tx_queues;
+}
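A sketch of the ethdev-level flow that exercises this helper (hypothetical port and queue counts, error handling elided):

static void
reconfigure_sketch(uint16_t port_id, const struct rte_eth_conf *dev_conf)
{
	/* Initial configuration: 8 Tx queues */
	rte_eth_dev_configure(port_id, 4 /* nb_rxq */, 8 /* nb_txq */, dev_conf);

	/* ... traffic runs, then the port is stopped to reconfigure ... */
	rte_eth_dev_stop(port_id);

	/*
	 * Shrink to 4 Tx queues: sfc_tx_configure() calls
	 * sfc_tx_fini_queues(sa, 4), destroying queues 7..4 in reverse
	 * order; growing back would create only the missing queues.
	 */
	rte_eth_dev_configure(port_id, 4, 4, dev_conf);
}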
+
int
-sfc_tx_init(struct sfc_adapter *sa)
+sfc_tx_configure(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
- unsigned int sw_index;
+ const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
int rc = 0;
+ sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
+ nb_tx_queues, sa->txq_count);
+
/*
* The datapath implementation assumes absence of boundary
* limits on Tx DMA descriptors. Addition of these checks on
goto fail_tx_dma_desc_boundary;
}
- if (~sa->dp_tx->features & SFC_DP_TX_FEAT_TSO)
- sa->tso = B_FALSE;
-
rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
if (rc != 0)
goto fail_check_mode;
- sa->txq_count = sa->eth_dev->data->nb_tx_queues;
+ if (nb_tx_queues == sa->txq_count)
+ goto done;
- sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count,
- sizeof(sa->txq_info[0]), 0,
- sa->socket_id);
- if (sa->txq_info == NULL)
- goto fail_txqs_alloc;
+ if (sa->txq_info == NULL) {
+ sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+ sizeof(sa->txq_info[0]), 0,
+ sa->socket_id);
+ if (sa->txq_info == NULL) {
+ rc = ENOMEM;
+ goto fail_txqs_alloc;
+ }
+ } else {
+ struct sfc_txq_info *new_txq_info;
+
+ if (nb_tx_queues < sa->txq_count)
+ sfc_tx_fini_queues(sa, nb_tx_queues);
+
+ new_txq_info =
+ rte_realloc(sa->txq_info,
+ nb_tx_queues * sizeof(sa->txq_info[0]), 0);
+ if (new_txq_info == NULL && nb_tx_queues > 0) {
+ rc = ENOMEM;
+ goto fail_txqs_realloc;
+ }
+
+ sa->txq_info = new_txq_info;
+ if (nb_tx_queues > sa->txq_count)
+ memset(&sa->txq_info[sa->txq_count], 0,
+ (nb_tx_queues - sa->txq_count) *
+ sizeof(sa->txq_info[0]));
+ }
- for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
- rc = sfc_tx_qinit_info(sa, sw_index);
+ while (sa->txq_count < nb_tx_queues) {
+ rc = sfc_tx_qinit_info(sa, sa->txq_count);
if (rc != 0)
goto fail_tx_qinit_info;
+
+ sa->txq_count++;
}
+done:
return 0;
fail_tx_qinit_info:
- rte_free(sa->txq_info);
- sa->txq_info = NULL;
-
+fail_txqs_realloc:
fail_txqs_alloc:
- sa->txq_count = 0;
+ sfc_tx_close(sa);
fail_check_mode:
fail_tx_dma_desc_boundary:
}
void
-sfc_tx_fini(struct sfc_adapter *sa)
+sfc_tx_close(struct sfc_adapter *sa)
{
- int sw_index;
-
- sw_index = sa->txq_count;
- while (--sw_index >= 0) {
- if (sa->txq_info[sw_index].txq != NULL)
- sfc_tx_qfini(sa, sw_index);
- }
+ sfc_tx_fini_queues(sa, 0);
rte_free(sa->txq_info);
sa->txq_info = NULL;
- sa->txq_count = 0;
}
int
evq = txq->evq;
- rc = sfc_ev_qstart(sa, evq->evq_index);
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
efx_tx_qdestroy(txq->common);
fail_tx_qcreate:
- sfc_ev_qstop(sa, evq->evq_index);
+ sfc_ev_qstop(evq);
fail_ev_qstart:
return rc;
struct sfc_txq *txq;
unsigned int retry_count;
unsigned int wait_count;
+ int rc;
sfc_log_init(sa, "TxQ = %u", sw_index);
((txq->state & SFC_TXQ_FLUSHED) == 0) &&
(retry_count < SFC_TX_QFLUSH_ATTEMPTS);
++retry_count) {
- if (efx_tx_qflush(txq->common) != 0) {
- txq->state |= SFC_TXQ_FLUSHING;
+ rc = efx_tx_qflush(txq->common);
+ if (rc != 0) {
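+ /*
+ * EALREADY is not a failure: the queue is already flushed,
+ * so it is reported as FLUSHED; any other error marks the
+ * flush attempt as failed
+ */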
+ txq->state |= (rc == EALREADY) ?
+ SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
break;
}
efx_tx_qdestroy(txq->common);
- sfc_ev_qstop(sa, txq->evq->evq_index);
+ sfc_ev_qstop(txq->evq);
/*
* It seems to be used by DPDK for debug purposes only ('rte_ether')
txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
}
+static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
+static int
+sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ if (unlikely(offset > txq->ptr_mask))
+ return -EINVAL;
+
+ if (unlikely(offset >= EFX_TXQ_LIMIT(txq->ptr_mask + 1)))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+ /*
+ * Poll the EvQ to derive an up-to-date 'txq->pending' figure;
+ * the queue is required to be running, but the runtime check
+ * is omitted because the API design makes it the caller's duty
+ * to satisfy all preconditions
+ */
+ SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
+ SFC_EFX_TXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(txq->evq);
+
+ /*
+ * Ring tail is 'txq->pending', and although descriptors
+ * between 'txq->completed' and 'txq->pending' are still
+ * in use by the driver, they should be reported as DONE
+ */
+ if (unlikely(offset < (txq->added - txq->pending)))
+ return RTE_ETH_TX_DESC_FULL;
+
+ /*
+ * There is no separate return value for unused descriptors;
+ * they are reported as DONE since genuine DONE descriptors
+ * are freed by software on the next burst anyway
+ */
+ return RTE_ETH_TX_DESC_DONE;
+}
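At the ethdev level this callback backs rte_eth_tx_descriptor_status(). A minimal usage sketch (hypothetical port/queue identifiers and a 512-entry ring):

static int
txq_half_in_flight(uint16_t port_id, uint16_t tx_queue_id)
{
	/*
	 * Probe the descriptor 256 slots from the ring tail: FULL means
	 * at least half of the (hypothetical) 512-entry ring is still in
	 * use by the driver or HW, so the caller may want to back off
	 */
	int status = rte_eth_tx_descriptor_status(port_id, tx_queue_id, 256);

	if (status < 0)
		return status;	/* e.g. -EINVAL from the offset check above */

	return (status == RTE_ETH_TX_DESC_FULL);
}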
+
struct sfc_dp_tx sfc_efx_tx = {
.dp = {
.name = SFC_KVARG_DATAPATH_EFX,
.hw_fw_caps = 0,
},
.features = SFC_DP_TX_FEAT_VLAN_INSERT |
- SFC_DP_TX_FEAT_TSO,
+ SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_POOL |
+ SFC_DP_TX_FEAT_REFCNT |
+ SFC_DP_TX_FEAT_MULTI_SEG,
.qcreate = sfc_efx_tx_qcreate,
.qdestroy = sfc_efx_tx_qdestroy,
.qstart = sfc_efx_tx_qstart,
.qstop = sfc_efx_tx_qstop,
.qreap = sfc_efx_tx_qreap,
+ .qdesc_status = sfc_efx_tx_qdesc_status,
.pkt_burst = sfc_efx_xmit_pkts,
};
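For reference, the feature bits listed above are plain capability flags declared in sfc_dp_tx.h; the generic layer compares them against the ETH_TXQ_FLAGS_NO* bits in the queue-configuration checks at the top of this patch. A hedged sketch (bit values illustrative, not authoritative):

#define SFC_DP_TX_FEAT_VLAN_INSERT	0x1
#define SFC_DP_TX_FEAT_TSO		0x2
#define SFC_DP_TX_FEAT_MULTI_SEG	0x4
#define SFC_DP_TX_FEAT_MULTI_POOL	0x8
#define SFC_DP_TX_FEAT_REFCNT		0x10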