+
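+/*
+ * Find the control path Tx queue which corresponds to the given
+ * datapath Tx queue using the port and queue IDs stored in the
+ * generic datapath queue descriptor.
+ */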
+struct sfc_txq *
+sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+ const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_txq *txq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->txq_count);
+ txq = sa->txq_info[dpq->queue_id].txq;
+
+ SFC_ASSERT(txq != NULL);
+ return txq;
+}
+
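+/*
+ * qcreate callback of the libefx-based Tx datapath: allocate the
+ * datapath Tx queue together with the pending descriptor array,
+ * the software ring and, if TSO is enabled, TSO header buffers.
+ */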
+static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
+static int
+sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_efx_txq *txq;
+ struct sfc_txq *ctrl_txq;
+ int rc;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
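+ /* Staging array for Tx descriptors built before posting to HW */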
+ rc = ENOMEM;
+ txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
+ EFX_TXQ_LIMIT(info->txq_entries),
+ sizeof(*txq->pend_desc), 0,
+ socket_id);
+ if (txq->pend_desc == NULL)
+ goto fail_pend_desc_alloc;
+
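+ /* Software ring keeps mbuf (and TSO header) references per descriptor */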
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
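+ /* TSO header buffers are required only if TSO is enabled for the adapter */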
+ ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
+ if (ctrl_txq->evq->sa->tso) {
+ rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
+ info->txq_entries, socket_id);
+ if (rc != 0)
+ goto fail_alloc_tsoh_objs;
+ }
+
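+ /* Copy queue parameters; ring size is a power of 2, so entries - 1 serves as the index mask */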
+ txq->evq = ctrl_txq->evq;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->free_thresh = info->free_thresh;
+ txq->dma_desc_size_max = info->dma_desc_size_max;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_alloc_tsoh_objs:
+ rte_free(txq->sw_ring);
+
+fail_sw_ring_alloc:
+ rte_free(txq->pend_desc);
+
+fail_pend_desc_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+ return rc;
+}
+
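+/*
+ * qdestroy callback: free TSO header buffers (if any) and the memory
+ * allocated for the queue at creation time.
+ */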
+static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
+static void
+sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
+ rte_free(txq->sw_ring);
+ rte_free(txq->pend_desc);
+ rte_free(txq);
+}
+
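+/*
+ * qstart callback: reset the datapath queue state and mark the queue
+ * as started and running.
+ */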
+static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
+static int
+sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
+
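+ /* Bind to the common libefx Tx queue created by the control path */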
+ txq->common = ctrl_txq->common;
+
+ txq->pending = txq->completed = txq->added = txq_desc_index;
+ txq->hw_vlan_tci = 0;
+
+ txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
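+/*
+ * qstop callback: clear the RUNNING flag so that the transmit burst
+ * callback stops accepting packets; the queue flush itself is driven
+ * by the control path.
+ */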
+static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
+static void
+sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
+}
+
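+/*
+ * qreap callback: complete what has already been sent and free all
+ * mbufs still referenced by the software ring.
+ */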
+static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
+static void
+sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ sfc_efx_tx_reap(txq);
+
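+ /* Free mbufs which are still owned by the queue after the final reap */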
+ for (txds = 0; txds <= txq->ptr_mask; txds++) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
+}
+
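+/*
+ * Tx datapath definition for the libefx-based implementation: it does
+ * not require any special HW/FW capabilities and supports VLAN
+ * insertion, TSO and multi-segment packets.
+ */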
+struct sfc_dp_tx sfc_efx_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_TX_FEAT_VLAN_INSERT |
+ SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_SEG,
+ .qcreate = sfc_efx_tx_qcreate,
+ .qdestroy = sfc_efx_tx_qdestroy,
+ .qstart = sfc_efx_tx_qstart,
+ .qstop = sfc_efx_tx_qstop,
+ .qreap = sfc_efx_tx_qreap,
+ .pkt_burst = sfc_efx_xmit_pkts,
+};