From: Igor Romanov
Date: Fri, 2 Jul 2021 08:39:34 +0000 (+0300)
Subject: net/sfc: prepare for internal Tx queue
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=db980d266f7eb01ac463b8c95a5a9e1fe5f87ff7;p=dpdk.git

net/sfc: prepare for internal Tx queue

Make software index of a Tx queue and ethdev index separate.
When an ethdev TxQ is accessed in ethdev callbacks, an explicit
ethdev queue index is used.

Signed-off-by: Igor Romanov
Signed-off-by: Andrew Rybchenko
Reviewed-by: Andy Moreton
Reviewed-by: Ivan Malov
---

diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index ebe705020d..00fc26cf0e 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -173,6 +173,7 @@ struct sfc_adapter_shared {
 
 	unsigned int			txq_count;
 	struct sfc_txq_info		*txq_info;
+	unsigned int			ethdev_txq_count;
 
 	struct sfc_rss			rss;
 
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 2651c41288..88896db1f8 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -524,24 +524,28 @@ sfc_rx_queue_release(void *queue)
 }
 
 static int
-sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 		   uint16_t nb_tx_desc, unsigned int socket_id,
 		   const struct rte_eth_txconf *tx_conf)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_txq_info *txq_info;
+	sfc_sw_index_t sw_index;
 	int rc;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
-		     tx_queue_id, nb_tx_desc, socket_id);
+		     ethdev_qid, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+	rc = sfc_tx_qinit(sa, sw_index, nb_tx_desc, socket_id, tx_conf);
 	if (rc != 0)
 		goto fail_tx_qinit;
 
-	dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;
+	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+	dev->data->tx_queues[ethdev_qid] = txq_info->dp;
 
 	sfc_adapter_unlock(sa);
 	return 0;
@@ -557,7 +561,7 @@ sfc_tx_queue_release(void *queue)
 {
 	struct sfc_dp_txq *dp_txq = queue;
 	struct sfc_txq *txq;
-	unsigned int sw_index;
+	sfc_sw_index_t sw_index;
 	struct sfc_adapter *sa;
 
 	if (dp_txq == NULL)
@@ -1213,15 +1217,15 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
  * use any process-local pointers from the adapter data.
  */
 static void
-sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 		      struct rte_eth_txq_info *qinfo)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_txq_info *txq_info;
 
-	SFC_ASSERT(tx_queue_id < sas->txq_count);
+	SFC_ASSERT(ethdev_qid < sas->ethdev_txq_count);
 
-	txq_info = &sas->txq_info[tx_queue_id];
+	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
 
 	memset(qinfo, 0, sizeof(*qinfo));
 
@@ -1362,13 +1366,15 @@ sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 }
 
 static int
-sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_txq_info *txq_info;
+	sfc_sw_index_t sw_index;
 	int rc;
 
-	sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+	sfc_log_init(sa, "TxQ = %u", ethdev_qid);
 
 	sfc_adapter_lock(sa);
 
@@ -1376,14 +1382,16 @@ sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (sa->state != SFC_ADAPTER_STARTED)
 		goto fail_not_started;
 
-	if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
+	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+	if (txq_info->state != SFC_TXQ_INITIALIZED)
 		goto fail_not_setup;
 
-	rc = sfc_tx_qstart(sa, tx_queue_id);
+	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+	rc = sfc_tx_qstart(sa, sw_index);
 	if (rc != 0)
 		goto fail_tx_qstart;
 
-	sas->txq_info[tx_queue_id].deferred_started = B_TRUE;
+	txq_info->deferred_started = B_TRUE;
 
 	sfc_adapter_unlock(sa);
 	return 0;
@@ -1398,18 +1406,22 @@ fail_not_started:
 }
 
 static int
-sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_txq_info *txq_info;
+	sfc_sw_index_t sw_index;
 
-	sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+	sfc_log_init(sa, "TxQ = %u", ethdev_qid);
 
 	sfc_adapter_lock(sa);
 
-	sfc_tx_qstop(sa, tx_queue_id);
+	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+	sfc_tx_qstop(sa, sw_index);
 
-	sas->txq_info[tx_queue_id].deferred_started = B_FALSE;
+	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+	txq_info->deferred_started = B_FALSE;
 
 	sfc_adapter_unlock(sa);
 	return 0;
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 71f706e403..ed28d51e12 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -598,7 +598,7 @@ sfc_ev_qpoll(struct sfc_evq *evq)
 	}
 
 	if (evq->dp_txq != NULL) {
-		unsigned int txq_sw_index;
+		sfc_sw_index_t txq_sw_index;
 
 		txq_sw_index = evq->dp_txq->dpq.queue_id;
 
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
index 5a9f85c2d9..75b9dcdebd 100644
--- a/drivers/net/sfc/sfc_ev.h
+++ b/drivers/net/sfc/sfc_ev.h
@@ -92,8 +92,25 @@ sfc_evq_sw_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
 	return 1 + rxq_sw_index;
 }
 
-static inline unsigned int
-sfc_evq_index_by_txq_sw_index(struct sfc_adapter *sa, unsigned int txq_sw_index)
+static inline sfc_ethdev_qid_t
+sfc_ethdev_tx_qid_by_txq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
+				  sfc_sw_index_t txq_sw_index)
+{
+	/* Only ethdev queues are present for now */
+	return txq_sw_index;
+}
+
+static inline sfc_sw_index_t
+sfc_txq_sw_index_by_ethdev_tx_qid(__rte_unused struct sfc_adapter_shared *sas,
+				  sfc_ethdev_qid_t ethdev_qid)
+{
+	/* Only ethdev queues are present for now */
+	return ethdev_qid;
+}
+
+static inline sfc_sw_index_t
+sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
+				 sfc_sw_index_t txq_sw_index)
 {
 	return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
 }
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 28d696de61..ce2a9a6a4f 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -34,6 +34,19 @@
  */
 #define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
 
+struct sfc_txq_info *
+sfc_txq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+			   sfc_ethdev_qid_t ethdev_qid)
+{
+	sfc_sw_index_t sw_index;
+
+	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_txq_count);
+	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+	return &sas->txq_info[sw_index];
+}
+
 static uint64_t
 sfc_tx_get_offload_mask(struct sfc_adapter *sa)
 {
@@ -118,10 +131,12 @@ sfc_tx_qflush_done(struct sfc_txq_info *txq_info)
 }
 
 int
-sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+sfc_tx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
 	     const struct rte_eth_txconf *tx_conf)
 {
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int txq_entries;
 	unsigned int evq_entries;
@@ -134,7 +149,9 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	uint64_t offloads;
 	struct sfc_dp_tx_hw_limits hw_limits;
 
-	sfc_log_init(sa, "TxQ = %u", sw_index);
+	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
 
 	memset(&hw_limits, 0, sizeof(hw_limits));
 	hw_limits.txq_max_entries = sa->txq_max_entries;
@@ -150,8 +167,11 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	offloads = tx_conf->offloads |
-		sa->eth_dev->data->dev_conf.txmode.offloads;
+	offloads = tx_conf->offloads;
+	/* Add device level Tx offloads if the queue is an ethdev Tx queue */
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+		offloads |= sa->eth_dev->data->dev_conf.txmode.offloads;
+
 	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
@@ -231,20 +251,26 @@ fail_ev_qinit:
 
 fail_bad_conf:
 fail_size_up_rings:
-	sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
+	sfc_log_init(sa, "failed (TxQ = %d (internal %u), rc = %d)", ethdev_qid,
+		     sw_index, rc);
 	return rc;
 }
 
 void
-sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_txq_info *txq_info;
 	struct sfc_txq *txq;
 
-	sfc_log_init(sa, "TxQ = %u", sw_index);
+	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
 
 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count);
-	sa->eth_dev->data->tx_queues[sw_index] = NULL;
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+		sa->eth_dev->data->tx_queues[ethdev_qid] = NULL;
 
 	txq_info = &sfc_sa2shared(sa)->txq_info[sw_index];
 
@@ -265,9 +291,14 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
 }
 
 static int
-sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
-	sfc_log_init(sa, "TxQ = %u", sw_index);
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
+
+	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
 
 	return 0;
 }
@@ -316,17 +347,26 @@ static void
 sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	int sw_index;
+	sfc_sw_index_t sw_index;
+	sfc_ethdev_qid_t ethdev_qid;
 
-	SFC_ASSERT(nb_tx_queues <= sas->txq_count);
+	SFC_ASSERT(nb_tx_queues <= sas->ethdev_txq_count);
 
-	sw_index = sas->txq_count;
-	while (--sw_index >= (int)nb_tx_queues) {
-		if (sas->txq_info[sw_index].state & SFC_TXQ_INITIALIZED)
+	/*
+	 * Finalize only ethdev queues since other ones are finalized only
+	 * on device close and they may require additional deinitializaton.
+	 */
+	ethdev_qid = sas->ethdev_txq_count;
+	while (--ethdev_qid >= (int)nb_tx_queues) {
+		struct sfc_txq_info *txq_info;
+
+		sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
+		txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
+		if (txq_info->state & SFC_TXQ_INITIALIZED)
 			sfc_tx_qfini(sa, sw_index);
 	}
 
-	sas->txq_count = nb_tx_queues;
+	sas->ethdev_txq_count = nb_tx_queues;
 }
 
 int
@@ -339,7 +379,7 @@ sfc_tx_configure(struct sfc_adapter *sa)
 	int rc = 0;
 
 	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
-		     nb_tx_queues, sas->txq_count);
+		     nb_tx_queues, sas->ethdev_txq_count);
 
 	/*
 	 * The datapath implementation assumes absence of boundary
@@ -377,7 +417,7 @@ sfc_tx_configure(struct sfc_adapter *sa)
 		struct sfc_txq_info *new_txq_info;
 		struct sfc_txq *new_txq_ctrl;
 
-		if (nb_tx_queues < sas->txq_count)
+		if (nb_tx_queues < sas->ethdev_txq_count)
 			sfc_tx_fini_queues(sa, nb_tx_queues);
 
 		new_txq_info =
@@ -393,24 +433,30 @@ sfc_tx_configure(struct sfc_adapter *sa)
 		sas->txq_info = new_txq_info;
 		sa->txq_ctrl = new_txq_ctrl;
 
-		if (nb_tx_queues > sas->txq_count) {
-			memset(&sas->txq_info[sas->txq_count], 0,
-			       (nb_tx_queues - sas->txq_count) *
+		if (nb_tx_queues > sas->ethdev_txq_count) {
+			memset(&sas->txq_info[sas->ethdev_txq_count], 0,
+			       (nb_tx_queues - sas->ethdev_txq_count) *
 			       sizeof(sas->txq_info[0]));
-			memset(&sa->txq_ctrl[sas->txq_count], 0,
-			       (nb_tx_queues - sas->txq_count) *
+			memset(&sa->txq_ctrl[sas->ethdev_txq_count], 0,
+			       (nb_tx_queues - sas->ethdev_txq_count) *
 			       sizeof(sa->txq_ctrl[0]));
 		}
 	}
 
-	while (sas->txq_count < nb_tx_queues) {
-		rc = sfc_tx_qinit_info(sa, sas->txq_count);
+	while (sas->ethdev_txq_count < nb_tx_queues) {
+		sfc_sw_index_t sw_index;
+
+		sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas,
+				sas->ethdev_txq_count);
+		rc = sfc_tx_qinit_info(sa, sw_index);
 		if (rc != 0)
 			goto fail_tx_qinit_info;
 
-		sas->txq_count++;
+		sas->ethdev_txq_count++;
 	}
 
+	sas->txq_count = sas->ethdev_txq_count;
+
 done:
 	return 0;
 
@@ -440,12 +486,12 @@ sfc_tx_close(struct sfc_adapter *sa)
 }
 
 int
-sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
 				      sfc_tx_get_queue_offload_caps(sa);
-	struct rte_eth_dev_data *dev_data;
 	struct sfc_txq_info *txq_info;
 	struct sfc_txq *txq;
 	struct sfc_evq *evq;
@@ -453,7 +499,9 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 	unsigned int desc_index;
 	int rc = 0;
 
-	sfc_log_init(sa, "TxQ = %u", sw_index);
+	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
 
 	SFC_ASSERT(sw_index < sas->txq_count);
 	txq_info = &sas->txq_info[sw_index];
@@ -463,7 +511,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 	txq = &sa->txq_ctrl[sw_index];
 	evq = txq->evq;
 
-	rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
+	rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_txq_sw_index(sa, sw_index));
 	if (rc != 0)
 		goto fail_ev_qstart;
 
@@ -505,11 +553,17 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 	if (rc != 0)
 		goto fail_dp_qstart;
 
-	/*
-	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
-	 */
-	dev_data = sa->eth_dev->data;
-	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
+		struct rte_eth_dev_data *dev_data;
+
+		/*
+		 * It sems to be used by DPDK for debug purposes only
+		 * ('rte_ether').
+		 */
+		dev_data = sa->eth_dev->data;
+		dev_data->tx_queue_state[ethdev_qid] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+	}
 
 	return 0;
 
@@ -525,17 +579,19 @@ fail_ev_qstart:
 }
 
 void
-sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_tx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	struct rte_eth_dev_data *dev_data;
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_txq_info *txq_info;
 	struct sfc_txq *txq;
 	unsigned int retry_count;
 	unsigned int wait_count;
 	int rc;
 
-	sfc_log_init(sa, "TxQ = %u", sw_index);
+	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "TxQ = %d (internal %u)", ethdev_qid, sw_index);
 
 	SFC_ASSERT(sw_index < sas->txq_count);
 	txq_info = &sas->txq_info[sw_index];
@@ -577,10 +633,12 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
 
 		if (txq_info->state & SFC_TXQ_FLUSHING)
-			sfc_err(sa, "TxQ %u flush timed out", sw_index);
+			sfc_err(sa, "TxQ %d (internal %u) flush timed out",
+				ethdev_qid, sw_index);
 
 		if (txq_info->state & SFC_TXQ_FLUSHED)
-			sfc_notice(sa, "TxQ %u flushed", sw_index);
+			sfc_notice(sa, "TxQ %d (internal %u) flushed",
+				   ethdev_qid, sw_index);
 	}
 
 	sa->priv.dp_tx->qreap(txq_info->dp);
@@ -591,11 +649,17 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 
 	sfc_ev_qstop(txq->evq);
 
-	/*
-	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
-	 */
-	dev_data = sa->eth_dev->data;
-	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
+		struct rte_eth_dev_data *dev_data;
+
+		/*
+		 * It seems to be used by DPDK for debug purposes only
+		 * ('rte_ether')
+		 */
+		dev_data = sa->eth_dev->data;
+		dev_data->tx_queue_state[ethdev_qid] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
+	}
 }
 
 int
@@ -603,10 +667,11 @@ sfc_tx_start(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
-	unsigned int sw_index;
+	sfc_sw_index_t sw_index;
 	int rc = 0;
 
-	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
+	sfc_log_init(sa, "txq_count = %u (internal %u)",
+		     sas->ethdev_txq_count, sas->txq_count);
 
 	if (sa->tso) {
 		if (!encp->enc_fw_assisted_tso_v2_enabled &&
@@ -654,9 +719,10 @@ void
 sfc_tx_stop(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	unsigned int sw_index;
+	sfc_sw_index_t sw_index;
 
-	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
+	sfc_log_init(sa, "txq_count = %u (internal %u)",
+		     sas->ethdev_txq_count, sas->txq_count);
 
 	sw_index = sas->txq_count;
 	while (sw_index-- > 0) {
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index 5ed678703e..f1700b13ca 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -58,7 +58,8 @@ struct sfc_txq {
 };
 
 struct sfc_txq *sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq);
-
+struct sfc_txq_info *sfc_txq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+						sfc_ethdev_qid_t ethdev_qid);
 /**
  * Transmit queue information used on libefx-based data path.
  * Allocated on the socket specified on the queue setup.
@@ -107,14 +108,14 @@ struct sfc_txq_info *sfc_txq_info_by_dp_txq(const struct sfc_dp_txq *dp_txq);
 
 int sfc_tx_configure(struct sfc_adapter *sa);
 void sfc_tx_close(struct sfc_adapter *sa);
-int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+int sfc_tx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 		 uint16_t nb_tx_desc, unsigned int socket_id,
 		 const struct rte_eth_txconf *tx_conf);
-void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_tx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
 void sfc_tx_qflush_done(struct sfc_txq_info *txq_info);
-int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
-void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_tx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
+void sfc_tx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
 int sfc_tx_start(struct sfc_adapter *sa);
 void sfc_tx_stop(struct sfc_adapter *sa);
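
The index split the patch prepares for is easiest to see outside the driver. Below is a minimal, self-contained sketch (hypothetical types, simplified struct and function names, not the sfc sources) of the same idea: ethdev Tx queue IDs and internal software indices become distinct types, two helpers map between them (the identity mapping while only ethdev queues exist), and ethdev-facing code resolves queue state only through the ethdev ID.

/* Standalone model of the ethdev-qid / software-index split; names are
 * illustrative only and mirror, but are not, the sfc driver's. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t  sfc_ethdev_qid_t;   /* queue ID visible to the application */
typedef uint32_t sfc_sw_index_t;     /* internal software queue index */

#define SFC_ETHDEV_QID_INVALID ((sfc_ethdev_qid_t)(-1))

struct txq_info {
	int state;
};

struct adapter_shared {
	unsigned int txq_count;          /* all Tx queues: ethdev + internal */
	unsigned int ethdev_txq_count;   /* ethdev Tx queues only */
	struct txq_info *txq_info;       /* indexed by software index */
};

/* ethdev queue ID -> software index (identity while no internal queues exist) */
static sfc_sw_index_t
txq_sw_index_by_ethdev_tx_qid(const struct adapter_shared *sas,
			      sfc_ethdev_qid_t ethdev_qid)
{
	(void)sas;
	return (sfc_sw_index_t)ethdev_qid;
}

/* software index -> ethdev queue ID; internal queues would map to INVALID */
static sfc_ethdev_qid_t
ethdev_tx_qid_by_txq_sw_index(const struct adapter_shared *sas,
			      sfc_sw_index_t sw_index)
{
	(void)sas;
	return (sfc_ethdev_qid_t)sw_index;
}

/* ethdev callbacks resolve queue info through the ethdev ID, never directly */
static struct txq_info *
txq_info_by_ethdev_qid(struct adapter_shared *sas, sfc_ethdev_qid_t ethdev_qid)
{
	assert(ethdev_qid != SFC_ETHDEV_QID_INVALID);
	assert((unsigned int)ethdev_qid < sas->ethdev_txq_count);

	return &sas->txq_info[txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid)];
}

int
main(void)
{
	struct txq_info infos[4] = {{0}, {1}, {2}, {3}};
	struct adapter_shared sas = {
		.txq_count = 4, .ethdev_txq_count = 4, .txq_info = infos,
	};

	/* An ethdev callback would use the ethdev queue ID... */
	printf("ethdev qid 2 -> state %d\n", txq_info_by_ethdev_qid(&sas, 2)->state);

	/* ...while internal paths (events, start/stop) use the software index. */
	printf("sw index 3 -> ethdev qid %d\n",
	       (int)ethdev_tx_qid_by_txq_sw_index(&sas, 3));
	return 0;
}

Once internal Tx queues are appended after the ethdev ones, only the two mapping helpers need to change; lookups such as txq_info_by_ethdev_qid() keep working unchanged, which is the point of routing every ethdev callback through the explicit ethdev queue ID.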
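A second detail worth a small illustration is the offload handling in sfc_tx_qinit(): device-level Tx offloads from the port configuration are now merged only when the queue has an ethdev ID, so a future internal queue would run with its queue-level configuration alone. A sketch of that decision point, reusing the hypothetical sfc_ethdev_qid_t and SFC_ETHDEV_QID_INVALID from the model above:

/* Sketch only (not the driver function): compute the effective Tx offload
 * mask for a queue.  Port-wide txmode offloads are inherited solely by
 * queues that are visible to the application as ethdev Tx queues. */
static uint64_t
txq_effective_offloads(sfc_ethdev_qid_t ethdev_qid,
		       uint64_t queue_offloads, uint64_t port_txmode_offloads)
{
	uint64_t offloads = queue_offloads;

	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		offloads |= port_txmode_offloads;  /* ethdev queue: inherit port config */

	return offloads;  /* internal queue: queue-level offloads only */
}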