diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index adf49d3db2..bf596017a9 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -84,6 +84,13 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
                 rc = EINVAL;
         }
 
+        if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
+            (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
+                sfc_err(sa, "Multi-segment is not supported by %s datapath",
+                        sa->dp_tx->dp.name);
+                rc = EINVAL;
+        }
+
         if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
                 if (!encp->enc_hw_tx_insert_vlan_enabled) {
                         sfc_err(sa, "VLAN offload is not supported");
@@ -127,7 +134,6 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
         struct sfc_txq_info *txq_info;
         struct sfc_evq *evq;
         struct sfc_txq *txq;
-        unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);
         int rc = 0;
         struct sfc_dp_tx_qcreate_info info;
 
@@ -143,12 +149,11 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
         SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
         txq_info->entries = nb_tx_desc;
 
-        rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id);
+        rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
+                          txq_info->entries, socket_id, &evq);
         if (rc != 0)
                 goto fail_ev_qinit;
 
-        evq = sa->evq_info[evq_index].evq;
-
         rc = ENOMEM;
         txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
         if (txq == NULL)
@@ -180,7 +185,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
         info.mem_bar = sa->mem_bar.esb_base;
 
         rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
-                                &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+                                &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
                                 socket_id, &info, &txq->dp);
         if (rc != 0)
                 goto fail_dp_tx_qinit;
@@ -201,7 +206,7 @@ fail_dma_alloc:
         rte_free(txq);
 
 fail_txq_alloc:
-        sfc_ev_qfini(sa, evq_index);
+        sfc_ev_qfini(evq);
 
 fail_ev_qinit:
         txq_info->entries = 0;
@@ -233,6 +238,10 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
         txq_info->entries = 0;
 
         sfc_dma_free(sa, &txq->mem);
+
+        sfc_ev_qfini(txq->evq);
+        txq->evq = NULL;
+
         rte_free(txq);
 }
 
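The sfc_tx_qinit() and sfc_tx_qfini() hunks above capture the reworked EvQ API: sfc_ev_qinit() now identifies the event queue by type and index and hands the handle back through an out-parameter, and sfc_ev_qfini() operates on that handle alone, so the TxQ can own its EvQ and release it at teardown. Reduced to a minimal caller-side sketch; the wrapper function below is hypothetical and only builds inside the driver's source tree, while the sfc_* signatures are the ones visible in the hunks:

static int
txq_evq_lifecycle_sketch(struct sfc_adapter *sa, unsigned int sw_index,
                         uint16_t nb_tx_desc, unsigned int socket_id)
{
        struct sfc_evq *evq;
        int rc;

        /* Create: the EvQ handle comes back via the out-parameter. */
        rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
                          nb_tx_desc, socket_id, &evq);
        if (rc != 0)
                return rc;

        /* ... store evq in the queue structure and use it ... */

        /* Destroy: the handle alone suffices, no adapter/index lookup. */
        sfc_ev_qfini(evq);
        return 0;
}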
@@ -280,14 +289,37 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
         return rc;
 }
 
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
+{
+        int sw_index;
+
+        SFC_ASSERT(nb_tx_queues <= sa->txq_count);
+
+        sw_index = sa->txq_count;
+        while (--sw_index >= (int)nb_tx_queues) {
+                if (sa->txq_info[sw_index].txq != NULL)
+                        sfc_tx_qfini(sa, sw_index);
+        }
+
+        sa->txq_count = nb_tx_queues;
+}
+
 int
-sfc_tx_init(struct sfc_adapter *sa)
+sfc_tx_configure(struct sfc_adapter *sa)
 {
         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
         const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
-        unsigned int sw_index;
+        const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
         int rc = 0;
 
+        sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
+                     nb_tx_queues, sa->txq_count);
+
         /*
          * The datapath implementation assumes absence of boundary
          * limits on Tx DMA descriptors. Addition of these checks on
@@ -298,35 +330,53 @@ sfc_tx_init(struct sfc_adapter *sa)
                 goto fail_tx_dma_desc_boundary;
         }
 
-        if (~sa->dp_tx->features & SFC_DP_TX_FEAT_TSO)
-                sa->tso = B_FALSE;
-
         rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
         if (rc != 0)
                 goto fail_check_mode;
 
-        sa->txq_count = sa->eth_dev->data->nb_tx_queues;
+        if (nb_tx_queues == sa->txq_count)
+                goto done;
 
-        sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count,
-                                         sizeof(sa->txq_info[0]), 0,
-                                         sa->socket_id);
-        if (sa->txq_info == NULL)
-                goto fail_txqs_alloc;
+        if (sa->txq_info == NULL) {
+                sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+                                                 sizeof(sa->txq_info[0]), 0,
+                                                 sa->socket_id);
+                if (sa->txq_info == NULL)
+                        goto fail_txqs_alloc;
+        } else {
+                struct sfc_txq_info *new_txq_info;
+
+                if (nb_tx_queues < sa->txq_count)
+                        sfc_tx_fini_queues(sa, nb_tx_queues);
+
+                new_txq_info =
+                        rte_realloc(sa->txq_info,
+                                    nb_tx_queues * sizeof(sa->txq_info[0]), 0);
+                if (new_txq_info == NULL && nb_tx_queues > 0)
+                        goto fail_txqs_realloc;
+
+                sa->txq_info = new_txq_info;
+                if (nb_tx_queues > sa->txq_count)
+                        memset(&sa->txq_info[sa->txq_count], 0,
+                               (nb_tx_queues - sa->txq_count) *
+                               sizeof(sa->txq_info[0]));
+        }
 
-        for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
-                rc = sfc_tx_qinit_info(sa, sw_index);
+        while (sa->txq_count < nb_tx_queues) {
+                rc = sfc_tx_qinit_info(sa, sa->txq_count);
                 if (rc != 0)
                         goto fail_tx_qinit_info;
+
+                sa->txq_count++;
         }
 
+done:
         return 0;
 
 fail_tx_qinit_info:
-        rte_free(sa->txq_info);
-        sa->txq_info = NULL;
-
+fail_txqs_realloc:
 fail_txqs_alloc:
-        sa->txq_count = 0;
+        sfc_tx_close(sa);
 
 fail_check_mode:
 fail_tx_dma_desc_boundary:
@@ -335,19 +385,12 @@ fail_tx_dma_desc_boundary:
 }
 
 void
-sfc_tx_fini(struct sfc_adapter *sa)
+sfc_tx_close(struct sfc_adapter *sa)
 {
-        int sw_index;
-
-        sw_index = sa->txq_count;
-        while (--sw_index >= 0) {
-                if (sa->txq_info[sw_index].txq != NULL)
-                        sfc_tx_qfini(sa, sw_index);
-        }
+        sfc_tx_fini_queues(sa, 0);
 
         rte_free(sa->txq_info);
         sa->txq_info = NULL;
-        sa->txq_count = 0;
 }
 
 int
@@ -372,7 +415,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 
         evq = txq->evq;
 
-        rc = sfc_ev_qstart(sa, evq->evq_index);
+        rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
         if (rc != 0)
                 goto fail_ev_qstart;
 
@@ -422,7 +465,7 @@ fail_dp_qstart:
         efx_tx_qdestroy(txq->common);
 
 fail_tx_qcreate:
-        sfc_ev_qstop(sa, evq->evq_index);
+        sfc_ev_qstop(evq);
 
 fail_ev_qstart:
         return rc;
@@ -436,6 +479,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
         struct sfc_txq *txq;
         unsigned int retry_count;
         unsigned int wait_count;
+        int rc;
 
         sfc_log_init(sa, "TxQ = %u", sw_index);
 
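The sfc_tx_configure() hunks above turn one-shot initialization into reconfiguration: destroy excess queues first (sfc_tx_fini_queues()), rte_realloc() the per-queue info array, zero the newly exposed tail (rte_realloc() does not clear it), and then initialize added queues one at a time so txq_count always counts fully initialized entries and the error path can simply call sfc_tx_close(). A self-contained sketch of that grow/shrink pattern, with hypothetical demo_* names standing in for the driver's types rather than the driver's own code:

#include <errno.h>
#include <string.h>
#include <rte_malloc.h>

struct demo_qinfo {
        void *q;        /* per-queue state, NULL when unused */
};

static int
demo_reconfigure(struct demo_qinfo **info, unsigned int *count,
                 unsigned int new_count)
{
        struct demo_qinfo *p;

        /* Shrink: release excess entries before the array may move. */
        while (*count > new_count)
                (*info)[--(*count)].q = NULL;   /* stands in for qfini() */

        p = rte_realloc(*info, new_count * sizeof(*p), 0);
        if (p == NULL && new_count > 0)
                return -ENOMEM;
        *info = p;

        /* Grow: zero the tail that rte_realloc() leaves uninitialized. */
        if (new_count > *count)
                memset(&p[*count], 0, (new_count - *count) * sizeof(*p));

        /* Commit the new count only after every step has succeeded. */
        *count = new_count;
        return 0;
}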
@@ -459,8 +503,10 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
              ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
              (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
              ++retry_count) {
-                if (efx_tx_qflush(txq->common) != 0) {
-                        txq->state |= SFC_TXQ_FLUSHING;
+                rc = efx_tx_qflush(txq->common);
+                if (rc != 0) {
+                        txq->state |= (rc == EALREADY) ?
+                                SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
                         break;
                 }
 
@@ -490,7 +536,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 
         efx_tx_qdestroy(txq->common);
 
-        sfc_ev_qstop(sa, txq->evq->evq_index);
+        sfc_ev_qstop(txq->evq);
 
         /*
          * It seems to be used by DPDK for debug purposes only ('rte_ether')
@@ -931,6 +977,44 @@ sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
         txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
 }
 
+static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
+static int
+sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
+{
+        struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+        if (unlikely(offset > txq->ptr_mask))
+                return -EINVAL;
+
+        if (unlikely(offset >= EFX_TXQ_LIMIT(txq->ptr_mask + 1)))
+                return RTE_ETH_TX_DESC_UNAVAIL;
+
+        /*
+         * Poll EvQ to derive up-to-date 'txq->pending' figure;
+         * it is required for the queue to be running, but the
+         * check is omitted because API design assumes that it
+         * is the duty of the caller to satisfy all conditions
+         */
+        SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
+                   SFC_EFX_TXQ_FLAG_RUNNING);
+        sfc_ev_qpoll(txq->evq);
+
+        /*
+         * Ring tail is 'txq->pending', and although descriptors
+         * between 'txq->completed' and 'txq->pending' are still
+         * in use by the driver, they should be reported as DONE
+         */
+        if (unlikely(offset < (txq->added - txq->pending)))
+                return RTE_ETH_TX_DESC_FULL;
+
+        /*
+         * There is no separate return value for unused descriptors;
+         * the latter will be reported as DONE because genuine DONE
+         * descriptors will be freed anyway in SW on the next burst
+         */
+        return RTE_ETH_TX_DESC_DONE;
+}
+
 struct sfc_dp_tx sfc_efx_tx = {
         .dp = {
                 .name           = SFC_KVARG_DATAPATH_EFX,
@@ -938,11 +1022,13 @@ struct sfc_dp_tx sfc_efx_tx = {
                 .hw_fw_caps     = 0,
         },
         .features               = SFC_DP_TX_FEAT_VLAN_INSERT |
-                                  SFC_DP_TX_FEAT_TSO,
+                                  SFC_DP_TX_FEAT_TSO |
+                                  SFC_DP_TX_FEAT_MULTI_SEG,
         .qcreate                = sfc_efx_tx_qcreate,
         .qdestroy               = sfc_efx_tx_qdestroy,
         .qstart                 = sfc_efx_tx_qstart,
         .qstop                  = sfc_efx_tx_qstop,
         .qreap                  = sfc_efx_tx_qreap,
+        .qdesc_status           = sfc_efx_tx_qdesc_status,
         .pkt_burst              = sfc_efx_xmit_pkts,
 };
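The new sfc_efx_tx_qdesc_status() callback above backs the generic ethdev descriptor-status query. For context, a hedged caller-side sketch: rte_eth_tx_descriptor_status() and the RTE_ETH_TX_DESC_* values are the standard ethdev API, while the function name and the way the offset is chosen are illustrative only:

#include <rte_ethdev.h>

/* Return nonzero if at least 'burst' descriptors can soon be reused. */
static int
demo_txq_has_room(uint16_t port_id, uint16_t queue_id, uint16_t burst)
{
        /*
         * Probe the descriptor 'burst' slots ahead of the driver's tail:
         * DONE means it was sent and its slot can be reused, FULL means
         * it is still held by the driver or hardware, UNAVAIL means the
         * offset points past the usable ring entries.
         */
        int status = rte_eth_tx_descriptor_status(port_id, queue_id, burst);

        if (status < 0)         /* -EINVAL, -ENOTSUP and the like */
                return 0;

        return status == RTE_ETH_TX_DESC_DONE;
}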