X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_tx.c;h=d1320f462dda918cdb2b18580c117083c4f57c13;hb=048da2530d0d02c00a6a509c3351e5953817ffea;hp=3c4717f09f16aa1551ece96f36bdd1e27da9ce4f;hpb=b3b667c9925680a3dbc0e9179c975180e9432bda;p=dpdk.git diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c index 3c4717f09f..d1320f462d 100644 --- a/drivers/net/sfc/sfc_tx.c +++ b/drivers/net/sfc/sfc_tx.c @@ -79,9 +79,8 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc, if (tx_conf->tx_thresh.pthresh != 0 || tx_conf->tx_thresh.hthresh != 0 || tx_conf->tx_thresh.wthresh != 0) { - sfc_err(sa, + sfc_warn(sa, "prefetch/host/writeback thresholds are not supported"); - rc = EINVAL; } if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) && @@ -91,6 +90,21 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc, rc = EINVAL; } + if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) && + (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) { + sfc_err(sa, "multi-mempool is not supported by %s datapath", + sa->dp_tx->dp.name); + rc = EINVAL; + } + + if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) && + (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) { + sfc_err(sa, + "mbuf reference counters are neglected by %s datapath", + sa->dp_tx->dp.name); + rc = EINVAL; + } + if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) { if (!encp->enc_hw_tx_insert_vlan_enabled) { sfc_err(sa, "VLAN offload is not supported"); @@ -134,7 +148,6 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, struct sfc_txq_info *txq_info; struct sfc_evq *evq; struct sfc_txq *txq; - unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index); int rc = 0; struct sfc_dp_tx_qcreate_info info; @@ -150,12 +163,11 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries); txq_info->entries = nb_tx_desc; - rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id); + rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index, + txq_info->entries, socket_id, &evq); if (rc != 0) goto fail_ev_qinit; - evq = sa->evq_info[evq_index].evq; - rc = ENOMEM; txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id); if (txq == NULL) @@ -187,7 +199,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.mem_bar = sa->mem_bar.esb_base; rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index, - &SFC_DEV_TO_PCI(sa->eth_dev)->addr, + &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, socket_id, &info, &txq->dp); if (rc != 0) goto fail_dp_tx_qinit; @@ -208,7 +220,7 @@ fail_dma_alloc: rte_free(txq); fail_txq_alloc: - sfc_ev_qfini(sa, evq_index); + sfc_ev_qfini(evq); fail_ev_qinit: txq_info->entries = 0; @@ -240,9 +252,11 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index) txq_info->entries = 0; sfc_dma_free(sa, &txq->mem); - rte_free(txq); - sfc_ev_qfini(sa, sfc_evq_index_by_txq_sw_index(sa, sw_index)); + sfc_ev_qfini(txq->evq); + txq->evq = NULL; + + rte_free(txq); } static int @@ -289,14 +303,37 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode) return rc; } +/** + * Destroy excess queues that are no longer needed after reconfiguration + * or complete close. 
+ */ +static void +sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues) +{ + int sw_index; + + SFC_ASSERT(nb_tx_queues <= sa->txq_count); + + sw_index = sa->txq_count; + while (--sw_index >= (int)nb_tx_queues) { + if (sa->txq_info[sw_index].txq != NULL) + sfc_tx_qfini(sa, sw_index); + } + + sa->txq_count = nb_tx_queues; +} + int -sfc_tx_init(struct sfc_adapter *sa) +sfc_tx_configure(struct sfc_adapter *sa) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; - unsigned int sw_index; + const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues; int rc = 0; + sfc_log_init(sa, "nb_tx_queues=%u (old %u)", + nb_tx_queues, sa->txq_count); + /* * The datapath implementation assumes absence of boundary * limits on Tx DMA descriptors. Addition of these checks on @@ -311,28 +348,49 @@ sfc_tx_init(struct sfc_adapter *sa) if (rc != 0) goto fail_check_mode; - sa->txq_count = sa->eth_dev->data->nb_tx_queues; + if (nb_tx_queues == sa->txq_count) + goto done; - sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count, - sizeof(sa->txq_info[0]), 0, - sa->socket_id); - if (sa->txq_info == NULL) - goto fail_txqs_alloc; + if (sa->txq_info == NULL) { + sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues, + sizeof(sa->txq_info[0]), 0, + sa->socket_id); + if (sa->txq_info == NULL) + goto fail_txqs_alloc; + } else { + struct sfc_txq_info *new_txq_info; + + if (nb_tx_queues < sa->txq_count) + sfc_tx_fini_queues(sa, nb_tx_queues); + + new_txq_info = + rte_realloc(sa->txq_info, + nb_tx_queues * sizeof(sa->txq_info[0]), 0); + if (new_txq_info == NULL && nb_tx_queues > 0) + goto fail_txqs_realloc; + + sa->txq_info = new_txq_info; + if (nb_tx_queues > sa->txq_count) + memset(&sa->txq_info[sa->txq_count], 0, + (nb_tx_queues - sa->txq_count) * + sizeof(sa->txq_info[0])); + } - for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) { - rc = sfc_tx_qinit_info(sa, sw_index); + while (sa->txq_count < nb_tx_queues) { + rc = sfc_tx_qinit_info(sa, sa->txq_count); if (rc != 0) goto fail_tx_qinit_info; + + sa->txq_count++; } +done: return 0; fail_tx_qinit_info: - rte_free(sa->txq_info); - sa->txq_info = NULL; - +fail_txqs_realloc: fail_txqs_alloc: - sa->txq_count = 0; + sfc_tx_close(sa); fail_check_mode: fail_tx_dma_desc_boundary: @@ -341,19 +399,12 @@ fail_tx_dma_desc_boundary: } void -sfc_tx_fini(struct sfc_adapter *sa) +sfc_tx_close(struct sfc_adapter *sa) { - int sw_index; - - sw_index = sa->txq_count; - while (--sw_index >= 0) { - if (sa->txq_info[sw_index].txq != NULL) - sfc_tx_qfini(sa, sw_index); - } + sfc_tx_fini_queues(sa, 0); rte_free(sa->txq_info); sa->txq_info = NULL; - sa->txq_count = 0; } int @@ -378,7 +429,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index) evq = txq->evq; - rc = sfc_ev_qstart(sa, evq->evq_index); + rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index)); if (rc != 0) goto fail_ev_qstart; @@ -428,7 +479,7 @@ fail_dp_qstart: efx_tx_qdestroy(txq->common); fail_tx_qcreate: - sfc_ev_qstop(sa, evq->evq_index); + sfc_ev_qstop(evq); fail_ev_qstart: return rc; @@ -442,6 +493,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index) struct sfc_txq *txq; unsigned int retry_count; unsigned int wait_count; + int rc; sfc_log_init(sa, "TxQ = %u", sw_index); @@ -465,8 +517,10 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index) ((txq->state & SFC_TXQ_FLUSHED) == 0) && (retry_count < SFC_TX_QFLUSH_ATTEMPTS); ++retry_count) { - if (efx_tx_qflush(txq->common) != 0) { - 
txq->state |= SFC_TXQ_FLUSHING; + rc = efx_tx_qflush(txq->common); + if (rc != 0) { + txq->state |= (rc == EALREADY) ? + SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED; break; } @@ -496,7 +550,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index) efx_tx_qdestroy(txq->common); - sfc_ev_qstop(sa, txq->evq->evq_index); + sfc_ev_qstop(txq->evq); /* * It seems to be used by DPDK for debug purposes only ('rte_ether') @@ -710,7 +764,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) size_t seg_len; seg_len = m_seg->data_len; - next_frag = rte_mbuf_data_dma_addr(m_seg); + next_frag = rte_mbuf_data_iova(m_seg); /* * If we've started TSO transaction few steps earlier, @@ -937,6 +991,44 @@ sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq) txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED; } +static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status; +static int +sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset) +{ + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + + if (unlikely(offset > txq->ptr_mask)) + return -EINVAL; + + if (unlikely(offset >= EFX_TXQ_LIMIT(txq->ptr_mask + 1))) + return RTE_ETH_TX_DESC_UNAVAIL; + + /* + * Poll EvQ to derive up-to-date 'txq->pending' figure; + * it is required for the queue to be running, but the + * check is omitted because API design assumes that it + * is the duty of the caller to satisfy all conditions + */ + SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == + SFC_EFX_TXQ_FLAG_RUNNING); + sfc_ev_qpoll(txq->evq); + + /* + * Ring tail is 'txq->pending', and although descriptors + * between 'txq->completed' and 'txq->pending' are still + * in use by the driver, they should be reported as DONE + */ + if (unlikely(offset < (txq->added - txq->pending))) + return RTE_ETH_TX_DESC_FULL; + + /* + * There is no separate return value for unused descriptors; + * the latter will be reported as DONE because genuine DONE + * descriptors will be freed anyway in SW on the next burst + */ + return RTE_ETH_TX_DESC_DONE; +} + struct sfc_dp_tx sfc_efx_tx = { .dp = { .name = SFC_KVARG_DATAPATH_EFX, @@ -945,11 +1037,14 @@ struct sfc_dp_tx sfc_efx_tx = { }, .features = SFC_DP_TX_FEAT_VLAN_INSERT | SFC_DP_TX_FEAT_TSO | + SFC_DP_TX_FEAT_MULTI_POOL | + SFC_DP_TX_FEAT_REFCNT | SFC_DP_TX_FEAT_MULTI_SEG, .qcreate = sfc_efx_tx_qcreate, .qdestroy = sfc_efx_tx_qdestroy, .qstart = sfc_efx_tx_qstart, .qstop = sfc_efx_tx_qstop, .qreap = sfc_efx_tx_qreap, + .qdesc_status = sfc_efx_tx_qdesc_status, .pkt_burst = sfc_efx_xmit_pkts, };
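
The txq_flags checks added to sfc_tx_qcheck_conf() above reject a queue configuration when the application has not promised single-mempool, non-reference-counted mbufs but the selected Tx datapath lacks SFC_DP_TX_FEAT_MULTI_POOL or SFC_DP_TX_FEAT_REFCNT. A minimal sketch of a queue setup that satisfies such a datapath is given below; it assumes the txq_flags-based rte_eth_txconf API that this code checks, and the helper name, port/queue identifiers and descriptor count are illustrative, not taken from the patch.

#include <rte_ethdev.h>

/*
 * Illustrative sketch only (not part of the patch): request a Tx queue
 * whose mbufs come from a single mempool, are not reference counted and
 * are single-segment, so that sfc_tx_qcheck_conf() accepts the queue even
 * on a datapath without SFC_DP_TX_FEAT_MULTI_POOL, SFC_DP_TX_FEAT_REFCNT
 * or SFC_DP_TX_FEAT_MULTI_SEG.
 */
static int
setup_simple_txq(uint16_t port_id, uint16_t queue_id, unsigned int socket_id)
{
	struct rte_eth_txconf tx_conf = {
		/* Promise single-segment, single-pool, refcnt == 1 mbufs. */
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOMULTMEMP |
			     ETH_TXQ_FLAGS_NOREFCOUNT,
	};

	/* 512 Tx descriptors is an arbitrary example value. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 512, socket_id,
				      &tx_conf);
}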
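
sfc_efx_tx_qdesc_status(), added near the end of the patch, is the datapath hook behind the generic Tx descriptor status query and returns RTE_ETH_TX_DESC_FULL, RTE_ETH_TX_DESC_DONE or RTE_ETH_TX_DESC_UNAVAIL as described in its comments. The sketch below shows how an application might consume it, assuming the driver exposes the hook through the standard rte_eth_tx_descriptor_status() ethdev call; the wrapper name and its return convention are illustrative only.

#include <rte_ethdev.h>

/*
 * Illustrative sketch only: query the status of the Tx descriptor 'offset'
 * entries past the ring tail and report whether it can be reused.
 */
static int
txq_slot_is_free(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	switch (status) {
	case RTE_ETH_TX_DESC_DONE:
		/* Completed (or never used) descriptor - free to reuse. */
		return 1;
	case RTE_ETH_TX_DESC_FULL:
		/* Still owned by the NIC, not yet reported complete. */
		return 0;
	case RTE_ETH_TX_DESC_UNAVAIL:
		/* Offset is beyond the usable part of the ring. */
		return 0;
	default:
		/* Negative errno, e.g. -ENOTSUP or -EINVAL. */
		return status;
	}
}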