X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ef10_tx.c;h=0454e79a2003479677319c48e818885c1b470435;hb=8999f8f679418cbb41637731f934e18f00cab3bd;hp=182fc23aee45cd6b9eae20771167f961b384a066;hpb=7df6f8542de3ade577441637def4d681682d680d;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 182fc23aee..0454e79a20 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -158,17 +158,35 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
 	pending += sfc_ef10_tx_process_events(txq);
 
 	if (pending != completed) {
+		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+		unsigned int nb = 0;
+
 		do {
 			struct sfc_ef10_tx_sw_desc *txd;
+			struct rte_mbuf *m;
 
 			txd = &txq->sw_ring[completed & ptr_mask];
+			if (txd->mbuf == NULL)
+				continue;
 
-			if (txd->mbuf != NULL) {
-				rte_pktmbuf_free(txd->mbuf);
-				txd->mbuf = NULL;
+			m = rte_pktmbuf_prefree_seg(txd->mbuf);
+			txd->mbuf = NULL;
+			if (m == NULL)
+				continue;
+
+			if ((nb == RTE_DIM(bulk)) ||
+			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
+				rte_mempool_put_bulk(bulk[0]->pool,
+						     (void *)bulk, nb);
+				nb = 0;
 			}
+
+			bulk[nb++] = m;
 		} while (++completed != pending);
 
+		if (nb != 0)
+			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
 		txq->completed = completed;
 	}
 
@@ -177,7 +195,7 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
 }
 
 static void
-sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
			     efx_qword_t *edp)
 {
 	EFX_POPULATE_QWORD_4(*edp,
@@ -323,8 +341,9 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		pkt_len = m_seg->pkt_len;
 		do {
-			phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+			rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
 			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+			unsigned int id = added & ptr_mask;
 
 			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
 
@@ -332,15 +351,30 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 			sfc_ef10_tx_qdesc_dma_create(seg_addr,
				seg_len, (pkt_len == 0),
-				&txq->txq_hw_ring[added & ptr_mask]);
+				&txq->txq_hw_ring[id]);
+
+			/*
+			 * rte_pktmbuf_free() is commonly used in DPDK for
+			 * recycling packets - the function checks every
+			 * segment's reference counter and returns the
+			 * buffer to its pool whenever possible;
+			 * nevertheless, freeing mbuf segments one by one
+			 * may entail some performance decline;
+			 * from this point, sfc_efx_tx_reap() does the same job
+			 * on its own and frees buffers in bulks (all mbufs
+			 * within a bulk belong to the same pool);
+			 * from this perspective, individual segment pointers
+			 * must be associated with the corresponding SW
+			 * descriptors independently so that only one loop
+			 * is sufficient on reap to inspect all the buffers
+			 */
+			txq->sw_ring[id].mbuf = m_seg;
+
 			++added;
 
 		} while ((m_seg = m_seg->next) != 0);
 
 		dma_desc_space -= (added - pkt_start);
-
-		/* Assign mbuf to the last used desc */
-		txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
 	}
 
 	if (likely(added != txq->added)) {
@@ -367,14 +401,25 @@ sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
 	pending += sfc_ef10_tx_process_events(txq);
 
 	if (pending != completed) {
+		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+		unsigned int nb = 0;
+
 		do {
 			struct sfc_ef10_tx_sw_desc *txd;
 
 			txd = &txq->sw_ring[completed & ptr_mask];
 
-			rte_pktmbuf_free_seg(txd->mbuf);
+			if (nb == RTE_DIM(bulk)) {
+				rte_mempool_put_bulk(bulk[0]->pool,
+						     (void *)bulk, nb);
+				nb = 0;
+			}
+
+			bulk[nb++] = txd->mbuf;
 		} while (++completed != pending);
 
+		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
 		txq->completed = completed;
 	}
 
@@ -419,7 +464,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
			   SFC_EF10_TX_DMA_DESC_LEN_MAX);
 
-		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
					     rte_pktmbuf_data_len(pkt),
					     true, &txq->txq_hw_ring[id]);
 
@@ -557,7 +602,7 @@ sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
 		txd = &txq->sw_ring[completed & txq->ptr_mask];
 
 		if (txd->mbuf != NULL) {
-			rte_pktmbuf_free(txd->mbuf);
+			rte_pktmbuf_free_seg(txd->mbuf);
 			txd->mbuf = NULL;
 		}
 	}
@@ -580,6 +625,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_SEG |
+				  SFC_DP_TX_FEAT_MULTI_POOL |
+				  SFC_DP_TX_FEAT_REFCNT |
				  SFC_DP_TX_FEAT_MULTI_PROCESS,
 	.qcreate		= sfc_ef10_tx_qcreate,
 	.qdestroy		= sfc_ef10_tx_qdestroy,
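
For reference, the core idea of the patch - returning completed Tx mbufs to their mempool in bulks rather than freeing them one segment at a time - can be sketched roughly as in the standalone fragment below. This is an illustrative sketch, not the driver code: reap_completed_mbufs(), REAP_BULK_SIZE and the flat sw_ring array are hypothetical names, while rte_pktmbuf_prefree_seg(), rte_mempool_put_bulk() and RTE_DIM() are the real DPDK APIs the patch relies on.

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define REAP_BULK_SIZE	32	/* hypothetical bulk size */

/* Free mbufs for descriptors in [completed, pending) using bulk mempool puts. */
static void
reap_completed_mbufs(struct rte_mbuf **sw_ring, unsigned int completed,
		     unsigned int pending, unsigned int ptr_mask)
{
	struct rte_mbuf *bulk[REAP_BULK_SIZE];
	unsigned int nb = 0;

	while (completed != pending) {
		struct rte_mbuf *m = sw_ring[completed++ & ptr_mask];

		if (m == NULL)
			continue;

		/*
		 * Decrement the reference counter; NULL means the segment
		 * is still referenced elsewhere and must not be freed.
		 */
		m = rte_pktmbuf_prefree_seg(m);
		if (m == NULL)
			continue;

		/*
		 * Flush when the array is full or the pool changes, since
		 * rte_mempool_put_bulk() accepts objects of one pool only.
		 */
		if (nb == RTE_DIM(bulk) ||
		    (nb != 0 && m->pool != bulk[0]->pool)) {
			rte_mempool_put_bulk(bulk[0]->pool, (void **)bulk, nb);
			nb = 0;
		}

		bulk[nb++] = m;
	}

	if (nb != 0)
		rte_mempool_put_bulk(bulk[0]->pool, (void **)bulk, nb);
}

The simple Tx datapath above skips the rte_pktmbuf_prefree_seg() step and the per-mbuf pool comparison because its features guarantee single-segment, single-pool, non-referenced mbufs, which is why the patch also advertises SFC_DP_TX_FEAT_MULTI_POOL and SFC_DP_TX_FEAT_REFCNT only for the full-featured datapath.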