mbuf: rename data address helpers to IOVA
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 5482db8..2e246f4 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -128,14 +128,10 @@ sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
        return true;
 }
 
-static void
-sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+static unsigned int
+sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
 {
-       const unsigned int old_read_ptr = txq->evq_read_ptr;
-       const unsigned int ptr_mask = txq->ptr_mask;
-       unsigned int completed = txq->completed;
-       unsigned int pending = completed;
-       const unsigned int curr_done = pending - 1;
+       const unsigned int curr_done = txq->completed - 1;
        unsigned int anew_done = curr_done;
        efx_qword_t tx_ev;
 
@@ -148,20 +144,49 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
                /* Update the latest done descriptor */
                anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
        }
-       pending += (anew_done - curr_done) & ptr_mask;
+       return (anew_done - curr_done) & txq->ptr_mask;
+}
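
sfc_ef10_tx_process_events() reports progress as (anew_done - curr_done) & txq->ptr_mask: the unsigned subtraction wraps modulo 2^32 and the mask reduces it modulo the ring size, so the distance comes out right even when the hardware completion index has wrapped past zero. A small self-contained check of that arithmetic, with a made-up ring size and indices:

#include <assert.h>

int
main(void)
{
	const unsigned int ptr_mask = 512 - 1;	/* ring of 512 descriptors */
	unsigned int curr_done = 510;		/* last descriptor known to be done */
	unsigned int anew_done = 3;		/* latest index reported by hardware */

	/* descriptors 511, 0, 1, 2 and 3 completed: distance is 5 despite the wrap */
	assert(((anew_done - curr_done) & ptr_mask) == 5);
	return 0;
}
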
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+       const unsigned int old_read_ptr = txq->evq_read_ptr;
+       const unsigned int ptr_mask = txq->ptr_mask;
+       unsigned int completed = txq->completed;
+       unsigned int pending = completed;
+
+       pending += sfc_ef10_tx_process_events(txq);
 
        if (pending != completed) {
+               struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+               unsigned int nb = 0;
+
                do {
                        struct sfc_ef10_tx_sw_desc *txd;
+                       struct rte_mbuf *m;
 
                        txd = &txq->sw_ring[completed & ptr_mask];
-
-                       if (txd->mbuf != NULL) {
-                               rte_pktmbuf_free(txd->mbuf);
-                               txd->mbuf = NULL;
+                       if (txd->mbuf == NULL)
+                               continue;
+
+                       m = rte_pktmbuf_prefree_seg(txd->mbuf);
+                       txd->mbuf = NULL;
+                       if (m == NULL)
+                               continue;
+
+                       if ((nb == RTE_DIM(bulk)) ||
+                           ((nb != 0) && (m->pool != bulk[0]->pool))) {
+                               rte_mempool_put_bulk(bulk[0]->pool,
+                                                    (void *)bulk, nb);
+                               nb = 0;
                        }
+
+                       bulk[nb++] = m;
                } while (++completed != pending);
 
+               if (nb != 0)
+                       rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
                txq->completed = completed;
        }
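
The reworked reap above keeps ownership of every transmitted segment and returns buffers through rte_pktmbuf_prefree_seg() plus rte_mempool_put_bulk() instead of calling rte_pktmbuf_free() per segment. A minimal sketch of the same pattern outside the driver, assuming the caller holds an array of segments it is finished with (the array size and the function name are illustrative):

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define FREE_BULK_SIZE 64	/* illustrative stand-in for SFC_TX_REAP_BULK_SIZE */

/* Return completed Tx segments to their mempools in bulks. */
static void
free_done_segs(struct rte_mbuf **segs, unsigned int n)
{
	struct rte_mbuf *bulk[FREE_BULK_SIZE];
	unsigned int nb = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* Drop one reference; NULL means the segment is still used elsewhere */
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(segs[i]);

		if (m == NULL)
			continue;

		/* Flush the bulk when it is full or the pool changes */
		if (nb == RTE_DIM(bulk) ||
		    (nb != 0 && m->pool != bulk[0]->pool)) {
			rte_mempool_put_bulk(bulk[0]->pool, (void **)bulk, nb);
			nb = 0;
		}
		bulk[nb++] = m;
	}

	if (nb != 0)
		rte_mempool_put_bulk(bulk[0]->pool, (void **)bulk, nb);
}
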
 
@@ -316,8 +341,9 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                pkt_len = m_seg->pkt_len;
                do {
-                       phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+                       phys_addr_t seg_addr = rte_mbuf_data_iova(m_seg);
                        unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+                       unsigned int id = added & ptr_mask;
 
                        SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
 
@@ -325,15 +351,30 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                        sfc_ef10_tx_qdesc_dma_create(seg_addr,
                                seg_len, (pkt_len == 0),
-                               &txq->txq_hw_ring[added & ptr_mask]);
+                               &txq->txq_hw_ring[id]);
+
+                       /*
+                        * rte_pktmbuf_free() is commonly used in DPDK to
+                        * recycle packets: it checks the reference counter
+                        * of every segment and returns the buffer to its
+                        * pool whenever possible;
+                        * nevertheless, freeing mbuf segments one by one
+                        * costs performance, so sfc_ef10_tx_reap() instead
+                        * does the same job on its own and frees the
+                        * buffers in bulks (all mbufs within a bulk belong
+                        * to the same pool);
+                        * to make this possible, each segment pointer must
+                        * be associated with its own SW descriptor so that
+                        * a single loop on reap is enough to inspect all
+                        * the buffers
+                        */
+                       txq->sw_ring[id].mbuf = m_seg;
+
                        ++added;
 
                } while ((m_seg = m_seg->next) != 0);
 
                dma_desc_space -= (added - pkt_start);
-
-               /* Assign mbuf to the last used desc */
-               txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
        }
 
        if (likely(added != txq->added)) {
@@ -349,6 +390,44 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        return pktp - &tx_pkts[0];
 }
 
+static void
+sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
+{
+       const unsigned int old_read_ptr = txq->evq_read_ptr;
+       const unsigned int ptr_mask = txq->ptr_mask;
+       unsigned int completed = txq->completed;
+       unsigned int pending = completed;
+
+       pending += sfc_ef10_tx_process_events(txq);
+
+       if (pending != completed) {
+               struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+               unsigned int nb = 0;
+
+               do {
+                       struct sfc_ef10_tx_sw_desc *txd;
+
+                       txd = &txq->sw_ring[completed & ptr_mask];
+
+                       if (nb == RTE_DIM(bulk)) {
+                               rte_mempool_put_bulk(bulk[0]->pool,
+                                                    (void *)bulk, nb);
+                               nb = 0;
+                       }
+
+                       bulk[nb++] = txd->mbuf;
+               } while (++completed != pending);
+
+               rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
+               txq->completed = completed;
+       }
+
+       sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+                          txq->evq_read_ptr);
+}
+
+
 static uint16_t
 sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
@@ -372,7 +451,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
        if (reap_done) {
-               sfc_ef10_tx_reap(txq);
+               sfc_ef10_simple_tx_reap(txq);
                dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
                                 (added - txq->completed);
        }
@@ -385,7 +464,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
                           SFC_EF10_TX_DMA_DESC_LEN_MAX);
 
-               sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+               sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
                                             rte_pktmbuf_data_len(pkt),
                                             true, &txq->txq_hw_ring[id]);
 
@@ -401,7 +480,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
        if (!reap_done)
-               sfc_ef10_tx_reap(txq);
+               sfc_ef10_simple_tx_reap(txq);
 #endif
 
        return pktp - &tx_pkts[0];
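
The rte_mbuf_data_dma_addr() to rte_mbuf_data_iova() switches in both transmit paths above are the per-driver side of the rename named in the commit subject; the helper still returns the IO (bus) address of the packet data that gets written into the Tx descriptor. A sketch of what it computes, using the field names of current rte_mbuf.h (older releases spell the field buf_physaddr); the inline definition in rte_mbuf.h is authoritative:

#include <rte_mbuf.h>

/* Illustrative restatement of the renamed helper: the IO address of the
 * first data byte is the buffer IO address plus the data offset. */
static inline rte_iova_t
mbuf_data_iova_sketch(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}
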
@@ -516,18 +595,29 @@ static void
 sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
 {
        struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
-       unsigned int txds;
+       unsigned int completed;
 
-       for (txds = 0; txds <= txq->ptr_mask; ++txds) {
-               if (txq->sw_ring[txds].mbuf != NULL) {
-                       rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
-                       txq->sw_ring[txds].mbuf = NULL;
+       for (completed = txq->completed; completed != txq->added; ++completed) {
+               struct sfc_ef10_tx_sw_desc *txd;
+
+               txd = &txq->sw_ring[completed & txq->ptr_mask];
+               if (txd->mbuf != NULL) {
+                       rte_pktmbuf_free_seg(txd->mbuf);
+                       txd->mbuf = NULL;
                }
        }
 
        txq->flags &= ~SFC_EF10_TXQ_STARTED;
 }
 
+static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
+static int
+sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
+                        __rte_unused uint16_t offset)
+{
+       return -ENOTSUP;
+}
+
 struct sfc_dp_tx sfc_ef10_tx = {
        .dp = {
                .name           = SFC_KVARG_DATAPATH_EF10,
@@ -535,6 +625,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
                .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF10,
        },
        .features               = SFC_DP_TX_FEAT_MULTI_SEG |
+                                 SFC_DP_TX_FEAT_MULTI_POOL |
+                                 SFC_DP_TX_FEAT_REFCNT |
                                  SFC_DP_TX_FEAT_MULTI_PROCESS,
        .qcreate                = sfc_ef10_tx_qcreate,
        .qdestroy               = sfc_ef10_tx_qdestroy,
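
SFC_DP_TX_FEAT_MULTI_POOL and SFC_DP_TX_FEAT_REFCNT advertise exactly what the bulk-free reap makes safe: segments may come from different mempools and may carry reference counts above one. A hedged application-side illustration of the kind of packet this lets the datapath carry (the helper name and pool arguments are invented for the example):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Send a packet whose header sits in one pool while the payload is an
 * indirect clone referencing data owned by another mbuf (refcnt > 1).
 * Real code would also write the header bytes, e.g. via rte_pktmbuf_append(). */
static int
xmit_hdr_plus_cloned_payload(uint16_t port, uint16_t queue,
			     struct rte_mbuf *payload,
			     struct rte_mempool *hdr_pool,
			     struct rte_mempool *clone_pool)
{
	struct rte_mbuf *hdr = rte_pktmbuf_alloc(hdr_pool);
	struct rte_mbuf *clone = rte_pktmbuf_clone(payload, clone_pool);

	if (hdr == NULL || clone == NULL ||
	    rte_pktmbuf_chain(hdr, clone) != 0) {
		rte_pktmbuf_free(hdr);
		rte_pktmbuf_free(clone);
		return -1;
	}

	/* hdr and clone come from different pools and the clone holds an
	 * extra reference to payload's data, so completion handling must
	 * honour both the owning pool and the reference counter. */
	if (rte_eth_tx_burst(port, queue, &hdr, 1) != 1) {
		rte_pktmbuf_free(hdr);	/* frees the whole chain */
		return -1;
	}
	return 0;
}
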
@@ -542,6 +634,7 @@ struct sfc_dp_tx sfc_ef10_tx = {
        .qtx_ev                 = sfc_ef10_tx_qtx_ev,
        .qstop                  = sfc_ef10_tx_qstop,
        .qreap                  = sfc_ef10_tx_qreap,
+       .qdesc_status           = sfc_ef10_tx_qdesc_status,
        .pkt_burst              = sfc_ef10_xmit_pkts,
 };
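
sfc_ef10_tx_qdesc_status is wired into both op structs but only stubs the new Tx descriptor status callback for now; ethdev forwards its return value to rte_eth_tx_descriptor_status(). As a hedged sketch of the sort of accounting a real implementation could do inside sfc_ef10_tx.c (where struct sfc_ef10_txq is defined), with the offset convention and the classification below being illustrative assumptions rather than the driver's eventual code:

#include <errno.h>
#include <rte_ethdev.h>

/* Treat 'offset' as counted from where the next packet would be placed
 * (txq->added): slots inside the free region can be reused (DONE), slots
 * that wrap around into the not-yet-reaped region are still held (FULL). */
static int
sfc_ef10_tx_qdesc_status_sketch(const struct sfc_ef10_txq *txq, uint16_t offset)
{
	const unsigned int in_flight = txq->added - txq->completed;
	const unsigned int free_slots = (txq->ptr_mask + 1) - in_flight;

	if (offset > txq->ptr_mask)
		return -EINVAL;

	return (offset < free_slots) ? RTE_ETH_TX_DESC_DONE
				     : RTE_ETH_TX_DESC_FULL;
}
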
 
@@ -557,5 +650,6 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
        .qtx_ev                 = sfc_ef10_tx_qtx_ev,
        .qstop                  = sfc_ef10_tx_qstop,
        .qreap                  = sfc_ef10_tx_qreap,
+       .qdesc_status           = sfc_ef10_tx_qdesc_status,
        .pkt_burst              = sfc_ef10_simple_xmit_pkts,
 };