From d803387a7f3faeb50c126f9ec065a86cb0dc7f70 Mon Sep 17 00:00:00 2001
From: Bruce Richardson
Date: Tue, 23 Sep 2014 12:08:14 +0100
Subject: [PATCH] ixgbe: add prefetch to improve slow-path tx perf

Make a small improvement to slow path TX performance by adding in a
prefetch for the second mbuf cache line.
Also move the assignment of the l2/l3 length values so it is done only
when needed.

What I've done with the prefetches is two-fold:
1) changed it from prefetching the mbuf (first cache line) to prefetching
the mbuf pool pointer (second cache line) so that when we go to access
the pool pointer to free transmitted mbufs we don't get a cache miss. When
clearing the ring and freeing mbufs, the pool pointer is the only mbuf
field used, so we don't need that first cache line.
2) changed the code to prefetch earlier - in effect to prefetch one mbuf
ahead. The original code prefetched the mbuf to be freed as soon as it
started processing the mbuf to replace it. Instead now, every time we
calculate what the next mbuf position is going to be we prefetch the mbuf
in that position (i.e. the mbuf pool pointer we are going to free the mbuf
to), even while we are still updating the previous mbuf slot on the ring.
This gives the prefetch much more time to resolve and get the data we need
in the cache before we need it.

In terms of performance difference, a quick sanity test using testpmd
on a Xeon (Sandy Bridge uarch) platform showed performance increases of
approximately 8-18%, depending on the particular RX path used in
conjunction with this TX path code.

Signed-off-by: Bruce Richardson
Acked-by: Pablo de Lara
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index d34588576b..123b8b357d 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -565,25 +565,26 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		ixgbe_xmit_cleanup(txq);
 	}
 
+	rte_prefetch0(&txe->mbuf->pool);
+
 	/* TX loop */
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		new_ctx = 0;
 		tx_pkt = *tx_pkts++;
 		pkt_len = tx_pkt->pkt_len;
 
-		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
-
 		/*
 		 * Determine how many (if any) context descriptors
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
-		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
-		vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
 
 		/* If hardware offload required */
 		tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
 		if (tx_ol_req) {
+			vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+			vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
+
 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
 				vlan_macip_lens.data);
@@ -720,7 +721,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				&txr[tx_id];
 
 			txn = &sw_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+			rte_prefetch0(&txn->mbuf->pool);
 
 			if (txe->mbuf != NULL) {
 				rte_pktmbuf_free_seg(txe->mbuf);
@@ -749,6 +750,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		do {
 			txd = &txr[tx_id];
 			txn = &sw_ring[txe->next_id];
+			rte_prefetch0(&txn->mbuf->pool);
 
 			if (txe->mbuf != NULL)
 				rte_pktmbuf_free_seg(txe->mbuf);
-- 
2.20.1
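
For illustration, a minimal stand-alone C sketch of the "prefetch the next
entry's second cache line one slot ahead" pattern the commit message
describes. It deliberately does not use the real DPDK types or APIs:
struct obj, struct ring_entry, obj_free() and clean_ring() are hypothetical
stand-ins for rte_mbuf, the sw_ring entries, rte_pktmbuf_free_seg() and the
TX cleanup path, and __builtin_prefetch stands in for rte_prefetch0().

/*
 * Illustrative sketch only, not part of the patch. All names below are
 * hypothetical; the layout mirrors the idea that the pointer needed at
 * free time lives on the object's second cache line, like mbuf->pool.
 */
#include <stddef.h>

struct obj {
	char hot[64];   /* first cache line: fields used on the fast path */
	void *pool;     /* second cache line: the only field needed when freeing */
};

struct ring_entry {
	struct obj *obj;
	unsigned int next_id;
};

/* Hypothetical stand-in for rte_pktmbuf_free_seg(). */
static void obj_free(struct obj *o)
{
	o->pool = NULL;
}

static void clean_ring(struct ring_entry *ring, unsigned int first, unsigned int n)
{
	struct ring_entry *txe = &ring[first];

	/* Prime the prefetch for the first entry before the loop starts. */
	__builtin_prefetch(&txe->obj->pool);

	while (n-- > 0) {
		struct ring_entry *txn = &ring[txe->next_id];

		/* Issue the prefetch for the *next* slot early ... */
		__builtin_prefetch(&txn->obj->pool);

		/* ... while still working on the current slot, so the load
		 * has time to resolve before the pointer is actually used. */
		if (txe->obj != NULL) {
			obj_free(txe->obj);
			txe->obj = NULL;
		}
		txe = txn;
	}
}

The point of the sketch is the ordering: the prefetch for slot N+1 is issued
while slot N is still being cleaned, which is what gives the load enough
latency headroom to hit in cache by the time it is needed.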