From 32941b5d52e4ef69b6fcf55765f4f1e8948494ab Mon Sep 17 00:00:00 2001
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Date: Tue, 2 Apr 2019 09:53:36 +0000
Subject: [PATCH] app/testpmd: allocate txonly packets per bulk

Use mempool bulk get ops to alloc burst of packets and process them.
If bulk get fails fallback to rte_mbuf_raw_alloc.

Tested-by: Yingya Han <yingyax.han@intel.com>
Suggested-by: Andrew Rybchenko <arybchenko@solarflare.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test-pmd/txonly.c | 35 ++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 56ca0ad245..66e63788a2 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -268,16 +268,33 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
 	eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 
-	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
-		pkt = rte_mbuf_raw_alloc(mbp);
-		if (pkt == NULL)
-			break;
-		if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr, vlan_tci,
-						vlan_tci_outer, ol_flags))) {
-			rte_pktmbuf_free(pkt);
-			break;
+	if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
+				nb_pkt_per_burst) == 0) {
+		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+			if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
+							&eth_hdr, vlan_tci,
+							vlan_tci_outer,
+							ol_flags))) {
+				rte_mempool_put_bulk(mbp,
+						(void **)&pkts_burst[nb_pkt],
+						nb_pkt_per_burst - nb_pkt);
+				break;
+			}
+		}
+	} else {
+		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+			pkt = rte_mbuf_raw_alloc(mbp);
+			if (pkt == NULL)
+				break;
+			if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
+							vlan_tci,
+							vlan_tci_outer,
+							ol_flags))) {
+				rte_pktmbuf_free(pkt);
+				break;
+			}
+			pkts_burst[nb_pkt] = pkt;
 		}
-		pkts_burst[nb_pkt] = pkt;
 	}
 
 	if (nb_pkt == 0)
-- 
2.20.1