From 561ddcf8d09917b266fe46bc5daea839573eed62 Mon Sep 17 00:00:00 2001
From: Pavan Nikhilesh
Date: Tue, 2 Apr 2019 09:53:29 +0000
Subject: [PATCH] app/testpmd: allocate txonly segments per bulk

Use bulk ops for allocating segments instead of having an inner loop
for every segment.
This reduces the number of calls to the mempool layer.

Signed-off-by: Pavan Nikhilesh
Reviewed-by: Ferruh Yigit
---
 app/test-pmd/txonly.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 0d411dbf45..65171c1d18 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -155,6 +155,7 @@ static void
 pkt_burst_transmit(struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
 	struct rte_port *txp;
 	struct rte_mbuf *pkt;
 	struct rte_mbuf *pkt_seg;
@@ -216,18 +217,23 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		rte_pktmbuf_reset_headroom(pkt);
 		pkt->data_len = tx_pkt_seg_lengths[0];
 		pkt_seg = pkt;
+
 		if (tx_pkt_split == TX_PKT_SPLIT_RND)
 			nb_segs = random() % tx_pkt_nb_segs + 1;
 		else
 			nb_segs = tx_pkt_nb_segs;
-		pkt_len = pkt->data_len;
-		for (i = 1; i < nb_segs; i++) {
-			pkt_seg->next = rte_mbuf_raw_alloc(mbp);
-			if (pkt_seg->next == NULL) {
-				pkt->nb_segs = i;
+
+		if (nb_segs > 1) {
+			if (rte_mempool_get_bulk(mbp, (void **)pkt_segs,
+						nb_segs)) {
 				rte_pktmbuf_free(pkt);
 				goto nomore_mbuf;
 			}
+		}
+
+		pkt_len = pkt->data_len;
+		for (i = 1; i < nb_segs; i++) {
+			pkt_seg->next = pkt_segs[i - 1];
 			pkt_seg = pkt_seg->next;
 			pkt_seg->data_len = tx_pkt_seg_lengths[i];
 			pkt_len += pkt_seg->data_len;
-- 
2.20.1
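
For illustration, the allocation pattern this patch switches to can be sketched as a
standalone helper: a single rte_mempool_get_bulk() call fetches all tail segments of a
packet, which are then linked behind the head mbuf. This is a minimal sketch, not
testpmd code; the name chain_tail_segments(), the seg_lens parameter, the MAX_TAIL_SEGS
bound, and the error handling are assumptions, and it presumes (as txonly does) that
objects taken straight from the mempool are valid, reset mbufs.

    #include <stdint.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    #define MAX_TAIL_SEGS 63 /* illustrative bound; testpmd sizes its array with RTE_MAX_SEGS_PER_PKT */

    /*
     * Chain (nb_segs - 1) extra segments onto an already allocated and reset
     * head mbuf, fetching them from the mempool in one bulk call instead of
     * one rte_mbuf_raw_alloc() call per segment.
     * Returns 0 on success, -1 if the pool cannot supply the tail segments
     * (the caller is then expected to free the head mbuf, as the patch does).
     */
    static int
    chain_tail_segments(struct rte_mempool *mp, struct rte_mbuf *head,
                        const uint16_t *seg_lens, uint16_t nb_segs)
    {
            struct rte_mbuf *segs[MAX_TAIL_SEGS];
            struct rte_mbuf *cur = head;
            uint32_t pkt_len;
            uint16_t i;

            head->data_len = seg_lens[0];
            pkt_len = seg_lens[0];

            /* One mempool operation for all tail segments of this packet. */
            if (nb_segs > 1 &&
                rte_mempool_get_bulk(mp, (void **)segs, nb_segs - 1) != 0)
                    return -1;

            /* Link the bulk-allocated mbufs into the segment chain. */
            for (i = 1; i < nb_segs; i++) {
                    cur->next = segs[i - 1];
                    cur = cur->next;
                    cur->data_len = seg_lens[i];
                    pkt_len += seg_lens[i];
            }
            cur->next = NULL;
            head->nb_segs = nb_segs;
            head->pkt_len = pkt_len;
            return 0;
    }

For a burst of N multi-segment packets this turns roughly N * (nb_segs - 1) mempool get
operations into N bulk gets, which is the reduction in mempool-layer calls the commit
message refers to.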