 	info.tso_segsz = txp->tso_segsz;
 	for (i = 0; i < nb_rx; i++) {
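+		/* prefetch the next packet's data to hide memory latency */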
+		if (likely(i < nb_rx - 1))
+			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+						       void *));
 		ol_flags = 0;
 		info.is_tunnel = 0;
 	fs->rx_packets += nb_rx;
 	nb_replies = 0;
 	for (i = 0; i < nb_rx; i++) {
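+		/* prefetch the next packet's data to hide memory latency */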
+		if (likely(i < nb_rx - 1))
+			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+						       void *));
 		pkt = pkts_burst[i];
 		eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
 		eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type);
 #endif
 	fs->rx_packets += nb_rx;
 	for (i = 0; i < nb_rx; i++) {
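+		/* prefetch the next packet's data to hide memory latency */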
+		if (likely(i < nb_rx - 1))
+			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+						       void *));
 		mb = pkts_burst[i];
 		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
 		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
 	for (i = 0; i < nb_rx; i++) {
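+		/* prefetch the next packet's data to hide memory latency */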
+		if (likely(i < nb_rx - 1))
+			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+						       void *));
 		mb = pkts_burst[i];
 		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
 		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
 	for (i = 0; i < nb_rx; i++) {
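+		/* prefetch the next packet's data to hide memory latency */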
+		if (likely(i < nb_rx - 1))
+			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+						       void *));
 		mb = pkts_burst[i];
 		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);