#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
/*
 * Receive a burst of packets and forward them.
 */
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
nb_pkt_per_burst);
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
-#endif
+ inc_rx_burst_stats(fs, nb_rx);
if (unlikely(nb_rx == 0))
return;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
- ol_flags |= PKT_TX_MACSEC;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
+ ol_flags = RTE_MBUF_F_TX_VLAN;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
+ ol_flags |= RTE_MBUF_F_TX_QINQ;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
+ ol_flags |= RTE_MBUF_F_TX_MACSEC;
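/*
 * Per-packet handling: rewrite the destination and source MAC addresses,
 * keep only the mbuf attachment flags, apply the TX offload flags computed
 * above, and set the L2/L3 header lengths.
 */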
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
	void *));
mb = pkts_burst[i];
eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
- &eth_hdr->d_addr);
+ &eth_hdr->dst_addr);
rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
- &eth_hdr->s_addr);
- mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ &eth_hdr->src_addr);
+ mb->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
mb->l3_len = sizeof(struct rte_ipv4_hdr);
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
-#endif
+ inc_tx_burst_stats(fs, nb_tx);
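/* Account for packets that the TX burst could not send. */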
if (unlikely(nb_tx < nb_rx)) {
fs->fwd_dropped += (nb_rx - nb_tx);
do {