struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_port *txp;
struct rte_mbuf *mb;
- struct ether_hdr *eth_hdr;
+ struct rte_ether_hdr *eth_hdr;
uint32_t retry;
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t i;
uint64_t ol_flags = 0;
+ uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
#endif
/*
 * Receive a burst of packets and forward them.
 */
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
nb_pkt_per_burst);
if (unlikely(nb_rx == 0))
return;
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
- if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
+ tx_offloads = txp->dev_conf.txmode.offloads;
+ if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
- if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+ if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
- if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+ if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
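
The three checks above are where the patch stops consulting testpmd's private TESTPMD_TX_OFFLOAD_* bitfield and instead reads the port's real Tx configuration: dev_conf.txmode.offloads is fetched once per burst, and each configured DEV_TX_OFFLOAD_* bit is translated into the matching per-mbuf PKT_TX_* request flag. A minimal sketch of that mapping as a standalone helper, assuming a DPDK release that still spells the flags this way (the helper name is ours, not part of the patch):

#include <stdint.h>
#include <rte_ethdev.h>	/* DEV_TX_OFFLOAD_* port capability bits */
#include <rte_mbuf.h>	/* PKT_TX_* per-mbuf request flags */

/* Hypothetical helper: translate a port's configured Tx offloads into
 * the ol_flags value applied to every forwarded mbuf. */
static uint64_t
tx_offloads_to_ol_flags(uint64_t tx_offloads)
{
	uint64_t ol_flags = 0;

	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		ol_flags |= PKT_TX_VLAN_PKT;	/* insert tag from mb->vlan_tci */
	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ol_flags |= PKT_TX_QINQ_PKT;	/* outer tag from mb->vlan_tci_outer */
	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
		ol_flags |= PKT_TX_MACSEC;	/* request MACsec on transmit */

	return ol_flags;
}

Deriving the flags from dev_conf.txmode.offloads keeps the fast path in sync with what was actually configured on the device, instead of relying on a shadow copy that could drift.
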
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
void *));
mb = pkts_burst[i];
- eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
- ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+ eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
+ rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
&eth_hdr->d_addr);
- ether_addr_copy(&ports[fs->tx_port].eth_addr,
+ rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->s_addr);
- mb->ol_flags = ol_flags;
- mb->l2_len = sizeof(struct ether_hdr);
- mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ mb->ol_flags |= ol_flags;
+ mb->l2_len = sizeof(struct rte_ether_hdr);
+ mb->l3_len = sizeof(struct rte_ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
mb->vlan_tci_outer = txp->tx_vlan_id_outer;
}
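
The other substantive change in this loop is the ol_flags handling. The old code assigned mb->ol_flags = ol_flags, wiping every flag carried by the received mbuf, including IND_ATTACHED_MBUF and EXT_ATTACHED_MBUF. Those two bits record that the mbuf's data buffer is attached (indirect clone or external buffer) rather than directly owned, so clobbering them breaks rte_pktmbuf_free() for such mbufs. The new code first masks ol_flags down to the attachment bits and only then ORs in the Tx request flags. A minimal sketch of the pattern, assuming DPDK's rte_mbuf.h flag names (the helper is hypothetical):

#include <rte_mbuf.h>

/* Hypothetical helper: rearm an mbuf's offload flags for retransmission
 * without losing its attachment state. Zeroing ol_flags outright on an
 * indirect or externally attached mbuf would make the free path treat
 * the buffer as directly owned. */
static inline void
rearm_tx_ol_flags(struct rte_mbuf *mb, uint64_t tx_flags)
{
	/* Keep only the attachment bits; drop stale Rx flags. */
	mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
	/* Apply this burst's Tx offload request flags. */
	mb->ol_flags |= tx_flags;
}

The rte_ether_hdr, rte_ipv4_hdr and rte_ether_addr_copy spellings follow DPDK's namespace cleanup that added the rte_ prefix to the public Ethernet and IP structures; behavior is unchanged there.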