The testpmd application in txonly forwarding mode has an option
to generate the packet flows by varying the destination IP address.
The patch increments the IP address for each packet sent; this improves
the entropy of the test traffic, and the RSS distribution on the
receiving side becomes more uniform.
Fixes: 01b645dcff7f ("app/testpmd: move txonly prepare in separate function")
Cc: stable@dpdk.org
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
const uint16_t vlan_tci_outer, const uint64_t ol_flags)
{
struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
const uint16_t vlan_tci_outer, const uint64_t ol_flags)
{
struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
- uint8_t ip_var = RTE_PER_LCORE(_ip_var);
struct rte_mbuf *pkt_seg;
uint32_t nb_segs, pkt_len;
uint8_t i;
struct rte_mbuf *pkt_seg;
uint32_t nb_segs, pkt_len;
uint8_t i;
copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
sizeof(struct rte_ether_hdr));
if (txonly_multi_flow) {
copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
sizeof(struct rte_ether_hdr));
if (txonly_multi_flow) {
+ uint8_t ip_var = RTE_PER_LCORE(_ip_var);
struct rte_ipv4_hdr *ip_hdr;
uint32_t addr;
struct rte_ipv4_hdr *ip_hdr;
uint32_t addr;
*/
addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
ip_hdr->src_addr = rte_cpu_to_be_32(addr);
*/
addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+ RTE_PER_LCORE(_ip_var) = ip_var;
}
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
sizeof(struct rte_ether_hdr) +
}
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
sizeof(struct rte_ether_hdr) +
fs->tx_packets += nb_tx;
if (txonly_multi_flow)
fs->tx_packets += nb_tx;
if (txonly_multi_flow)
- RTE_PER_LCORE(_ip_var) += nb_tx;
+ RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;