#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
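+/* Per-lcore counter: with --txonly-multi-flow, each forwarding lcore varies source addresses independently, with no synchronization. */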
+RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
static void
pkt_burst_transmit(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT]; /* trailing segments, fetched in bulk */
struct rte_port *txp;
struct rte_mbuf *pkt;
struct rte_mbuf *pkt_seg;
uint16_t vlan_tci, vlan_tci_outer;
uint32_t retry;
uint64_t ol_flags = 0;
+ uint8_t ip_var = RTE_PER_LCORE(_ip_var);
uint8_t i;
uint64_t tx_offloads;
struct rte_mempool *mbp;
struct ether_hdr eth_hdr;
uint16_t nb_tx;
uint16_t nb_pkt;
uint16_t nb_segs;
uint32_t pkt_len;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
start_tsc = rte_rdtsc();
#endif

mbp = current_fwd_lcore()->mbp;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
+
+ /*
+ * Initialize Ethernet header.
+ */
+ ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
+ ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
+ eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
pkt = rte_mbuf_raw_alloc(mbp);
if (pkt == NULL) {
nomore_mbuf:
if (nb_pkt == 0)
return;
break;
}
rte_pktmbuf_reset_headroom(pkt);
pkt->data_len = tx_pkt_seg_lengths[0];
pkt_seg = pkt;
+
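+ /* TX_PKT_SPLIT_RND: pick a random segment count in [1, tx_pkt_nb_segs]. */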
if (tx_pkt_split == TX_PKT_SPLIT_RND)
nb_segs = random() % tx_pkt_nb_segs + 1;
else
nb_segs = tx_pkt_nb_segs;
- pkt_len = pkt->data_len;
- for (i = 1; i < nb_segs; i++) {
- pkt_seg->next = rte_mbuf_raw_alloc(mbp);
- if (pkt_seg->next == NULL) {
- pkt->nb_segs = i;
+
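+ /*
+ * Fetch all trailing segments with one bulk mempool operation;
+ * cheaper than allocating each segment individually.
+ */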
+ if (nb_segs > 1) {
+ if (rte_mempool_get_bulk(mbp, (void **)pkt_segs,
+ nb_segs - 1)) {
rte_pktmbuf_free(pkt);
goto nomore_mbuf;
}
+ }
+
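+ /* Link the bulk-allocated mbufs into the segment chain. */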
+ pkt_len = pkt->data_len;
+ for (i = 1; i < nb_segs; i++) {
+ pkt_seg->next = pkt_segs[i - 1];
pkt_seg = pkt_seg->next;
pkt_seg->data_len = tx_pkt_seg_lengths[i];
pkt_len += pkt_seg->data_len;
}
pkt_seg->next = NULL; /* Last segment of packet. */
- /*
- * Initialize Ethernet header.
- */
- ether_addr_copy(&peer_eth_addrs[fs->peer_addr],&eth_hdr.d_addr);
- ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
- eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
-
/*
* Copy headers in first packet segment(s).
*/
copy_buf_to_pkt(ð_hdr, sizeof(eth_hdr), pkt, 0);
copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
sizeof(struct ether_hdr));
+ if (txonly_multi_flow) {
+ struct ipv4_hdr *ip_hdr;
+ uint32_t addr;
+
+ ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+ struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ /*
+ * Generate multiple flows by varying the IP source address.
+ * This spreads packets across RSS queues on the receiving
+ * side, if any, so txonly mode can serve as a decent packet
+ * generator for quick performance regression tests.
+ */
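+ /*
+ * ip_var varies bits 8-15 of the base address and the lcore
+ * id offsets the low byte, so each lcore emits a distinct
+ * set of flows.
+ */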
+ addr = (IP_DST_ADDR | (ip_var++ << 8)) + rte_lcore_id();
+ ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+ }
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
sizeof(struct ether_hdr) +
sizeof(struct ipv4_hdr));
/*
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
pkt->nb_segs = nb_segs;
pkt->pkt_len = pkt_len;
pkt->ol_flags = ol_flags;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
pkt->l2_len = sizeof(struct ether_hdr);
pkt->l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
/*
* Retry if necessary
*/
if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
retry = 0;
while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
rte_delay_us(burst_tx_delay_time);
nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
&pkts_burst[nb_tx], nb_pkt - nb_tx);
}
}
fs->tx_packets += nb_tx;
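+ /*
+ * Advance the per-lcore counter only by the number of packets
+ * actually sent, so variations used by dropped packets are
+ * reused in the next burst.
+ */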
+ if (txonly_multi_flow)
+ RTE_PER_LCORE(_ip_var) += nb_tx;
+
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif