+static inline void
+update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
+{
+ struct rte_ipv4_hdr *ip_hdr;
+ struct rte_udp_hdr *udp_hdr;
+ uint16_t pkt_data_len;
+ uint16_t pkt_len;
+
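+ /* Payload length is the frame length minus Ethernet/IPv4/UDP headers. */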
+ pkt_data_len = (uint16_t) (total_pkt_len - (
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr)));
+ /* update UDP packet length */
+ udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
+ udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
+
+ /* update IP packet length and checksum */
+ ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
+ sizeof(struct rte_ether_hdr));
+ ip_hdr->hdr_checksum = 0;
+ pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
+ ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
+ ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
+}
+
+static inline bool
+pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+ struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
+ const uint16_t vlan_tci_outer, const uint64_t ol_flags,
+ const uint16_t idx, struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
+ struct rte_mbuf *pkt_seg;
+ uint32_t nb_segs, pkt_len;
+ uint8_t i;
+
+ if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
+ nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
+ else
+ nb_segs = tx_pkt_nb_segs;
+
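+ /*
+ * The first segment reuses the mbuf passed in, so only
+ * nb_segs - 1 extra mbufs are taken from the pool here.
+ */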
+ if (nb_segs > 1) {
+ if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1))
+ return false;
+ }
+
+ rte_pktmbuf_reset_headroom(pkt);
+ pkt->data_len = tx_pkt_seg_lengths[0];
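+ /* Keep only the external buffer flag, then apply the requested offloads. */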
+ pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
+ pkt->ol_flags |= ol_flags;
+ pkt->vlan_tci = vlan_tci;
+ pkt->vlan_tci_outer = vlan_tci_outer;
+ pkt->l2_len = sizeof(struct rte_ether_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+
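+ /* Chain the extra segments and accumulate the total packet length. */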
+ pkt_len = pkt->data_len;
+ pkt_seg = pkt;
+ for (i = 1; i < nb_segs; i++) {
+ pkt_seg->next = pkt_segs[i - 1];
+ pkt_seg = pkt_seg->next;
+ pkt_seg->data_len = tx_pkt_seg_lengths[i];
+ pkt_len += pkt_seg->data_len;
+ }
+ pkt_seg->next = NULL; /* Last segment of packet. */
+ /*
+ * Copy headers in first packet segment(s).
+ */
+ copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
+ copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
+ sizeof(struct rte_ether_hdr));
+ if (txonly_multi_flow) {
+ uint8_t ip_var = RTE_PER_LCORE(_ip_var);
+ struct rte_ipv4_hdr *ip_hdr;
+ uint32_t addr;
+
+ ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_ipv4_hdr *,
+ sizeof(struct rte_ether_hdr));
+ /*
+ * Generate multiple flows by varying the IP source address.
+ * This spreads packets across RSS queues on the receiving
+ * side, if any, so that txonly mode can serve as a decent
+ * packet generator for a developer's quick performance
+ * regression test.
+ */
+ addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
+ ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+ RTE_PER_LCORE(_ip_var) = ip_var;
+ }
+ copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+
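+ /*
+ * Random segment sizes change the total length and a varied source
+ * address invalidates the precomputed checksum, so refresh the
+ * IP and UDP headers in those cases.
+ */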
+ if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
+ update_pkt_header(pkt, pkt_len);
+
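+ /*
+ * Optionally schedule the Tx time through the timestamp dynamic
+ * field/flag and embed a software mark in the payload right after
+ * the UDP header.
+ */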
+ if (unlikely(timestamp_enable)) {
+ uint64_t skew = fs->ts_skew;
+ struct tx_timestamp timestamp_mark;
+
+ if (unlikely(!skew)) {
+ struct rte_eth_dev_info dev_info;
+ unsigned int txqs_n;
+ uint64_t phase;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(fs->tx_port, &dev_info);
+ if (ret != 0) {
+ TESTPMD_LOG(ERR,
+ "Failed to get device info for port %d,"
+ "could not finish timestamp init",
+ fs->tx_port);
+ return false;
+ }
+ txqs_n = dev_info.nb_tx_queues;
+ phase = tx_pkt_times_inter * fs->tx_queue /
+ (txqs_n ? txqs_n : 1);
+ /*
+ * Initialize the scheduling time phase shift
+ * depending on queue index.
+ */
+ skew = timestamp_initial[fs->tx_port] +
+ tx_pkt_times_inter + phase;
+ fs->ts_skew = skew;
+ }
+ timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+ timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+ timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
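+ /*
+ * The first packet of a burst advances the schedule by the
+ * inter-burst gap; later packets advance it by the intra-burst
+ * gap when configured, otherwise they get a zero mark and no
+ * scheduled time.
+ */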
+ if (unlikely(!idx)) {
+ skew += tx_pkt_times_inter;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ fs->ts_skew = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else if (tx_pkt_times_intra) {
+ skew += tx_pkt_times_intra;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ fs->ts_skew = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else {
+ timestamp_mark.ts = RTE_BE64(0);
+ }
+ copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(pkt_udp_hdr));
+ }
+ /*
+ * Complete first mbuf of packet and append it to the
+ * burst of packets to be transmitted.
+ */
+ pkt->nb_segs = nb_segs;
+ pkt->pkt_len = pkt_len;
+
+ return true;
+}
+