+			sizeof(struct rte_ipv4_hdr));
+
+	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
+		update_pkt_header(pkt, pkt_len);
+
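+	/*
+	 * When Tx timestamping is enabled, embed a scheduling timestamp
+	 * and a software mark (signature, packet and queue index) into
+	 * the packet payload.
+	 */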
+	if (unlikely(timestamp_enable)) {
+		uint64_t skew = fs->ts_skew;
+		struct tx_timestamp timestamp_mark;
+
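+		/*
+		 * Zero skew means this stream has not scheduled a packet
+		 * yet; derive the initial offset for this Tx queue once.
+		 */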
+		if (unlikely(!skew)) {
+			struct rte_eth_dev_info dev_info;
+			unsigned int txqs_n;
+			uint64_t phase;
+			int ret;
+
+			ret = eth_dev_info_get_print_err(fs->tx_port, &dev_info);
+			if (ret != 0) {
+				TESTPMD_LOG(ERR,
+					"Failed to get device info for port %d, "
+					"could not finish timestamp init",
+					fs->tx_port);
+				return false;
+			}
+			txqs_n = dev_info.nb_tx_queues;
+			phase = tx_pkt_times_inter * fs->tx_queue /
+					(txqs_n ? txqs_n : 1);
+			/*
+			 * Initialize the scheduling time phase shift
+			 * depending on queue index.
+			 */
+			skew = timestamp_initial[fs->tx_port] +
+				tx_pkt_times_inter + phase;
+			fs->ts_skew = skew;
+		}
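+		/* Fill the per-packet mark: burst index, Tx queue, signature. */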
+		timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+		timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+		timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
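+		/*
+		 * The first packet of a burst is delayed by the inter-burst
+		 * gap, the following ones by the intra-burst gap if set;
+		 * otherwise the mark carries a zero timestamp and no
+		 * scheduling dynflag is raised.
+		 */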
+		if (unlikely(!idx)) {
+			skew += tx_pkt_times_inter;
+			pkt->ol_flags |= timestamp_mask;
+			*RTE_MBUF_DYNFIELD
+				(pkt, timestamp_off, uint64_t *) = skew;
+			fs->ts_skew = skew;
+			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+		} else if (tx_pkt_times_intra) {
+			skew += tx_pkt_times_intra;
+			pkt->ol_flags |= timestamp_mask;
+			*RTE_MBUF_DYNFIELD
+				(pkt, timestamp_off, uint64_t *) = skew;
+			fs->ts_skew = skew;
+			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+		} else {
+			timestamp_mark.ts = RTE_BE64(0);
+		}
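+		/* Write the mark into the payload right after the UDP header. */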
+		copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+			sizeof(struct rte_ether_hdr) +
+			sizeof(struct rte_ipv4_hdr) +
+			sizeof(pkt_udp_hdr));
+	}