+ sizeof(struct rte_ipv4_hdr));
+
+ if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
+ update_pkt_header(pkt, pkt_len);
+
+ if (unlikely(timestamp_enable)) {
+ uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
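+ /* On-wire mark written into the packet after the UDP header. */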
+ struct {
+ rte_be32_t signature;
+ rte_be16_t pkt_idx;
+ rte_be16_t queue_idx;
+ rte_be64_t ts;
+ } timestamp_mark;
+
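+ /*
+ * Lazily apply a newly requested timestamp initialization
+ * on this lcore before scheduling any packets.
+ */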
+ if (unlikely(timestamp_init_req !=
+ RTE_PER_LCORE(timestamp_idone))) {
+ struct rte_eth_dev *dev = &rte_eth_devices[fs->tx_port];
+ unsigned int txqs_n = dev->data->nb_tx_queues;
+ uint64_t phase = tx_pkt_times_inter * fs->tx_queue /
+ (txqs_n ? txqs_n : 1);
+ /*
+ * Initialize the scheduling time phase shift
+ * depending on queue index.
+ */
+ skew = timestamp_initial[fs->tx_port] +
+ tx_pkt_times_inter + phase;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
+ }
+ timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+ timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+ timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
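+ /*
+ * The first packet of a burst starts a new inter-burst interval;
+ * subsequent packets are spaced by the intra-burst gap, if any,
+ * otherwise they carry a zero timestamp and no scheduling request.
+ */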
+ if (unlikely(!idx)) {
+ skew += tx_pkt_times_inter;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else if (tx_pkt_times_intra) {
+ skew += tx_pkt_times_intra;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else {
+ timestamp_mark.ts = RTE_BE64(0);
+ }
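+ /* Copy the mark right after the Ethernet/IPv4/UDP headers. */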
+ copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(pkt_udp_hdr));
+ }