app/testpmd: fix timestamp init in txonly mode
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e558b40..3bae367 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -45,17 +45,23 @@ uint16_t tx_udp_src_port = 9;
 uint16_t tx_udp_dst_port = 9;
 
 /* use RFC5735 / RFC2544 reserved network test addresses */
-uint32_t tx_ip_src_addr = (192U << 24) | (18 << 16) | (0 << 8) | 1;
-uint32_t tx_ip_dst_addr = (192U << 24) | (18 << 16) | (0 << 8) | 2;
+uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
+uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
 
 #define IP_DEFTTL  64   /* from RFC 1340. */
-#define IP_VERSION 0x40
-#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
-#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
-static struct ipv4_hdr  pkt_ip_hdr;  /**< IP header of transmitted packets. */
+static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
 RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
-static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
+static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
+RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew);
+                                       /**< Timestamp offset per queue */
+RTE_DEFINE_PER_LCORE(uint32_t, timestamp_idone); /**< Timestamp init done. */
+
+static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
+static int32_t timestamp_off; /**< Timestamp dynamic field offset */
+static bool timestamp_enable; /**< Timestamp enable */
+static uint32_t timestamp_init_req; /**< Timestamp initialization request. */
+static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];
 
 static void
 copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
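Note: the new timestamp variables are per-lcore, so every forwarding lcore sees its own timestamp_qskew and timestamp_idone and the hot path needs no locks. A minimal sketch of the mechanism, using a hypothetical counter that is not part of this patch:

    #include <stdint.h>
    #include <rte_per_lcore.h>

    /* Each lcore gets an independent, thread-local instance. */
    RTE_DEFINE_PER_LCORE(uint64_t, pkt_count); /* hypothetical counter */

    static void
    count_one_packet(void)
    {
        /* Expands to this lcore's own copy; no atomics required. */
        RTE_PER_LCORE(pkt_count)++;
    }
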
@@ -95,8 +101,8 @@ copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 }
 
 static void
-setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
-                        struct udp_hdr *udp_hdr,
+setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
+                        struct rte_udp_hdr *udp_hdr,
                         uint16_t pkt_data_len)
 {
        uint16_t *ptr16;
@@ -106,7 +112,7 @@ setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
        /*
         * Initialize UDP header.
         */
-       pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
+       pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
        udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
        udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
        udp_hdr->dgram_len      = RTE_CPU_TO_BE_16(pkt_len);
@@ -115,8 +121,8 @@ setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
        /*
         * Initialize IP header.
         */
-       pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
-       ip_hdr->version_ihl   = IP_VHL_DEF;
+       pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
+       ip_hdr->version_ihl   = RTE_IPV4_VHL_DEF;
        ip_hdr->type_of_service   = 0;
        ip_hdr->fragment_offset = 0;
        ip_hdr->time_to_live   = IP_DEFTTL;
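Note: RTE_IPV4_VHL_DEF from rte_ip.h replaces the local IP_VHL_DEF macro removed above; both pack version 4 into the high nibble and a five-word (20-byte) header length into the low nibble, i.e. 0x40 | 0x05 == 0x45. A sketch of filling that byte and checksumming the finished header with the librte_net helper (testpmd itself open-codes the 16-bit sum; the function name here is illustrative):

    #include <rte_ip.h>

    static void
    finish_ipv4_header(struct rte_ipv4_hdr *ip)
    {
        ip->version_ihl = RTE_IPV4_VHL_DEF; /* 0x45: version 4, IHL 5 */
        ip->hdr_checksum = 0;               /* must be zero before summing */
        ip->hdr_checksum = rte_ipv4_cksum(ip);
    }
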
@@ -153,16 +159,16 @@ setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
 static inline bool
 pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
                struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
-               const uint16_t vlan_tci_outer, const uint64_t ol_flags)
+               const uint16_t vlan_tci_outer, const uint64_t ol_flags,
+               const uint16_t idx, const struct fwd_stream *fs)
 {
        struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
-       uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
        struct rte_mbuf *pkt_seg;
        uint32_t nb_segs, pkt_len;
        uint8_t i;
 
        if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
-               nb_segs = random() % tx_pkt_nb_segs + 1;
+               nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
        else
                nb_segs = tx_pkt_nb_segs;
 
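Note: switching from random() to rte_rand() matters because libc's random() either serializes on an internal lock or is not safe to call concurrently from several lcores, while rte_rand() keeps per-lcore generator state. A sketch of the same draw; if a bias-free bound is wanted, rte_rand_max() (available since DPDK 19.08) avoids the slight skew that '%' introduces:

    #include <rte_random.h>

    /* Pick a segment count in [1, bound]; bound must be >= 1. */
    static uint32_t
    pick_nb_segs(uint32_t bound)
    {
        return (uint32_t)rte_rand_max(bound) + 1;
    }
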
@@ -173,11 +179,12 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 
        rte_pktmbuf_reset_headroom(pkt);
        pkt->data_len = tx_pkt_seg_lengths[0];
-       pkt->ol_flags = ol_flags;
+       pkt->ol_flags &= EXT_ATTACHED_MBUF;
+       pkt->ol_flags |= ol_flags;
        pkt->vlan_tci = vlan_tci;
        pkt->vlan_tci_outer = vlan_tci_outer;
        pkt->l2_len = sizeof(struct rte_ether_hdr);
-       pkt->l3_len = sizeof(struct ipv4_hdr);
+       pkt->l3_len = sizeof(struct rte_ipv4_hdr);
 
        pkt_len = pkt->data_len;
        pkt_seg = pkt;
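Note: the old plain assignment to ol_flags wiped EXT_ATTACHED_MBUF on mbufs whose data buffer lives in external (for example, pinned) memory, which would mislead the free path about buffer ownership. The mask-then-or keeps that one bit and layers the per-burst Tx offload flags on top. The pattern in isolation:

    #include <rte_mbuf.h>

    static inline void
    apply_tx_flags(struct rte_mbuf *m, uint64_t tx_ol_flags)
    {
        m->ol_flags &= EXT_ATTACHED_MBUF; /* keep external-buffer bit only */
        m->ol_flags |= tx_ol_flags;       /* then add the Tx offloads */
    }
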
@@ -195,11 +202,12 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
        copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
                        sizeof(struct rte_ether_hdr));
        if (txonly_multi_flow) {
-               struct ipv4_hdr *ip_hdr;
+               uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
+               struct rte_ipv4_hdr *ip_hdr;
                uint32_t addr;
 
                ip_hdr = rte_pktmbuf_mtod_offset(pkt,
-                               struct ipv4_hdr *,
+                               struct rte_ipv4_hdr *,
                                sizeof(struct rte_ether_hdr));
                /*
                 * Generate multiple flows by varying IP src addr. This
@@ -210,10 +218,60 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
                 */
                addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
                ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+               RTE_PER_LCORE(_ip_var) = ip_var;
        }
        copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
                        sizeof(struct rte_ether_hdr) +
-                       sizeof(struct ipv4_hdr));
+                       sizeof(struct rte_ipv4_hdr));
+       if (unlikely(timestamp_enable)) {
+               uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
+               struct {
+                       rte_be32_t signature;
+                       rte_be16_t pkt_idx;
+                       rte_be16_t queue_idx;
+                       rte_be64_t ts;
+               } timestamp_mark;
+
+               if (unlikely(timestamp_init_req !=
+                               RTE_PER_LCORE(timestamp_idone))) {
+                       struct rte_eth_dev *dev = &rte_eth_devices[fs->tx_port];
+                       unsigned int txqs_n = dev->data->nb_tx_queues;
+                       uint64_t phase = tx_pkt_times_inter * fs->tx_queue /
+                                        (txqs_n ? txqs_n : 1);
+                       /*
+                        * Initialize the scheduling time phase shift
+                        * depending on queue index.
+                        */
+                       skew = timestamp_initial[fs->tx_port] +
+                              tx_pkt_times_inter + phase;
+                       RTE_PER_LCORE(timestamp_qskew) = skew;
+                       RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
+               }
+               timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+               timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+               timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
+               if (unlikely(!idx)) {
+                       skew += tx_pkt_times_inter;
+                       pkt->ol_flags |= timestamp_mask;
+                       *RTE_MBUF_DYNFIELD
+                               (pkt, timestamp_off, uint64_t *) = skew;
+                       RTE_PER_LCORE(timestamp_qskew) = skew;
+                       timestamp_mark.ts = rte_cpu_to_be_64(skew);
+               } else if (tx_pkt_times_intra) {
+                       skew += tx_pkt_times_intra;
+                       pkt->ol_flags |= timestamp_mask;
+                       *RTE_MBUF_DYNFIELD
+                               (pkt, timestamp_off, uint64_t *) = skew;
+                       RTE_PER_LCORE(timestamp_qskew) = skew;
+                       timestamp_mark.ts = rte_cpu_to_be_64(skew);
+               } else {
+                       timestamp_mark.ts = RTE_BE64(0);
+               }
+               copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+                       sizeof(struct rte_ether_hdr) +
+                       sizeof(struct rte_ipv4_hdr) +
+                       sizeof(pkt_udp_hdr));
+       }
        /*
         * Complete first mbuf of packet and append it to the
         * burst of packets to be transmitted.
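Note: the 16-byte marker lands in the UDP payload directly after the headers, so a receiver can recognize test packets by the big-endian 0xBEEFC0DE signature and recover the packet index, queue index, and scheduled time (ts is zero for packets that carry no new scheduling time). A receiver-side sketch; the helper name and the assumption that the headers fit in the first segment are mine:

    #include <rte_byteorder.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_udp.h>

    struct tx_timestamp_mark {
        rte_be32_t signature;
        rte_be16_t pkt_idx;
        rte_be16_t queue_idx;
        rte_be64_t ts;
    };

    /* Return the scheduled time of a test packet, or 0 if no marker. */
    static uint64_t
    read_tx_timestamp(const struct rte_mbuf *m)
    {
        const struct tx_timestamp_mark *mark;
        uint32_t off = sizeof(struct rte_ether_hdr) +
                       sizeof(struct rte_ipv4_hdr) +
                       sizeof(struct rte_udp_hdr);

        if (m->data_len < off + sizeof(*mark))
            return 0;
        mark = rte_pktmbuf_mtod_offset(m, const struct tx_timestamp_mark *,
                                       off);
        if (mark->signature != rte_cpu_to_be_32(0xBEEFC0DE))
            return 0;
        return rte_be_to_cpu_64(mark->ts);
    }
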
@@ -266,9 +324,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
        /*
         * Initialize Ethernet header.
         */
-       ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
-       ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
-       eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+       rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
+       rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
+       eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 
        if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
                                nb_pkt_per_burst) == 0) {
@@ -276,7 +334,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
                        if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
                                                        &eth_hdr, vlan_tci,
                                                        vlan_tci_outer,
-                                                       ol_flags))) {
+                                                       ol_flags,
+                                                       nb_pkt, fs))) {
                                rte_mempool_put_bulk(mbp,
                                                (void **)&pkts_burst[nb_pkt],
                                                nb_pkt_per_burst - nb_pkt);
@@ -291,7 +350,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
                        if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
                                                        vlan_tci,
                                                        vlan_tci_outer,
-                                                       ol_flags))) {
+                                                       ol_flags,
+                                                       nb_pkt, fs))) {
                                rte_pktmbuf_free(pkt);
                                break;
                        }
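Note: both allocation paths above share one cleanup rule: when pkt_burst_prepare() fails mid-burst, every mbuf from the failing index onward goes back to the pool in a single bulk put (bulk-get path) or is freed individually (fallback path). A condensed sketch of the bulk pattern; prepare_one() is a hypothetical stand-in for pkt_burst_prepare():

    #include <stdbool.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    static bool prepare_one(struct rte_mbuf *m); /* hypothetical */

    /* Returns how many mbufs of the burst were successfully prepared. */
    static uint16_t
    alloc_and_prepare(struct rte_mempool *mp, struct rte_mbuf **burst,
                      uint16_t n)
    {
        uint16_t i;

        if (rte_mempool_get_bulk(mp, (void **)burst, n) != 0)
            return 0; /* pool low: caller may fall back to per-pkt alloc */
        for (i = 0; i < n; i++) {
            if (!prepare_one(burst[i])) {
                /* Return the failing mbuf and the untouched tail. */
                rte_mempool_put_bulk(mp, (void **)&burst[i], n - i);
                break;
            }
        }
        return i;
    }
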
@@ -303,6 +363,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
                return;
 
        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+
        /*
         * Retry if necessary
         */
@@ -317,7 +378,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
        fs->tx_packets += nb_tx;
 
        if (txonly_multi_flow)
-               RTE_PER_LCORE(_ip_var) += nb_tx;
+               RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
 
 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
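Note: _ip_var now advances once per prepared packet inside pkt_burst_prepare(), so when the PMD accepts only part of the burst the counter has overshot; subtracting the unsent count keeps the source-address sequence gap-free, whereas the old '+= nb_tx' double-counted packets already accounted for during preparation. A worked example with assumed numbers:

    /*
     * Assume nb_pkt = 32 packets were prepared (advancing _ip_var by 32)
     * but the PMD accepted only nb_tx = 30. Roll back by the 2 unsent
     * packets so the next burst reuses their address variations.
     */
    RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx; /* here: 32 - 30 == 2 */
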
@@ -343,15 +404,37 @@ pkt_burst_transmit(struct fwd_stream *fs)
 }
 
 static void
-tx_only_begin(__attribute__((unused)) portid_t pi)
+tx_only_begin(portid_t pi)
 {
        uint16_t pkt_data_len;
+       int dynf;
 
        pkt_data_len = (uint16_t) (tx_pkt_length - (
                                        sizeof(struct rte_ether_hdr) +
-                                       sizeof(struct ipv4_hdr) +
-                                       sizeof(struct udp_hdr)));
+                                       sizeof(struct rte_ipv4_hdr) +
+                                       sizeof(struct rte_udp_hdr)));
        setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+
+       timestamp_enable = false;
+       timestamp_mask = 0;
+       timestamp_off = -1;
+       RTE_PER_LCORE(timestamp_qskew) = 0;
+       dynf = rte_mbuf_dynflag_lookup
+                               (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+       if (dynf >= 0)
+               timestamp_mask = 1ULL << dynf;
+       dynf = rte_mbuf_dynfield_lookup
+                               (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+       if (dynf >= 0)
+               timestamp_off = dynf;
+       timestamp_enable = tx_pkt_times_inter &&
+                          timestamp_mask &&
+                          timestamp_off >= 0 &&
+                          !rte_eth_read_clock(pi, &timestamp_initial[pi]);
+       if (timestamp_enable)
+               timestamp_init_req++;
+       /* Make sure all settings are visible on forwarding cores.*/
+       rte_wmb();
 }
 
 struct fwd_engine tx_only_engine = {
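
Note: the lookups in tx_only_begin() only succeed after the Tx timestamp dynamic flag and field have been registered, which a PMD normally does when send scheduling is configured on the port; rte_eth_read_clock() then supplies the per-port time base in timestamp_initial[], and the final rte_wmb() publishes everything before the forwarding lcores start. A sketch of the registering side under those assumptions (testpmd itself only looks the names up; the helper name is mine):

    #include <rte_mbuf_dyn.h>

    /* Register-or-find the standard Tx timestamp field and flag. The
     * calls are idempotent: an existing name with compatible parameters
     * returns the already-assigned offset/bit. */
    static int
    tx_timestamp_setup(int *field_off, uint64_t *flag_mask)
    {
        static const struct rte_mbuf_dynfield field_desc = {
            .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
            .size = sizeof(uint64_t),
            .align = __alignof__(uint64_t),
        };
        static const struct rte_mbuf_dynflag flag_desc = {
            .name = RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME,
        };
        int off = rte_mbuf_dynfield_register(&field_desc);
        int bit = rte_mbuf_dynflag_register(&flag_desc);

        if (off < 0 || bit < 0)
            return -1;
        *field_off = off;
        *flag_mask = 1ULL << bit;
        return 0;
    }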