X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Ftxonly.c;h=d55ee7ca00b8cb2117bc731b8a83e087295d495f;hb=d9c93f1d9aa5001efe7aafec0b76087fffaf7f0b;hp=871cf6c1547f51e4d443959715960393860007bd;hpb=72512e1897b2ba9a36227b5ff919c5450ed5dc8c;p=dpdk.git

diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 871cf6c154..d55ee7ca00 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -45,14 +45,23 @@ uint16_t tx_udp_src_port = 9;
 uint16_t tx_udp_dst_port = 9;
 
 /* use RFC5735 / RFC2544 reserved network test addresses */
-uint32_t tx_ip_src_addr = (192U << 24) | (18 << 16) | (0 << 8) | 1;
-uint32_t tx_ip_dst_addr = (192U << 24) | (18 << 16) | (0 << 8) | 2;
+uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
+uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
 
 #define IP_DEFTTL  64   /* from RFC 1340. */
 
 static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
 RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
 static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
+RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew);
+					/**< Timestamp offset per queue */
+RTE_DEFINE_PER_LCORE(uint32_t, timestamp_idone); /**< Timestamp init done. */
+
+static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
+static int32_t timestamp_off; /**< Timestamp dynamic field offset */
+static bool timestamp_enable; /**< Timestamp enable */
+static uint32_t timestamp_init_req; /**< Timestamp initialization request. */
+static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];
 
 static void
 copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
@@ -147,13 +156,41 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
 	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
 }
 
+static inline void
+update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
+{
+	struct rte_ipv4_hdr *ip_hdr;
+	struct rte_udp_hdr *udp_hdr;
+	uint16_t pkt_data_len;
+	uint16_t pkt_len;
+
+	pkt_data_len = (uint16_t) (total_pkt_len - (
+					sizeof(struct rte_ether_hdr) +
+					sizeof(struct rte_ipv4_hdr) +
+					sizeof(struct rte_udp_hdr)));
+	/* update UDP packet length */
+	udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
+				sizeof(struct rte_ether_hdr) +
+				sizeof(struct rte_ipv4_hdr));
+	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
+	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
+
+	/* update IP packet length and checksum */
+	ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
+				sizeof(struct rte_ether_hdr));
+	ip_hdr->hdr_checksum = 0;
+	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
+	ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
+	ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
+}
+
 static inline bool
 pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
-		const uint16_t vlan_tci_outer, const uint64_t ol_flags)
+		const uint16_t vlan_tci_outer, const uint64_t ol_flags,
+		const uint16_t idx, const struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
-	uint8_t ip_var = RTE_PER_LCORE(_ip_var);
 	struct rte_mbuf *pkt_seg;
 	uint32_t nb_segs, pkt_len;
 	uint8_t i;
@@ -193,6 +230,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
 			sizeof(struct rte_ether_hdr));
 	if (txonly_multi_flow) {
+		uint8_t ip_var = RTE_PER_LCORE(_ip_var);
 		struct rte_ipv4_hdr *ip_hdr;
 		uint32_t addr;
 
@@ -208,10 +246,64 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		 */
 		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
 		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+		RTE_PER_LCORE(_ip_var) = ip_var;
 	}
 	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
 			sizeof(struct rte_ether_hdr) +
 			sizeof(struct rte_ipv4_hdr));
+
+	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
+		update_pkt_header(pkt, pkt_len);
+
+	if (unlikely(timestamp_enable)) {
+		uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
+		struct {
+			rte_be32_t signature;
+			rte_be16_t pkt_idx;
+			rte_be16_t queue_idx;
+			rte_be64_t ts;
+		} timestamp_mark;
+
+		if (unlikely(timestamp_init_req !=
+				RTE_PER_LCORE(timestamp_idone))) {
+			struct rte_eth_dev *dev = &rte_eth_devices[fs->tx_port];
+			unsigned int txqs_n = dev->data->nb_tx_queues;
+			uint64_t phase = tx_pkt_times_inter * fs->tx_queue /
+					 (txqs_n ? txqs_n : 1);
+			/*
+			 * Initialize the scheduling time phase shift
+			 * depending on queue index.
+			 */
+			skew = timestamp_initial[fs->tx_port] +
+			       tx_pkt_times_inter + phase;
+			RTE_PER_LCORE(timestamp_qskew) = skew;
+			RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
+		}
+		timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+		timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+		timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
+		if (unlikely(!idx)) {
+			skew += tx_pkt_times_inter;
+			pkt->ol_flags |= timestamp_mask;
+			*RTE_MBUF_DYNFIELD
+				(pkt, timestamp_off, uint64_t *) = skew;
+			RTE_PER_LCORE(timestamp_qskew) = skew;
+			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+		} else if (tx_pkt_times_intra) {
+			skew += tx_pkt_times_intra;
+			pkt->ol_flags |= timestamp_mask;
+			*RTE_MBUF_DYNFIELD
+				(pkt, timestamp_off, uint64_t *) = skew;
+			RTE_PER_LCORE(timestamp_qskew) = skew;
+			timestamp_mark.ts = rte_cpu_to_be_64(skew);
+		} else {
+			timestamp_mark.ts = RTE_BE64(0);
+		}
+		copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+			sizeof(struct rte_ether_hdr) +
+			sizeof(struct rte_ipv4_hdr) +
+			sizeof(pkt_udp_hdr));
+	}
 	/*
 	 * Complete first mbuf of packet and append it to the
 	 * burst of packets to be transmitted.
@@ -239,15 +331,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	uint32_t retry;
 	uint64_t ol_flags = 0;
 	uint64_t tx_offloads;
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
-	uint64_t start_tsc;
-	uint64_t end_tsc;
-	uint64_t core_cycles;
-#endif
+	uint64_t start_tsc = 0;
 
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
-	start_tsc = rte_rdtsc();
-#endif
+	get_start_cycles(&start_tsc);
 
 	mbp = current_fwd_lcore()->mbp;
 	txp = &ports[fs->tx_port];
@@ -274,7 +360,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
 						&eth_hdr, vlan_tci,
 						vlan_tci_outer,
-						ol_flags))) {
+						ol_flags,
+						nb_pkt, fs))) {
 			rte_mempool_put_bulk(mbp,
 					(void **)&pkts_burst[nb_pkt],
 					nb_pkt_per_burst - nb_pkt);
@@ -289,7 +376,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 			if (unlikely(!pkt_burst_prepare(pkt, mbp,
 							&eth_hdr, vlan_tci,
 							vlan_tci_outer,
-							ol_flags))) {
+							ol_flags,
+							nb_pkt, fs))) {
 				rte_pktmbuf_free(pkt);
 				break;
 			}
@@ -301,6 +389,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	if (nb_pkt == 0)
 		return;
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+
 	/*
 	 * Retry if necessary
 	 */
@@ -315,11 +404,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
 
 	fs->tx_packets += nb_tx;
 	if (txonly_multi_flow)
-		RTE_PER_LCORE(_ip_var) += nb_tx;
+		RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
 
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
-	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
-#endif
+	inc_tx_burst_stats(fs, nb_tx);
 	if (unlikely(nb_tx < nb_pkt)) {
 		if (verbose_level > 0 && fs->fwd_dropped == 0)
 			printf("port %d tx_queue %d - drop "
@@ -333,23 +420,41 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		} while (++nb_tx < nb_pkt);
 	}
 
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
-	end_tsc = rte_rdtsc();
-	core_cycles = (end_tsc - start_tsc);
-	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
-#endif
+	get_end_cycles(fs, start_tsc);
 }
 
 static void
-tx_only_begin(__attribute__((unused)) portid_t pi)
+tx_only_begin(portid_t pi)
 {
 	uint16_t pkt_data_len;
+	int dynf;
 
 	pkt_data_len = (uint16_t) (tx_pkt_length - (
 					sizeof(struct rte_ether_hdr) +
 					sizeof(struct rte_ipv4_hdr) +
 					sizeof(struct rte_udp_hdr)));
 	setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+
+	timestamp_enable = false;
+	timestamp_mask = 0;
+	timestamp_off = -1;
+	RTE_PER_LCORE(timestamp_qskew) = 0;
+	dynf = rte_mbuf_dynflag_lookup
+				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+	if (dynf >= 0)
+		timestamp_mask = 1ULL << dynf;
+	dynf = rte_mbuf_dynfield_lookup
+				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+	if (dynf >= 0)
+		timestamp_off = dynf;
+	timestamp_enable = tx_pkt_times_inter &&
+			   timestamp_mask &&
+			   timestamp_off >= 0 &&
+			   !rte_eth_read_clock(pi, &timestamp_initial[pi]);
+	if (timestamp_enable)
+		timestamp_init_req++;
+	/* Make sure all settings are visible on forwarding cores.*/
+	rte_wmb();
 }
 
 struct fwd_engine tx_only_engine = {
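
The update_pkt_header() hunk above recomputes the UDP datagram length, the IPv4 total length, and the IPv4 header checksum whenever the generated packet size differs from the precomputed template (random Tx split or multi-flow mode). Below is a minimal standalone sketch of the same recalculation; it is illustrative only, not part of testpmd, and the function name refresh_udp_ipv4_lengths() is hypothetical.

/*
 * Sketch: refresh the UDP/IPv4 lengths and the IPv4 checksum after the
 * packet's total length changed, as update_pkt_header() above does.
 * Only the first mbuf segment is touched, which is where testpmd places
 * the Ethernet/IPv4/UDP headers.
 */
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_mbuf.h>

static void
refresh_udp_ipv4_lengths(struct rte_mbuf *pkt, uint32_t total_pkt_len)
{
	struct rte_ipv4_hdr *ip_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint16_t l4_len;

	/* UDP length = UDP header + payload = total minus L2 and L3 headers. */
	l4_len = (uint16_t)(total_pkt_len - sizeof(struct rte_ether_hdr) -
			    sizeof(struct rte_ipv4_hdr));

	udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
					  sizeof(struct rte_ether_hdr) +
					  sizeof(struct rte_ipv4_hdr));
	udp_hdr->dgram_len = rte_cpu_to_be_16(l4_len);

	ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
					 sizeof(struct rte_ether_hdr));
	ip_hdr->total_length = rte_cpu_to_be_16(
			(uint16_t)(l4_len + sizeof(struct rte_ipv4_hdr)));
	/* The checksum field must be zero while the new checksum is computed. */
	ip_hdr->hdr_checksum = 0;
	ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
}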
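
The tx_only_begin() and pkt_burst_prepare() hunks enable Tx packet scheduling: the begin hook looks up the mbuf dynamic flag RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME and the dynamic field RTE_MBUF_DYNFIELD_TIMESTAMP_NAME and reads the device clock with rte_eth_read_clock(), while the per-packet path stores an absolute transmit time in the dynamic field and sets the flag in ol_flags. The sketch below shows the same lookup-and-stamp sequence from an application's point of view; it assumes a PMD that registered both the flag and the field, and the helper names sched_tx_setup() and sched_tx_one() are hypothetical.

/*
 * Sketch: resolve the Tx timestamp dynamic flag/field registered by a
 * scheduling-capable PMD and send one packet at an absolute device time,
 * mirroring tx_only_begin() and the stamping in pkt_burst_prepare().
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static uint64_t ts_flag_mask;		/* "transmit at field time" flag bit */
static int ts_field_offset = -1;	/* mbuf offset of the uint64_t time */

static int
sched_tx_setup(uint16_t port_id, uint64_t *now)
{
	int flag = rte_mbuf_dynflag_lookup(
			RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	int field = rte_mbuf_dynfield_lookup(
			RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);

	if (flag < 0 || field < 0)
		return -1;	/* PMD did not register Tx scheduling */
	if (rte_eth_read_clock(port_id, now) != 0)
		return -1;	/* device clock is not readable */
	ts_flag_mask = 1ULL << flag;
	ts_field_offset = field;
	return 0;
}

static uint16_t
sched_tx_one(uint16_t port_id, uint16_t queue_id,
	     struct rte_mbuf *pkt, uint64_t tx_time)
{
	/* Store the absolute device time and mark the packet for scheduling. */
	*RTE_MBUF_DYNFIELD(pkt, ts_field_offset, uint64_t *) = tx_time;
	pkt->ol_flags |= ts_flag_mask;
	return rte_eth_tx_burst(port_id, queue_id, &pkt, 1);
}

A caller would take the current device time returned by sched_tx_setup() and pass now plus a desired delay as tx_time; the tx_pkt_times_inter/tx_pkt_times_intra spacing in the patch above boils down to the same per-packet arithmetic.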