X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Ftxonly.c;h=fc039a622c523e172946cdf85f46b32200ef5361;hb=2d8699ebb2a0638e8ae18fc0a83c56f984d0b270;hp=97f4a452da12e045c18b32c0cddc15d422cd14ea;hpb=4940344dab1d4da95ec6cd9f4fa8ac1fbc61ba54;p=dpdk.git diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index 97f4a452da..fc039a622c 100644 --- a/app/test-pmd/txonly.c +++ b/app/test-pmd/txonly.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -40,6 +39,13 @@ #include "testpmd.h" +struct tx_timestamp { + rte_be32_t signature; + rte_be16_t pkt_idx; + rte_be16_t queue_idx; + rte_be64_t ts; +}; + /* use RFC863 Discard Protocol */ uint16_t tx_udp_src_port = 9; uint16_t tx_udp_dst_port = 9; @@ -53,8 +59,7 @@ uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2; static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */ RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */ static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */ -RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew); - /**< Timestamp offset per queue */ + static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */ static int32_t timestamp_off; /**< Timestamp dynamic field offset */ static bool timestamp_enable; /**< Timestamp enable */ @@ -153,11 +158,39 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr, ip_hdr->hdr_checksum = (uint16_t) ip_cksum; } +static inline void +update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len) +{ + struct rte_ipv4_hdr *ip_hdr; + struct rte_udp_hdr *udp_hdr; + uint16_t pkt_data_len; + uint16_t pkt_len; + + pkt_data_len = (uint16_t) (total_pkt_len - ( + sizeof(struct rte_ether_hdr) + + sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr))); + /* update UDP packet length */ + udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *, + sizeof(struct rte_ether_hdr) + + sizeof(struct rte_ipv4_hdr)); + pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr)); + udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len); + + /* update IP packet length and checksum */ + ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *, + sizeof(struct rte_ether_hdr)); + ip_hdr->hdr_checksum = 0; + pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr)); + ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len); + ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr); +} + static inline bool pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci, const uint16_t vlan_tci_outer, const uint64_t ol_flags, - const uint16_t idx, const struct fwd_stream *fs) + const uint16_t idx, struct fwd_stream *fs) { struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT]; struct rte_mbuf *pkt_seg; @@ -176,7 +209,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, rte_pktmbuf_reset_headroom(pkt); pkt->data_len = tx_pkt_seg_lengths[0]; - pkt->ol_flags &= EXT_ATTACHED_MBUF; + pkt->ol_flags &= RTE_MBUF_F_EXTERNAL; pkt->ol_flags |= ol_flags; pkt->vlan_tci = vlan_tci; pkt->vlan_tci_outer = vlan_tci_outer; @@ -220,19 +253,30 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt, sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr)); + + if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow) + update_pkt_header(pkt, pkt_len); + if (unlikely(timestamp_enable)) { - uint64_t skew = RTE_PER_LCORE(timestamp_qskew); - struct { - rte_be32_t signature; 
- rte_be16_t pkt_idx; - rte_be16_t queue_idx; - rte_be64_t ts; - } timestamp_mark; + uint64_t skew = fs->ts_skew; + struct tx_timestamp timestamp_mark; if (unlikely(!skew)) { - struct rte_eth_dev *dev = &rte_eth_devices[fs->tx_port]; - unsigned int txqs_n = dev->data->nb_tx_queues; - uint64_t phase = tx_pkt_times_inter * fs->tx_queue / + struct rte_eth_dev_info dev_info; + unsigned int txqs_n; + uint64_t phase; + int ret; + + ret = eth_dev_info_get_print_err(fs->tx_port, &dev_info); + if (ret != 0) { + TESTPMD_LOG(ERR, + "Failed to get device info for port %d," + "could not finish timestamp init", + fs->tx_port); + return false; + } + txqs_n = dev_info.nb_tx_queues; + phase = tx_pkt_times_inter * fs->tx_queue / (txqs_n ? txqs_n : 1); /* * Initialize the scheduling time phase shift @@ -240,7 +284,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, */ skew = timestamp_initial[fs->tx_port] + tx_pkt_times_inter + phase; - RTE_PER_LCORE(timestamp_qskew) = skew; + fs->ts_skew = skew; } timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx); timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue); @@ -250,14 +294,14 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, pkt->ol_flags |= timestamp_mask; *RTE_MBUF_DYNFIELD (pkt, timestamp_off, uint64_t *) = skew; - RTE_PER_LCORE(timestamp_qskew) = skew; + fs->ts_skew = skew; timestamp_mark.ts = rte_cpu_to_be_64(skew); } else if (tx_pkt_times_intra) { skew += tx_pkt_times_intra; pkt->ol_flags |= timestamp_mask; *RTE_MBUF_DYNFIELD (pkt, timestamp_off, uint64_t *) = skew; - RTE_PER_LCORE(timestamp_qskew) = skew; + fs->ts_skew = skew; timestamp_mark.ts = rte_cpu_to_be_64(skew); } else { timestamp_mark.ts = RTE_BE64(0); @@ -294,33 +338,27 @@ pkt_burst_transmit(struct fwd_stream *fs) uint32_t retry; uint64_t ol_flags = 0; uint64_t tx_offloads; -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - uint64_t start_tsc; - uint64_t end_tsc; - uint64_t core_cycles; -#endif + uint64_t start_tsc = 0; -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - start_tsc = rte_rdtsc(); -#endif + get_start_cycles(&start_tsc); mbp = current_fwd_lcore()->mbp; txp = &ports[fs->tx_port]; tx_offloads = txp->dev_conf.txmode.offloads; vlan_tci = txp->tx_vlan_id; vlan_tci_outer = txp->tx_vlan_id_outer; - if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) - ol_flags = PKT_TX_VLAN_PKT; - if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) - ol_flags |= PKT_TX_QINQ_PKT; - if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) - ol_flags |= PKT_TX_MACSEC; + if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) + ol_flags = RTE_MBUF_F_TX_VLAN; + if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) + ol_flags |= RTE_MBUF_F_TX_QINQ; + if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) + ol_flags |= RTE_MBUF_F_TX_MACSEC; /* * Initialize Ethernet header. 
 */
- rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
- rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
+ rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.dst_addr);
+ rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.src_addr);
 eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
@@ -375,9 +413,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 if (txonly_multi_flow)
 RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
-#endif
+ inc_tx_burst_stats(fs, nb_tx);
 if (unlikely(nb_tx < nb_pkt)) {
 if (verbose_level > 0 && fs->fwd_dropped == 0)
 printf("port %d tx_queue %d - drop "
@@ -391,29 +427,34 @@ pkt_burst_transmit(struct fwd_stream *fs)
 } while (++nb_tx < nb_pkt);
 }
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- end_tsc = rte_rdtsc();
- core_cycles = (end_tsc - start_tsc);
- fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
-#endif
+ get_end_cycles(fs, start_tsc);
 }

-static void
+static int
 tx_only_begin(portid_t pi)
 {
- uint16_t pkt_data_len;
+ uint16_t pkt_hdr_len, pkt_data_len;
 int dynf;

- pkt_data_len = (uint16_t) (tx_pkt_length - (
- sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- sizeof(struct rte_udp_hdr)));
+ pkt_hdr_len = (uint16_t)(sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr));
+ pkt_data_len = tx_pkt_length - pkt_hdr_len;
+
+ if ((tx_pkt_split == TX_PKT_SPLIT_RND || txonly_multi_flow) &&
+ tx_pkt_seg_lengths[0] < pkt_hdr_len) {
+ TESTPMD_LOG(ERR,
+ "Random segment number or multiple flow is enabled, "
+ "but tx_pkt_seg_lengths[0] %u < %u (needed)\n",
+ tx_pkt_seg_lengths[0], pkt_hdr_len);
+ return -EINVAL;
+ }
+
 setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);

 timestamp_enable = false;
 timestamp_mask = 0;
 timestamp_off = -1;
- RTE_PER_LCORE(timestamp_qskew) = 0;
 dynf = rte_mbuf_dynflag_lookup
 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 if (dynf >= 0)
@@ -426,6 +467,41 @@ tx_only_begin(portid_t pi)
 timestamp_mask &&
 timestamp_off >= 0 &&
 !rte_eth_read_clock(pi, &timestamp_initial[pi]);
+
+ if (timestamp_enable) {
+ pkt_hdr_len += sizeof(struct tx_timestamp);
+
+ if (tx_pkt_split == TX_PKT_SPLIT_RND) {
+ if (tx_pkt_seg_lengths[0] < pkt_hdr_len) {
+ TESTPMD_LOG(ERR,
+ "Time stamp and random segment number are enabled, "
+ "but tx_pkt_seg_lengths[0] %u < %u (needed)\n",
+ tx_pkt_seg_lengths[0], pkt_hdr_len);
+ return -EINVAL;
+ }
+ } else {
+ uint16_t total = 0;
+ uint8_t i;
+
+ for (i = 0; i < tx_pkt_nb_segs; i++) {
+ total += tx_pkt_seg_lengths[i];
+ if (total >= pkt_hdr_len)
+ break;
+ }
+
+ if (total < pkt_hdr_len) {
+ TESTPMD_LOG(ERR,
+ "Not enough Tx segment space for time stamp info, "
+ "total %u < %u (needed)\n",
+ total, pkt_hdr_len);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Make sure all settings are visible on forwarding cores.*/
+ rte_wmb();
+ return 0;
 }

 struct fwd_engine tx_only_engine = {