#include "testpmd.h"
+struct tx_timestamp {
+ rte_be32_t signature;
+ rte_be16_t pkt_idx;
+ rte_be16_t queue_idx;
+ rte_be64_t ts;
+};
+
/* use RFC863 Discard Protocol */
uint16_t tx_udp_src_port = 9;
uint16_t tx_udp_dst_port = 9;
static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
+RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew);
+ /**< Timestamp offset per queue */
+RTE_DEFINE_PER_LCORE(uint32_t, timestamp_idone); /**< Timestamp init done. */
+
+static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
+static int32_t timestamp_off; /**< Timestamp dynamic field offset */
+static bool timestamp_enable; /**< Timestamp enable */
+static uint32_t timestamp_init_req; /**< Timestamp initialization request. */
+static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];
static void
copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
}
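+/*
+ * Recompute the UDP datagram length, the IPv4 total length and the IPv4
+ * header checksum for packets whose final size differs from the
+ * precomputed headers (random segment split or multi-flow mode).
+ */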
+static inline void
+update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
+{
+ struct rte_ipv4_hdr *ip_hdr;
+ struct rte_udp_hdr *udp_hdr;
+ uint16_t pkt_data_len;
+ uint16_t pkt_len;
+
+ pkt_data_len = (uint16_t) (total_pkt_len - (
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr)));
+ /* update UDP packet length */
+ udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
+ udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
+
+ /* update IP packet length and checksum */
+ ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
+ sizeof(struct rte_ether_hdr));
+ ip_hdr->hdr_checksum = 0;
+ pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
+ ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
+ ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
+}
+
static inline bool
pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
- const uint16_t vlan_tci_outer, const uint64_t ol_flags)
+ const uint16_t vlan_tci_outer, const uint64_t ol_flags,
+ const uint16_t idx, const struct fwd_stream *fs)
{
struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
struct rte_mbuf *pkt_seg;
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr));
+
+ if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
+ update_pkt_header(pkt, pkt_len);
+
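+ /*
+ * When Tx scheduling is enabled, lazily initialize the per-queue
+ * time skew on first use and embed a timestamp marker into the
+ * packet payload.
+ */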
+ if (unlikely(timestamp_enable)) {
+ uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
+ struct tx_timestamp timestamp_mark;
+
+ if (unlikely(timestamp_init_req !=
+ RTE_PER_LCORE(timestamp_idone))) {
+ struct rte_eth_dev_info dev_info;
+ unsigned int txqs_n;
+ uint64_t phase;
+ int ret;
+
+ ret = eth_dev_info_get_print_err(fs->tx_port, &dev_info);
+ if (ret != 0) {
+ TESTPMD_LOG(ERR,
+ "Failed to get device info for port %d,"
+ "could not finish timestamp init",
+ fs->tx_port);
+ return false;
+ }
+ txqs_n = dev_info.nb_tx_queues;
+ phase = tx_pkt_times_inter * fs->tx_queue /
+ (txqs_n ? txqs_n : 1);
+ /*
+ * Initialize the scheduling time phase shift
+ * depending on queue index.
+ */
+ skew = timestamp_initial[fs->tx_port] +
+ tx_pkt_times_inter + phase;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
+ }
+ timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+ timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+ timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
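+ /*
+ * The first packet of a burst advances the schedule by the
+ * inter-burst gap; later packets are spaced by the intra-burst
+ * gap when one is configured, otherwise they carry a zero
+ * timestamp and no scheduling flag.
+ */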
+ if (unlikely(!idx)) {
+ skew += tx_pkt_times_inter;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else if (tx_pkt_times_intra) {
+ skew += tx_pkt_times_intra;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else {
+ timestamp_mark.ts = RTE_BE64(0);
+ }
+ copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(pkt_udp_hdr));
+ }
/*
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
uint32_t retry;
uint64_t ol_flags = 0;
uint64_t tx_offloads;
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- uint64_t start_tsc;
- uint64_t end_tsc;
- uint64_t core_cycles;
-#endif
+ uint64_t start_tsc = 0;
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- start_tsc = rte_rdtsc();
-#endif
+ get_start_cycles(&start_tsc);
mbp = current_fwd_lcore()->mbp;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
+ ol_flags = PKT_TX_VLAN;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
+ ol_flags |= PKT_TX_QINQ;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
/*
* Initialize Ethernet header.
*/
- rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
- rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
+ rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.dst_addr);
+ rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.src_addr);
eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
&eth_hdr, vlan_tci,
vlan_tci_outer,
- ol_flags))) {
+ ol_flags,
+ nb_pkt, fs))) {
rte_mempool_put_bulk(mbp,
(void **)&pkts_burst[nb_pkt],
nb_pkt_per_burst - nb_pkt);
if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
vlan_tci,
vlan_tci_outer,
- ol_flags))) {
+ ol_flags,
+ nb_pkt, fs))) {
rte_pktmbuf_free(pkt);
break;
}
return;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+
/*
* Retry if necessary
*/
if (txonly_multi_flow)
RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
-#endif
+ inc_tx_burst_stats(fs, nb_tx);
if (unlikely(nb_tx < nb_pkt)) {
if (verbose_level > 0 && fs->fwd_dropped == 0)
printf("port %d tx_queue %d - drop "
} while (++nb_tx < nb_pkt);
}
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- end_tsc = rte_rdtsc();
- core_cycles = (end_tsc - start_tsc);
- fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
-#endif
+ get_end_cycles(fs, start_tsc);
}
-static void
-tx_only_begin(__attribute__((unused)) portid_t pi)
+static int
+tx_only_begin(portid_t pi)
{
- uint16_t pkt_data_len;
+ uint16_t pkt_hdr_len, pkt_data_len;
+ int dynf;
+
+ pkt_hdr_len = (uint16_t)(sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr));
+ pkt_data_len = tx_pkt_length - pkt_hdr_len;
+
+ if ((tx_pkt_split == TX_PKT_SPLIT_RND || txonly_multi_flow) &&
+ tx_pkt_seg_lengths[0] < pkt_hdr_len) {
+ TESTPMD_LOG(ERR,
+ "Random segment number or multiple flow is enabled, "
+ "but tx_pkt_seg_lengths[0] %u < %u (needed)\n",
+ tx_pkt_seg_lengths[0], pkt_hdr_len);
+ return -EINVAL;
+ }
- pkt_data_len = (uint16_t) (tx_pkt_length - (
- sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- sizeof(struct rte_udp_hdr)));
setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+
+ timestamp_enable = false;
+ timestamp_mask = 0;
+ timestamp_off = -1;
+ RTE_PER_LCORE(timestamp_qskew) = 0;
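+ /*
+ * Tx scheduling relies on the timestamp dynamic mbuf flag and
+ * field; they are present only if a PMD or the application has
+ * registered them.
+ */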
+ dynf = rte_mbuf_dynflag_lookup
+ (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+ if (dynf >= 0)
+ timestamp_mask = 1ULL << dynf;
+ dynf = rte_mbuf_dynfield_lookup
+ (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+ if (dynf >= 0)
+ timestamp_off = dynf;
+ timestamp_enable = tx_pkt_times_inter &&
+ timestamp_mask &&
+ timestamp_off >= 0 &&
+ !rte_eth_read_clock(pi, &timestamp_initial[pi]);
+
+ if (timestamp_enable) {
+ pkt_hdr_len += sizeof(struct tx_timestamp);
+
+ if (tx_pkt_split == TX_PKT_SPLIT_RND) {
+ if (tx_pkt_seg_lengths[0] < pkt_hdr_len) {
+ TESTPMD_LOG(ERR,
+ "Time stamp and random segment number are enabled, "
+ "but tx_pkt_seg_lengths[0] %u < %u (needed)\n",
+ tx_pkt_seg_lengths[0], pkt_hdr_len);
+ return -EINVAL;
+ }
+ } else {
+ uint16_t total = 0;
+ uint8_t i;
+
+ for (i = 0; i < tx_pkt_nb_segs; i++) {
+ total += tx_pkt_seg_lengths[i];
+ if (total >= pkt_hdr_len)
+ break;
+ }
+
+ if (total < pkt_hdr_len) {
+ TESTPMD_LOG(ERR,
+ "Not enough Tx segment space for time stamp info, "
+ "total %u < %u (needed)\n",
+ total, pkt_hdr_len);
+ return -EINVAL;
+ }
+ }
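+ /* Force every lcore to reinitialize its per-queue time skew. */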
+ timestamp_init_req++;
+ }
+
+ /* Make sure all settings are visible on forwarding cores. */
+ rte_wmb();
+ return 0;
}
struct fwd_engine tx_only_engine = {