#include "testpmd.h"
-#define UDP_SRC_PORT 1024
-#define UDP_DST_PORT 1024
+/* use RFC863 Discard Protocol */
+uint16_t tx_udp_src_port = 9;
+uint16_t tx_udp_dst_port = 9;
-#define IP_SRC_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 1)
-#define IP_DST_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 2)
+/* use RFC5735 / RFC2544 reserved network test addresses */
+uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
+uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
#define IP_DEFTTL 64 /* from RFC 1340. */
-#define IP_VERSION 0x40
-#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
-#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
-static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
-static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
+static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
+RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew);
+ /**< Timestamp offset per queue */
+RTE_DEFINE_PER_LCORE(uint32_t, timestamp_idone); /**< Timestamp init done. */
+
+static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
+static int32_t timestamp_off; /**< Timestamp dynamic field offset */
+static bool timestamp_enable; /**< Timestamp enable */
+static uint32_t timestamp_init_req; /**< Timestamp initialization request. */
+static uint64_t timestamp_initial[RTE_MAX_ETHPORTS]; /**< Initial device clock per port. */
static void
copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
}
static void
-setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
- struct udp_hdr *udp_hdr,
+setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
+ struct rte_udp_hdr *udp_hdr,
uint16_t pkt_data_len)
{
uint16_t *ptr16;
/*
* Initialize UDP header.
*/
- pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
- udp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT);
- udp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT);
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
+ udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
+ udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
/*
* Initialize IP header.
*/
- pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
- ip_hdr->version_ihl = IP_VHL_DEF;
+ pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
+ ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
ip_hdr->type_of_service = 0;
ip_hdr->fragment_offset = 0;
ip_hdr->time_to_live = IP_DEFTTL;
ip_hdr->next_proto_id = IPPROTO_UDP;
ip_hdr->packet_id = 0;
ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
- ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR);
- ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR);
+ ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr);
+ ip_hdr->dst_addr = rte_cpu_to_be_32(tx_ip_dst_addr);
/*
* Compute IP header checksum.
static inline bool
pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
- struct ether_hdr *eth_hdr, const uint16_t vlan_tci,
- const uint16_t vlan_tci_outer, const uint64_t ol_flags)
+ struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
+ const uint16_t vlan_tci_outer, const uint64_t ol_flags,
+ const uint16_t idx, const struct fwd_stream *fs)
{
struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
- uint8_t ip_var = RTE_PER_LCORE(_ip_var);
struct rte_mbuf *pkt_seg;
uint32_t nb_segs, pkt_len;
uint8_t i;
if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
- nb_segs = random() % tx_pkt_nb_segs + 1;
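+		/* rte_rand() is DPDK's lcore-safe PRNG; avoids libc random(). */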
+ nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
else
nb_segs = tx_pkt_nb_segs;
if (nb_segs > 1) {
- if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs))
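+		/* pkt itself supplies the first segment, so only
+		 * nb_segs - 1 extra mbufs are needed from the pool.
+		 */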
+ if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1))
return false;
}
rte_pktmbuf_reset_headroom(pkt);
pkt->data_len = tx_pkt_seg_lengths[0];
- pkt->ol_flags = ol_flags;
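+	/* Preserve only EXT_ATTACHED_MBUF (external buffer attachment)
+	 * before applying the requested offload flags.
+	 */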
+ pkt->ol_flags &= EXT_ATTACHED_MBUF;
+ pkt->ol_flags |= ol_flags;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
- pkt->l2_len = sizeof(struct ether_hdr);
- pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkt->l2_len = sizeof(struct rte_ether_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
pkt_len = pkt->data_len;
pkt_seg = pkt;
*/
copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
- sizeof(struct ether_hdr));
+ sizeof(struct rte_ether_hdr));
if (txonly_multi_flow) {
- struct ipv4_hdr *ip_hdr;
+ uint8_t ip_var = RTE_PER_LCORE(_ip_var);
+ struct rte_ipv4_hdr *ip_hdr;
uint32_t addr;
ip_hdr = rte_pktmbuf_mtod_offset(pkt,
- struct ipv4_hdr *,
- sizeof(struct ether_hdr));
+ struct rte_ipv4_hdr *,
+ sizeof(struct rte_ether_hdr));
/*
 * Generate multiple flows by varying the IP src addr. This
 * spreads packets across RSS queues on the receiver side,
 * making txonly a handy packet generator for quick
 * performance regression tests.
 */
- addr = (IP_DST_ADDR | (ip_var++ << 8)) + rte_lcore_id();
+ addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
ip_hdr->src_addr = rte_cpu_to_be_32(addr);
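+		/* Persist the bumped variation for the next packet. */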
+ RTE_PER_LCORE(_ip_var) = ip_var;
}
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
- sizeof(struct ether_hdr) +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+ if (unlikely(timestamp_enable)) {
+ uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
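+		/*
+		 * Marker written into the UDP payload: a fixed signature
+		 * plus packet index, queue index and the scheduled
+		 * transmit time.
+		 */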
+ struct {
+ rte_be32_t signature;
+ rte_be16_t pkt_idx;
+ rte_be16_t queue_idx;
+ rte_be64_t ts;
+ } timestamp_mark;
+
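+		/*
+		 * tx_only_begin() bumps timestamp_init_req whenever
+		 * timestamping is (re)enabled; refresh this lcore's
+		 * queue skew once per request.
+		 */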
+ if (unlikely(timestamp_init_req !=
+ RTE_PER_LCORE(timestamp_idone))) {
+ struct rte_eth_dev *dev = &rte_eth_devices[fs->tx_port];
+ unsigned int txqs_n = dev->data->nb_tx_queues;
+ uint64_t phase = tx_pkt_times_inter * fs->tx_queue /
+ (txqs_n ? txqs_n : 1);
+ /*
+ * Initialize the scheduling time phase shift
+ * depending on queue index.
+ */
+ skew = timestamp_initial[fs->tx_port] +
+ tx_pkt_times_inter + phase;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
+ }
+ timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
+ timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
+ timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
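+		/*
+		 * The first packet of a burst is scheduled one inter-burst
+		 * gap ahead; later packets are spaced by the intra-burst
+		 * gap, or carry a zero timestamp when no intra gap is
+		 * configured.
+		 */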
+ if (unlikely(!idx)) {
+ skew += tx_pkt_times_inter;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else if (tx_pkt_times_intra) {
+ skew += tx_pkt_times_intra;
+ pkt->ol_flags |= timestamp_mask;
+ *RTE_MBUF_DYNFIELD
+ (pkt, timestamp_off, uint64_t *) = skew;
+ RTE_PER_LCORE(timestamp_qskew) = skew;
+ timestamp_mark.ts = rte_cpu_to_be_64(skew);
+ } else {
+ timestamp_mark.ts = RTE_BE64(0);
+ }
+		copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(pkt_udp_hdr));
+ }
/*
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
struct rte_port *txp;
struct rte_mbuf *pkt;
struct rte_mempool *mbp;
- struct ether_hdr eth_hdr;
+ struct rte_ether_hdr eth_hdr;
uint16_t nb_tx;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
/*
* Initialize Ethernet header.
*/
-	ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
-	ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
-	eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
+	rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
+	eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
nb_pkt_per_burst) == 0) {
if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
&eth_hdr, vlan_tci,
vlan_tci_outer,
- ol_flags))) {
+ ol_flags,
+ nb_pkt, fs))) {
rte_mempool_put_bulk(mbp,
(void **)&pkts_burst[nb_pkt],
nb_pkt_per_burst - nb_pkt);
if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
vlan_tci,
vlan_tci_outer,
- ol_flags))) {
+ ol_flags,
+ nb_pkt, fs))) {
rte_pktmbuf_free(pkt);
break;
}
return;
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+
/*
* Retry if necessary
*/
fs->tx_packets += nb_tx;
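+	/* Drop the IP variation consumed by packets that were prepared
+	 * but not transmitted.
+	 */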
if (txonly_multi_flow)
- RTE_PER_LCORE(_ip_var) += nb_tx;
+ RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
}
static void
-tx_only_begin(__attribute__((unused)) portid_t pi)
+tx_only_begin(portid_t pi)
{
uint16_t pkt_data_len;
+ int dynf;
- pkt_data_len = (uint16_t) (tx_pkt_length - (sizeof(struct ether_hdr) +
- sizeof(struct ipv4_hdr) +
- sizeof(struct udp_hdr)));
+ pkt_data_len = (uint16_t) (tx_pkt_length - (
+ sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr)));
setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+
+ timestamp_enable = false;
+ timestamp_mask = 0;
+ timestamp_off = -1;
+ RTE_PER_LCORE(timestamp_qskew) = 0;
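+	/*
+	 * Look up the Tx scheduling dynamic flag and the timestamp
+	 * dynamic field; the lookups fail unless a PMD or the
+	 * application has registered them.
+	 */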
+ dynf = rte_mbuf_dynflag_lookup
+ (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+ if (dynf >= 0)
+ timestamp_mask = 1ULL << dynf;
+ dynf = rte_mbuf_dynfield_lookup
+ (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+ if (dynf >= 0)
+ timestamp_off = dynf;
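+	/*
+	 * Timestamping requires a configured inter-burst gap, both
+	 * dynamic mbuf entries and a readable device clock.
+	 */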
+ timestamp_enable = tx_pkt_times_inter &&
+ timestamp_mask &&
+ timestamp_off >= 0 &&
+			    !rte_eth_read_clock(pi, &timestamp_initial[pi]);
+ if (timestamp_enable)
+ timestamp_init_req++;
+	/* Make sure all settings are visible to the forwarding cores. */
+ rte_wmb();
}
struct fwd_engine tx_only_engine = {