#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
/* hardcoded configuration (for now) */
static unsigned cfg_n_flows = 1024;
-static unsigned cfg_pkt_size = 300;
-static uint32_t cfg_ip_src = IPv4(10, 254, 0, 0);
-static uint32_t cfg_ip_dst = IPv4(10, 253, 0, 0);
+static uint32_t cfg_ip_src = RTE_IPV4(10, 254, 0, 0);
+static uint32_t cfg_ip_dst = RTE_IPV4(10, 253, 0, 0);
static uint16_t cfg_udp_src = 1000;
static uint16_t cfg_udp_dst = 1001;
-static struct ether_addr cfg_ether_src =
+static struct rte_ether_addr cfg_ether_src =
{{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x00 }};
-static struct ether_addr cfg_ether_dst =
+static struct rte_ether_addr cfg_ether_dst =
{{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x01 }};
#define IP_DEFTTL 64 /* from RFC 1340. */
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
-static inline struct rte_mbuf *
-tx_mbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return (m);
-}
-
-
static inline uint16_t
-ip_sum(const uint16_t *hdr, int hdr_len)
+ip_sum(const unaligned_uint16_t *hdr, int hdr_len)
{
uint32_t sum = 0;
static void
pkt_burst_flow_gen(struct fwd_stream *fs)
{
- unsigned pkt_size = cfg_pkt_size - 4; /* Adjust FCS */
+ unsigned pkt_size = tx_pkt_length - 4; /* Adjust FCS */
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mempool *mbp;
struct rte_mbuf *pkt;
- struct ether_hdr *eth_hdr;
- struct ipv4_hdr *ip_hdr;
- struct udp_hdr *udp_hdr;
- uint16_t vlan_tci;
- uint16_t ol_flags;
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
+ struct rte_udp_hdr *udp_hdr;
+ uint16_t vlan_tci, vlan_tci_outer;
+ uint64_t ol_flags = 0;
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t nb_pkt;
uint16_t i;
+ uint32_t retry;
+ uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
mbp = current_fwd_lcore()->mbp;
vlan_tci = ports[fs->tx_port].tx_vlan_id;
- ol_flags = ports[fs->tx_port].tx_ol_flags;
+ vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
+
+ tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
+ if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ ol_flags |= PKT_TX_VLAN_PKT;
+ if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ ol_flags |= PKT_TX_QINQ_PKT;
+ if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ ol_flags |= PKT_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
- pkt = tx_mbuf_alloc(mbp);
+ pkt = rte_mbuf_raw_alloc(mbp);
if (!pkt)
break;
pkt->next = NULL;
/* Initialize Ethernet header. */
- eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
-	ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
-	ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+	rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
+	rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
+ eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
/* Initialize IP header. */
- ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
memset(ip_hdr, 0, sizeof(*ip_hdr));
ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
next_flow);
ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_size -
sizeof(*eth_hdr));
- ip_hdr->hdr_checksum = ip_sum((uint16_t *)ip_hdr,
+ ip_hdr->hdr_checksum = ip_sum((unaligned_uint16_t *)ip_hdr,
sizeof(*ip_hdr));
/* Initialize UDP header. */
- udp_hdr = (struct udp_hdr *)(ip_hdr + 1);
+ udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
udp_hdr->src_port = rte_cpu_to_be_16(cfg_udp_src);
udp_hdr->dst_port = rte_cpu_to_be_16(cfg_udp_dst);
udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
pkt->pkt_len = pkt_size;
pkt->ol_flags = ol_flags;
pkt->vlan_tci = vlan_tci;
- pkt->l2_len = sizeof(struct ether_hdr);
- pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci_outer = vlan_tci_outer;
+ pkt->l2_len = sizeof(struct rte_ether_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
next_flow = (next_flow + 1) % cfg_n_flows;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_rx - nb_tx);
+ }
+ }
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS