#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
-/* we cannot use htons() from arpa/inet.h due to name conflicts, and we
- * cannot use rte_cpu_to_be_16() on a constant in a switch/case */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
ipv4_hdr = l3_hdr;
ipv4_hdr->hdr_checksum = 0;
+ ol_flags |= PKT_TX_IPV4;
if (tso_segsz != 0 && l4_proto == IPPROTO_TCP) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
ipv4_hdr->hdr_checksum =
rte_ipv4_cksum(ipv4_hdr);
}
- ol_flags |= PKT_TX_IPV4;
} else if (ethertype == _htons(ETHER_TYPE_IPv6))
ol_flags |= PKT_TX_IPV6;
else
struct udp_hdr *udp_hdr;
uint64_t ol_flags = 0;
- if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM)
- ol_flags |= PKT_TX_VXLAN_CKSUM;
-
if (outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
ipv4_hdr->hdr_checksum = 0;
+ ol_flags |= PKT_TX_OUTER_IPV4;
- if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) == 0)
+ if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM)
+ ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+ else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
- }
+ } else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM)
+ ol_flags |= PKT_TX_OUTER_IPV6;
udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + outer_l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
udp_hdr->dgram_cksum = 0;
- if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) == 0) {
- if (outer_ethertype == _htons(ETHER_TYPE_IPv4))
- udp_hdr->dgram_cksum =
- rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
- else
- udp_hdr->dgram_cksum =
- rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
- }
+ if (outer_ethertype == _htons(ETHER_TYPE_IPv4))
+ udp_hdr->dgram_cksum =
+ rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
+ else
+ udp_hdr->dgram_cksum =
+ rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
}
return ol_flags;
* TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
* wether a checksum must be calculated in software or in hardware. The
* IP, UDP, TCP and SCTP flags always concern the inner layer. The
- * VxLAN flag concerns the outer IP and UDP layer (if packet is
- * recognized as a vxlan packet).
+ * VxLAN flag concerns the outer IP (if the packet is recognized as a VxLAN packet).
*/
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
uint16_t i;
uint64_t ol_flags;
uint16_t testpmd_ol_flags;
- uint8_t l4_proto;
+ uint8_t l4_proto, l4_tun_len = 0;
uint16_t ethertype = 0, outer_ethertype = 0;
uint16_t l2_len = 0, l3_len = 0, l4_len = 0;
uint16_t outer_l2_len = 0, outer_l3_len = 0;
ol_flags = 0;
tunnel = 0;
+ l4_tun_len = 0;
m = pkts_burst[i];
/* Update the L3/L4 checksum error packet statistics */
l3_hdr = (char *)eth_hdr + l2_len;
/* check if it's a supported tunnel (only vxlan for now) */
- if (l4_proto == IPPROTO_UDP) {
+ if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) &&
+ l4_proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr *)((char *)l3_hdr + l3_len);
+			/* check the UDP destination port: 4789 is the
+			 * default VxLAN port (RFC 7348) */
+ if (udp_hdr->dst_port == _htons(4789)) {
+ l4_tun_len = ETHER_VXLAN_HLEN;
+ tunnel = 1;
+
/* currently, this flag is set by i40e only if the
* packet is vxlan */
- if (((m->ol_flags & PKT_RX_TUNNEL_IPV4_HDR) ||
- (m->ol_flags & PKT_RX_TUNNEL_IPV6_HDR)))
- tunnel = 1;
- /* else check udp destination port, 4789 is the default
- * vxlan port (rfc7348) */
- else if (udp_hdr->dst_port == _htons(4789))
+ } else if (m->ol_flags & (PKT_RX_TUNNEL_IPV4_HDR |
+ PKT_RX_TUNNEL_IPV6_HDR))
tunnel = 1;
if (tunnel == 1) {
if (tunnel == 1) {
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) {
- m->l2_len = outer_l2_len;
- m->l3_len = outer_l3_len;
- m->inner_l2_len = l2_len;
- m->inner_l3_len = l3_len;
+ m->outer_l2_len = outer_l2_len;
+ m->outer_l3_len = outer_l3_len;
+ m->l2_len = l4_tun_len + l2_len;
+ m->l3_len = l3_len;
}
else {
/* if we don't do vxlan cksum in hw,
m->tso_segsz = tso_segsz;
m->ol_flags = ol_flags;
+ /* if verbose mode is enabled, dump debug info */
+ if (verbose_level > 0) {
+ struct {
+ uint64_t flag;
+ uint64_t mask;
+ } tx_flags[] = {
+ { PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM },
+ { PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK },
+ { PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK },
+ { PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK },
+ { PKT_TX_IPV4, PKT_TX_IPV4 },
+ { PKT_TX_IPV6, PKT_TX_IPV6 },
+ { PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM },
+ { PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4 },
+ { PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6 },
+ { PKT_TX_TCP_SEG, PKT_TX_TCP_SEG },
+ };
+ unsigned j;
+ const char *name;
+
+ printf("-----------------\n");
+ /* dump rx parsed packet info */
+ printf("rx: l2_len=%d ethertype=%x l3_len=%d "
+ "l4_proto=%d l4_len=%d\n",
+ l2_len, rte_be_to_cpu_16(ethertype),
+ l3_len, l4_proto, l4_len);
+ if (tunnel == 1)
+ printf("rx: outer_l2_len=%d outer_ethertype=%x "
+ "outer_l3_len=%d\n", outer_l2_len,
+ rte_be_to_cpu_16(outer_ethertype),
+ outer_l3_len);
+ /* dump tx packet info */
+ if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
+ TESTPMD_TX_OFFLOAD_UDP_CKSUM |
+ TESTPMD_TX_OFFLOAD_TCP_CKSUM |
+ TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
+ tso_segsz != 0)
+ printf("tx: m->l2_len=%d m->l3_len=%d "
+ "m->l4_len=%d\n",
+ m->l2_len, m->l3_len, m->l4_len);
+ if ((tunnel == 1) &&
+ (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM))
+ printf("tx: m->outer_l2_len=%d m->outer_l3_len=%d\n",
+ m->outer_l2_len, m->outer_l3_len);
+ if (tso_segsz != 0)
+ printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
+ printf("tx: flags=");
+ for (j = 0; j < sizeof(tx_flags)/sizeof(*tx_flags); j++) {
+ name = rte_get_tx_ol_flag_name(tx_flags[j].flag);
+ if ((m->ol_flags & tx_flags[j].mask) ==
+ tx_flags[j].flag)
+ printf("%s ", name);
+ }
+ printf("\n");
+ }
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;