X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Fcsumonly.c;h=57e6ae276f64a695410ec394452947991f7f8901;hb=d82eeefe7ac83a28eab08e5c8705f76a6be12095;hp=2ecd6b8c871f836bada2f22797c8c3b5a1d9d3f5;hpb=3f3061f4a23da49a1cd945fec21dbe8ff02624ff;p=dpdk.git

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 2ecd6b8c87..57e6ae276f 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -102,6 +102,7 @@ struct testpmd_offload_info {
 	uint8_t outer_l4_proto;
 	uint16_t tso_segsz;
 	uint16_t tunnel_tso_segsz;
+	uint32_t pkt_len;
 };
 
 /* simplified GRE header */
@@ -318,21 +319,6 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
 	info->l2_len = 0;
 }
 
-/* modify the IPv4 or IPv4 source address of a packet */
-static void
-change_ip_addresses(void *l3_hdr, uint16_t ethertype)
-{
-	struct ipv4_hdr *ipv4_hdr = l3_hdr;
-	struct ipv6_hdr *ipv6_hdr = l3_hdr;
-
-	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
-		ipv4_hdr->src_addr =
-			rte_cpu_to_be_32(rte_be_to_cpu_32(ipv4_hdr->src_addr) + 1);
-	} else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
-		ipv6_hdr->src_addr[15] = ipv6_hdr->src_addr[15] + 1;
-	}
-}
-
 /* if possible, calculate the checksum of a packet in hw or sw,
  * depending on the testpmd command line configuration */
 static uint64_t
@@ -344,15 +330,28 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 	struct tcp_hdr *tcp_hdr;
 	struct sctp_hdr *sctp_hdr;
 	uint64_t ol_flags = 0;
+	uint32_t max_pkt_len, tso_segsz = 0;
+
+	/* ensure packet is large enough to require tso */
+	if (!info->is_tunnel) {
+		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
+			info->tso_segsz;
+		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
+			tso_segsz = info->tso_segsz;
+	} else {
+		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
+			info->l2_len + info->l3_len + info->l4_len +
+			info->tunnel_tso_segsz;
+		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
+			tso_segsz = info->tunnel_tso_segsz;
+	}
 
 	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
 		ipv4_hdr = l3_hdr;
 		ipv4_hdr->hdr_checksum = 0;
 
 		ol_flags |= PKT_TX_IPV4;
-		if (info->l4_proto == IPPROTO_TCP &&
-			((info->is_tunnel && info->tunnel_tso_segsz != 0) ||
-			 (!info->is_tunnel && info->tso_segsz != 0))) {
+		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
 			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
@@ -384,8 +383,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 	} else if (info->l4_proto == IPPROTO_TCP) {
 		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		tcp_hdr->cksum = 0;
-		if ((info->is_tunnel && info->tunnel_tso_segsz != 0) ||
-			(!info->is_tunnel && info->tso_segsz != 0)) {
+		if (tso_segsz) {
 			ol_flags |= PKT_TX_TCP_SEG;
 			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
 				ol_flags);
@@ -620,7 +618,6 @@ pkt_copy_split(const struct rte_mbuf *pkt)
  * Receive a burst of packets, and for each packet:
  *  - parse packet, and try to recognize a supported packet type (1)
  *  - if it's not a supported packet type, don't touch the packet, else:
- *    - modify the IPs in inner headers and in outer headers if any
  *    - reprocess the checksum of all supported layers. This is done in SW
  *      or HW, depending on testpmd command line configuration
  *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
@@ -652,7 +649,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 	uint16_t nb_rx;
 	uint16_t nb_tx;
 	uint16_t i;
-	uint64_t ol_flags;
+	uint64_t rx_ol_flags, tx_ol_flags;
 	uint16_t testpmd_ol_flags;
 	uint32_t retry;
 	uint32_t rx_bad_ip_csum;
@@ -693,13 +690,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
 						       void *));
 
-		ol_flags = 0;
-		info.is_tunnel = 0;
 		m = pkts_burst[i];
+		info.is_tunnel = 0;
+		info.pkt_len = rte_pktmbuf_pkt_len(m);
+		tx_ol_flags = 0;
+		rx_ol_flags = m->ol_flags;
 
 		/* Update the L3/L4 checksum error packet statistics */
-		rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
-		rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);
+		if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
+			rx_bad_ip_csum += 1;
+		if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
+			rx_bad_l4_csum += 1;
 
 		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
 		 * and inner headers */
@@ -721,7 +722,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					info.l3_len);
 				parse_vxlan(udp_hdr, &info, m->packet_type);
 				if (info.is_tunnel)
-					ol_flags |= PKT_TX_TUNNEL_VXLAN;
+					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
 			} else if (info.l4_proto == IPPROTO_GRE) {
 				struct simple_gre_hdr *gre_hdr;
 
@@ -729,14 +730,14 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 					((char *)l3_hdr + info.l3_len);
 				parse_gre(gre_hdr, &info);
 				if (info.is_tunnel)
-					ol_flags |= PKT_TX_TUNNEL_GRE;
+					tx_ol_flags |= PKT_TX_TUNNEL_GRE;
 			} else if (info.l4_proto == IPPROTO_IPIP) {
 				void *encap_ip_hdr;
 
 				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
 				parse_encap_ip(encap_ip_hdr, &info);
 				if (info.is_tunnel)
-					ol_flags |= PKT_TX_TUNNEL_IPIP;
+					tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
 			}
 		}
 
@@ -746,31 +747,25 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
 		}
 
-		/* step 2: change all source IPs (v4 or v6) so we need
-		 * to recompute the chksums even if they were correct */
-
-		change_ip_addresses(l3_hdr, info.ethertype);
-		if (info.is_tunnel == 1)
-			change_ip_addresses(outer_l3_hdr, info.outer_ethertype);
-
-		/* step 3: depending on user command line configuration,
+		/* step 2: depending on user command line configuration,
 		 * recompute checksum either in software or flag the
 		 * mbuf to offload the calculation to the NIC. If TSO
 		 * is configured, prepare the mbuf for TCP segmentation. */
 
 		/* process checksums of inner headers first */
-		ol_flags |= process_inner_cksums(l3_hdr, &info, testpmd_ol_flags);
+		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
+			testpmd_ol_flags);
 
 		/* Then process outer headers if any. Note that the software
 		 * checksum will be wrong if one of the inner checksums is
 		 * processed in hardware. */
 		if (info.is_tunnel == 1) {
-			ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
+			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
 				testpmd_ol_flags,
-				!!(ol_flags & PKT_TX_TCP_SEG));
+				!!(tx_ol_flags & PKT_TX_TCP_SEG));
 		}
 
-		/* step 4: fill the mbuf meta data (flags and header lengths) */
+		/* step 3: fill the mbuf meta data (flags and header lengths) */
 
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
@@ -802,7 +797,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			m->l4_len = info.l4_len;
 			m->tso_segsz = info.tso_segsz;
 		}
-		m->ol_flags = ol_flags;
+		m->ol_flags = tx_ol_flags;
 
 		/* Do split & copy for the packet. */
 		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
@@ -819,13 +814,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			char buf[256];
 
 			printf("-----------------\n");
-			printf("mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
-				m, m->pkt_len, m->nb_segs);
+			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
+				fs->rx_port, m, m->pkt_len, m->nb_segs);
 			/* dump rx parsed packet info */
+			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
 			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
-				"l4_proto=%d l4_len=%d\n",
+				"l4_proto=%d l4_len=%d flags=%s\n",
 				info.l2_len, rte_be_to_cpu_16(info.ethertype),
-				info.l3_len, info.l4_proto, info.l4_len);
+				info.l3_len, info.l4_proto, info.l4_len, buf);
+			if (rx_ol_flags & PKT_RX_LRO)
+				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
 			if (info.is_tunnel == 1)
 				printf("rx: outer_l2_len=%d outer_ethertype=%x "
 					"outer_l3_len=%d\n", info.outer_l2_len,
@@ -847,10 +845,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 						"m->outer_l3_len=%d\n",
 						m->outer_l2_len,
 						m->outer_l3_len);
-				if (info.tunnel_tso_segsz != 0)
+				if (info.tunnel_tso_segsz != 0 &&
+						(m->ol_flags & PKT_TX_TCP_SEG))
 					printf("tx: m->tso_segsz=%d\n",
 						m->tso_segsz);
-			} else if (info.tso_segsz != 0)
+			} else if (info.tso_segsz != 0 &&
+					(m->ol_flags & PKT_TX_TCP_SEG))
 				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
 			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
 			printf("tx: flags=%s", buf);
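
Note on the RX statistics hunk: the patch stops testing the single PKT_RX_IP_CKSUM_BAD bit and instead compares the whole PKT_RX_IP_CKSUM_MASK field, so only packets whose status is actually "bad" are counted. Below is a minimal sketch of that distinction, assuming the PKT_RX_IP_CKSUM_* values defined in rte_mbuf.h of this DPDK era; the helper name is illustrative and not part of the patch.

#include <stdint.h>

#include <rte_mbuf.h>	/* PKT_RX_IP_CKSUM_* flag definitions */

/* Illustrative helper (not from the patch): classify the IP checksum
 * status carried in mbuf ol_flags. PKT_RX_IP_CKSUM_MASK covers two bits,
 * so the four states must be compared as a field rather than tested as
 * single bits; PKT_RX_IP_CKSUM_NONE also has the BAD bit set, which is
 * why a plain "& PKT_RX_IP_CKSUM_BAD" test can over-count bad packets. */
static inline const char *
ip_cksum_status(uint64_t ol_flags)
{
	switch (ol_flags & PKT_RX_IP_CKSUM_MASK) {
	case PKT_RX_IP_CKSUM_BAD:
		return "bad";		/* what rx_bad_ip_csum now counts */
	case PKT_RX_IP_CKSUM_GOOD:
		return "good";
	case PKT_RX_IP_CKSUM_NONE:
		return "none";		/* checksum not verified by hardware */
	default:
		return "unknown";	/* PKT_RX_IP_CKSUM_UNKNOWN */
	}
}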