X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Fcsumonly.c;h=5eaff9b2ffe47967094335b07adf0fe7369e3f1f;hb=97cb466d65c9;hp=bee652a9be73bd2f000e6fb6ea88853e2d35a9c3;hpb=9075b0f1ce764f3c998971eb987ddeeb852d423c;p=dpdk.git

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index bee652a9be..5eaff9b2ff 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -51,13 +51,11 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -72,6 +70,7 @@
 #include
 #include
 #include
+#include
 #include "testpmd.h"
 
 #define IP_DEFTTL 64 /* from RFC 1340. */
@@ -79,6 +78,10 @@
 #define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
 #define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
+#define GRE_KEY_PRESENT 0x2000
+#define GRE_KEY_LEN     4
+#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT
+
 /* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 #define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
@@ -99,22 +102,15 @@ struct testpmd_offload_info {
 	uint16_t outer_l3_len;
 	uint8_t outer_l4_proto;
 	uint16_t tso_segsz;
+	uint16_t tunnel_tso_segsz;
+	uint32_t pkt_len;
 };
 
-/* simplified GRE header (flags must be 0) */
+/* simplified GRE header */
 struct simple_gre_hdr {
 	uint16_t flags;
 	uint16_t proto;
-};
-
-static uint16_t
-get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
-{
-	if (ethertype == _htons(ETHER_TYPE_IPv4))
-		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
-	else /* assume ethertype == ETHER_TYPE_IPv6 */
-		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
-}
+} __attribute__((__packed__));
 
 static uint16_t
 get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
@@ -199,8 +195,9 @@ parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
 
 /* Parse a vxlan header */
 static void
-parse_vxlan(struct udp_hdr *udp_hdr, struct testpmd_offload_info *info,
-	uint64_t mbuf_olflags)
+parse_vxlan(struct udp_hdr *udp_hdr,
+	    struct testpmd_offload_info *info,
+	    uint32_t pkt_type)
 {
 	struct ether_hdr *eth_hdr;
 
@@ -208,8 +205,7 @@ parse_vxlan(struct udp_hdr *udp_hdr, struct testpmd_offload_info *info,
 	 * (rfc7348) or that the rx offload flag is set (i40e only
 	 * currently) */
 	if (udp_hdr->dst_port != _htons(4789) &&
-		(mbuf_olflags & (PKT_RX_TUNNEL_IPV4_HDR |
-			PKT_RX_TUNNEL_IPV6_HDR)) == 0)
+		RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
 		return;
 
 	info->is_tunnel = 1;
@@ -233,11 +229,17 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 	struct ether_hdr *eth_hdr;
 	struct ipv4_hdr *ipv4_hdr;
 	struct ipv6_hdr *ipv6_hdr;
+	uint8_t gre_len = 0;
 
-	/* if flags != 0; it's not supported */
-	if (gre_hdr->flags != 0)
+	/* check which fields are supported */
+	if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
 		return;
 
+	gre_len += sizeof(struct simple_gre_hdr);
+
+	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
+		gre_len += GRE_KEY_LEN;
+
 	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
 		info->is_tunnel = 1;
 		info->outer_ethertype = info->ethertype;
@@ -245,8 +247,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 		info->outer_l3_len = info->l3_len;
 		info->outer_l4_proto = info->l4_proto;
 
-		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr +
-			sizeof(struct simple_gre_hdr));
+		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);
 
 		parse_ipv4(ipv4_hdr, info);
 		info->ethertype = _htons(ETHER_TYPE_IPv4);
@@ -259,28 +260,26 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 		info->outer_l3_len = info->l3_len;
 		info->outer_l4_proto = info->l4_proto;
 
-		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr +
-			sizeof(struct simple_gre_hdr));
+		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);
 
 		info->ethertype = _htons(ETHER_TYPE_IPv6);
 		parse_ipv6(ipv6_hdr, info);
 		info->l2_len = 0;
 
-	} else if (gre_hdr->proto == _htons(0x6558)) { /* ETH_P_TEB in linux */
+	} else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
 		info->is_tunnel = 1;
 		info->outer_ethertype = info->ethertype;
 		info->outer_l2_len = info->l2_len;
 		info->outer_l3_len = info->l3_len;
 		info->outer_l4_proto = info->l4_proto;
 
-		eth_hdr = (struct ether_hdr *)((char *)gre_hdr +
-			sizeof(struct simple_gre_hdr));
+		eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);
 
 		parse_ethernet(eth_hdr, info);
 	} else
 		return;
 
-	info->l2_len += sizeof(struct simple_gre_hdr);
+	info->l2_len += gre_len;
 }
 
@@ -312,21 +311,6 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
 	info->l2_len = 0;
 }
 
-/* modify the IPv4 or IPv4 source address of a packet */
-static void
-change_ip_addresses(void *l3_hdr, uint16_t ethertype)
-{
-	struct ipv4_hdr *ipv4_hdr = l3_hdr;
-	struct ipv6_hdr *ipv6_hdr = l3_hdr;
-
-	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
-		ipv4_hdr->src_addr =
-			rte_cpu_to_be_32(rte_be_to_cpu_32(ipv4_hdr->src_addr) + 1);
-	} else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
-		ipv6_hdr->src_addr[15] = ipv6_hdr->src_addr[15] + 1;
-	}
-}
-
 /* if possible, calculate the checksum of a packet in hw or sw,
  * depending on the testpmd command line configuration */
 static uint64_t
@@ -338,13 +322,28 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 	struct tcp_hdr *tcp_hdr;
 	struct sctp_hdr *sctp_hdr;
 	uint64_t ol_flags = 0;
+	uint32_t max_pkt_len, tso_segsz = 0;
+
+	/* ensure packet is large enough to require tso */
+	if (!info->is_tunnel) {
+		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
+			info->tso_segsz;
+		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
+			tso_segsz = info->tso_segsz;
+	} else {
+		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
+			info->l2_len + info->l3_len + info->l4_len +
+			info->tunnel_tso_segsz;
+		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
+			tso_segsz = info->tunnel_tso_segsz;
+	}
 
 	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
 		ipv4_hdr = l3_hdr;
 		ipv4_hdr->hdr_checksum = 0;
 
 		ol_flags |= PKT_TX_IPV4;
-		if (info->tso_segsz != 0 && info->l4_proto == IPPROTO_TCP) {
+		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
 			ol_flags |= PKT_TX_IP_CKSUM;
 		} else {
 			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
@@ -363,11 +362,9 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
 			udp_hdr->dgram_cksum = 0;
-			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
+			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
 				ol_flags |= PKT_TX_UDP_CKSUM;
-				udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
-					info->ethertype, ol_flags);
-			} else {
+			else {
 				udp_hdr->dgram_cksum =
 					get_udptcp_checksum(l3_hdr, udp_hdr,
 						info->ethertype);
@@ -376,15 +373,11 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 	} else if (info->l4_proto == IPPROTO_TCP) {
 		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		tcp_hdr->cksum = 0;
-		if (info->tso_segsz != 0) {
+		if (tso_segsz)
 			ol_flags |= PKT_TX_TCP_SEG;
-			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
-				ol_flags);
-		} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
+		else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
 			ol_flags |= PKT_TX_TCP_CKSUM;
-			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
-				ol_flags);
-		} else {
+		else {
 			tcp_hdr->cksum =
 				get_udptcp_checksum(l3_hdr, tcp_hdr,
 					info->ethertype);
@@ -406,12 +399,10 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 	return ol_flags;
 }
 
-/* Calculate the checksum of outer header (only vxlan is supported,
- * meaning IP + UDP). The caller already checked that it's a vxlan
- * packet */
+/* Calculate the checksum of outer header */
 static uint64_t
 process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
-	uint16_t testpmd_ol_flags)
+	uint16_t testpmd_ol_flags, int tso_enabled)
 {
 	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
 	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
@@ -432,10 +423,20 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 	if (info->outer_l4_proto != IPPROTO_UDP)
 		return ol_flags;
 
-	/* outer UDP checksum is always done in software as we have no
-	 * hardware supporting it today, and no API for it. */
-
 	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
+
+	/* outer UDP checksum is done in software as we have no hardware
+	 * supporting it today, and no API for it. In the other side, for
+	 * UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be
+	 * set to zero.
+	 *
+	 * If a packet will be TSOed into small packets by NIC, we cannot
+	 * set/calculate a non-zero checksum, because it will be a wrong
+	 * value after the packet be split into several small packets.
+	 */
+	if (tso_enabled)
+		udp_hdr->dgram_cksum = 0;
+
 	/* do not recalculate udp cksum if it was 0 */
 	if (udp_hdr->dgram_cksum != 0) {
 		udp_hdr->dgram_cksum = 0;
@@ -450,11 +451,159 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 	return ol_flags;
 }
 
+/*
+ * Helper function.
+ * Performs actual copying.
+ * Returns number of segments in the destination mbuf on success,
+ * or negative error code on failure.
+ */
+static int
+mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
+	uint16_t seglen[], uint8_t nb_seg)
+{
+	uint32_t dlen, slen, tlen;
+	uint32_t i, len;
+	const struct rte_mbuf *m;
+	const uint8_t *src;
+	uint8_t *dst;
+
+	dlen = 0;
+	slen = 0;
+	tlen = 0;
+
+	dst = NULL;
+	src = NULL;
+
+	m = ms;
+	i = 0;
+	while (ms != NULL && i != nb_seg) {
+
+		if (slen == 0) {
+			slen = rte_pktmbuf_data_len(ms);
+			src = rte_pktmbuf_mtod(ms, const uint8_t *);
+		}
+
+		if (dlen == 0) {
+			dlen = RTE_MIN(seglen[i], slen);
+			md[i]->data_len = dlen;
+			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
+			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
+		}
+
+		len = RTE_MIN(slen, dlen);
+		memcpy(dst, src, len);
+		tlen += len;
+		slen -= len;
+		dlen -= len;
+		src += len;
+		dst += len;
+
+		if (slen == 0)
+			ms = ms->next;
+		if (dlen == 0)
+			i++;
+	}
+
+	if (ms != NULL)
+		return -ENOBUFS;
+	else if (tlen != m->pkt_len)
+		return -EINVAL;
+
+	md[0]->nb_segs = nb_seg;
+	md[0]->pkt_len = tlen;
+	md[0]->vlan_tci = m->vlan_tci;
+	md[0]->vlan_tci_outer = m->vlan_tci_outer;
+	md[0]->ol_flags = m->ol_flags;
+	md[0]->tx_offload = m->tx_offload;
+
+	return nb_seg;
+}
+
+/*
+ * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
+ * Copy packet contents and offload information into then new segmented mbuf.
+ */
+static struct rte_mbuf *
+pkt_copy_split(const struct rte_mbuf *pkt)
+{
+	int32_t n, rc;
+	uint32_t i, len, nb_seg;
+	struct rte_mempool *mp;
+	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
+	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];
+
+	mp = current_fwd_lcore()->mbp;
+
+	if (tx_pkt_split == TX_PKT_SPLIT_RND)
+		nb_seg = random() % tx_pkt_nb_segs + 1;
+	else
+		nb_seg = tx_pkt_nb_segs;
+
+	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));
+
+	/* calculate number of segments to use and their length. */
+	len = 0;
+	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
+		len += seglen[i];
+		md[i] = NULL;
+	}
+
+	n = pkt->pkt_len - len;
+
+	/* update size of the last segment to fit rest of the packet */
+	if (n >= 0) {
+		seglen[i - 1] += n;
+		len += n;
+	}
+
+	nb_seg = i;
+	while (i != 0) {
+		p = rte_pktmbuf_alloc(mp);
+		if (p == NULL) {
+			RTE_LOG(ERR, USER1,
+				"failed to allocate %u-th of %u mbuf "
+				"from mempool: %s\n",
+				nb_seg - i, nb_seg, mp->name);
+			break;
+		}
+
+		md[--i] = p;
+		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
+			RTE_LOG(ERR, USER1, "mempool %s, %u-th segment: "
+				"expected seglen: %u, "
+				"actual mbuf tailroom: %u\n",
+				mp->name, i, seglen[i],
+				rte_pktmbuf_tailroom(md[i]));
+			break;
+		}
+	}
+
+	/* all mbufs successfully allocated, do copy */
+	if (i == 0) {
+		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
+		if (rc < 0)
+			RTE_LOG(ERR, USER1,
+				"mbuf_copy_split for %p(len=%u, nb_seg=%u) "
+				"into %u segments failed with error code: %d\n",
+				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);
+
+		/* figure out how many mbufs to free. */
+		i = RTE_MAX(rc, 0);
+	}
+
+	/* free unused mbufs */
+	for (; i != nb_seg; i++) {
+		rte_pktmbuf_free_seg(md[i]);
+		md[i] = NULL;
+	}
+
+	return md[0];
+}
+
 /*
  * Receive a burst of packets, and for each packet:
  *  - parse packet, and try to recognize a supported packet type (1)
  *  - if it's not a supported packet type, don't touch the packet, else:
- *  - modify the IPs in inner headers and in outer headers if any
  *  - reprocess the checksum of all supported layers. This is done in SW
  *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
@@ -480,14 +629,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	struct rte_port *txp;
-	struct rte_mbuf *m;
+	struct rte_mbuf *m, *p;
 	struct ether_hdr *eth_hdr;
 	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
 	uint16_t nb_rx;
 	uint16_t nb_tx;
+	uint16_t nb_prep;
 	uint16_t i;
-	uint64_t ol_flags;
+	uint64_t rx_ol_flags, tx_ol_flags;
 	uint16_t testpmd_ol_flags;
+	uint32_t retry;
 	uint32_t rx_bad_ip_csum;
 	uint32_t rx_bad_l4_csum;
 	struct testpmd_offload_info info;
@@ -519,21 +670,33 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 	testpmd_ol_flags = txp->tx_ol_flags;
 	memset(&info, 0, sizeof(info));
 	info.tso_segsz = txp->tso_segsz;
+	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
 
 	for (i = 0; i < nb_rx; i++) {
+		if (likely(i < nb_rx - 1))
+			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+						       void *));
 
-		ol_flags = 0;
-		info.is_tunnel = 0;
 		m = pkts_burst[i];
+		info.is_tunnel = 0;
+		info.pkt_len = rte_pktmbuf_pkt_len(m);
+		tx_ol_flags = 0;
+		rx_ol_flags = m->ol_flags;
 
 		/* Update the L3/L4 checksum error packet statistics */
-		rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
-		rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);
+		if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
+			rx_bad_ip_csum += 1;
+		if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
+			rx_bad_l4_csum += 1;
 
 		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
 		 * and inner headers */
 
 		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+				&eth_hdr->d_addr);
+		ether_addr_copy(&ports[fs->tx_port].eth_addr,
+				&eth_hdr->s_addr);
 		parse_ethernet(eth_hdr, &info);
 		l3_hdr = (char *)eth_hdr + info.l2_len;
 
@@ -541,18 +704,27 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
 			if (info.l4_proto == IPPROTO_UDP) {
 				struct udp_hdr *udp_hdr;
+
 				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
 					info.l3_len);
-				parse_vxlan(udp_hdr, &info, m->ol_flags);
+				parse_vxlan(udp_hdr, &info, m->packet_type);
+				if (info.is_tunnel)
+					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
 			} else if (info.l4_proto == IPPROTO_GRE) {
 				struct simple_gre_hdr *gre_hdr;
+
 				gre_hdr = (struct simple_gre_hdr *)
 					((char *)l3_hdr + info.l3_len);
 				parse_gre(gre_hdr, &info);
+				if (info.is_tunnel)
+					tx_ol_flags |= PKT_TX_TUNNEL_GRE;
 			} else if (info.l4_proto == IPPROTO_IPIP) {
 				void *encap_ip_hdr;
+
 				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
 				parse_encap_ip(encap_ip_hdr, &info);
+				if (info.is_tunnel)
+					tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
 			}
 		}
 
@@ -562,37 +734,35 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
 		}
 
-		/* step 2: change all source IPs (v4 or v6) so we need
-		 * to recompute the chksums even if they were correct */
-
-		change_ip_addresses(l3_hdr, info.ethertype);
-		if (info.is_tunnel == 1)
-			change_ip_addresses(outer_l3_hdr, info.outer_ethertype);
-
-		/* step 3: depending on user command line configuration,
+		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */
 
 		/* process checksums of inner headers first */
-		ol_flags |= process_inner_cksums(l3_hdr, &info, testpmd_ol_flags);
+		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
+			testpmd_ol_flags);
 
 		/* Then process outer headers if any. Note that the software
 		 * checksum will be wrong if one of the inner checksums is
 		 * processed in hardware. */
 		if (info.is_tunnel == 1) {
-			ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
-				testpmd_ol_flags);
+			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
+				testpmd_ol_flags,
+				!!(tx_ol_flags & PKT_TX_TCP_SEG));
 		}
 
-		/* step 4: fill the mbuf meta data (flags and header lengths) */
+		/* step 3: fill the mbuf meta data (flags and header lengths) */
 		if (info.is_tunnel == 1) {
-			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
+			if (info.tunnel_tso_segsz ||
+			    testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
 				m->outer_l2_len = info.outer_l2_len;
 				m->outer_l3_len = info.outer_l3_len;
 				m->l2_len = info.l2_len;
 				m->l3_len = info.l3_len;
+				m->l4_len = info.l4_len;
+				m->tso_segsz = info.tunnel_tso_segsz;
 			} else {
 				/* if there is a outer UDP cksum
@@ -612,36 +782,35 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			m->l2_len = info.l2_len;
 			m->l3_len = info.l3_len;
 			m->l4_len = info.l4_len;
+			m->tso_segsz = info.tso_segsz;
+		}
+		m->ol_flags = tx_ol_flags;
+
+		/* Do split & copy for the packet. */
+		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
+			p = pkt_copy_split(m);
+			if (p != NULL) {
+				rte_pktmbuf_free(m);
+				m = p;
+				pkts_burst[i] = m;
+			}
 		}
-		m->tso_segsz = info.tso_segsz;
-		m->ol_flags = ol_flags;
 
 		/* if verbose mode is enabled, dump debug info */
 		if (verbose_level > 0) {
-			struct {
-				uint64_t flag;
-				uint64_t mask;
-			} tx_flags[] = {
-				{ PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM },
-				{ PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK },
-				{ PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK },
-				{ PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK },
-				{ PKT_TX_IPV4, PKT_TX_IPV4 },
-				{ PKT_TX_IPV6, PKT_TX_IPV6 },
-				{ PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM },
-				{ PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4 },
-				{ PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6 },
-				{ PKT_TX_TCP_SEG, PKT_TX_TCP_SEG },
-			};
-			unsigned j;
-			const char *name;
+			char buf[256];
 
 			printf("-----------------\n");
+			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
+				fs->rx_port, m, m->pkt_len, m->nb_segs);
 			/* dump rx parsed packet info */
+			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
 			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
-				"l4_proto=%d l4_len=%d\n",
+				"l4_proto=%d l4_len=%d flags=%s\n",
 				info.l2_len, rte_be_to_cpu_16(info.ethertype),
-				info.l3_len, info.l4_proto, info.l4_len);
+				info.l3_len, info.l4_proto, info.l4_len, buf);
+			if (rx_ol_flags & PKT_RX_LRO)
+				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
 			if (info.is_tunnel == 1)
 				printf("rx: outer_l2_len=%d outer_ethertype=%x "
 					"outer_l3_len=%d\n", info.outer_l2_len,
@@ -656,23 +825,46 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			printf("tx: m->l2_len=%d m->l3_len=%d "
 				"m->l4_len=%d\n",
 				m->l2_len, m->l3_len, m->l4_len);
-			if ((info.is_tunnel == 1) &&
-				(testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM))
-				printf("tx: m->outer_l2_len=%d m->outer_l3_len=%d\n",
-					m->outer_l2_len, m->outer_l3_len);
-			if (info.tso_segsz != 0)
+			if (info.is_tunnel == 1) {
+				if (testpmd_ol_flags &
+				    TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
+					printf("tx: m->outer_l2_len=%d "
+						"m->outer_l3_len=%d\n",
+						m->outer_l2_len,
+						m->outer_l3_len);
+				if (info.tunnel_tso_segsz != 0 &&
+					(m->ol_flags & PKT_TX_TCP_SEG))
+					printf("tx: m->tso_segsz=%d\n",
+						m->tso_segsz);
+			} else if (info.tso_segsz != 0 &&
+				(m->ol_flags & PKT_TX_TCP_SEG))
 				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
-			printf("tx: flags=");
-			for (j = 0; j < sizeof(tx_flags)/sizeof(*tx_flags); j++) {
-				name = rte_get_tx_ol_flag_name(tx_flags[j].flag);
-				if ((m->ol_flags & tx_flags[j].mask) ==
-					tx_flags[j].flag)
-					printf("%s ", name);
-			}
+			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
+			printf("tx: flags=%s", buf);
 			printf("\n");
 		}
 	}
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+
+	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
+			pkts_burst, nb_rx);
+	if (nb_prep != nb_rx)
+		printf("Preparing packet burst to transmit failed: %s\n",
+			rte_strerror(rte_errno));
+
+	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+			nb_prep);
+
+	/*
+	 * Retry if necessary
+	 */
+	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+		retry = 0;
+		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+			rte_delay_us(burst_tx_delay_time);
+			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+					&pkts_burst[nb_tx], nb_rx - nb_tx);
+		}
+	}
 	fs->tx_packets += nb_tx;
 	fs->rx_bad_ip_csum += rx_bad_ip_csum;
 	fs->rx_bad_l4_csum += rx_bad_l4_csum;
@@ -699,4 +891,3 @@ struct fwd_engine csum_fwd_engine = {
 	.port_fwd_end = NULL,
 	.packet_fwd = pkt_burst_checksum_forward,
 };
-