X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fnet%2Frte_net.h;h=53a7f4d360077bff0e73addb6f63072f9b3af5ce;hb=52d7d91ed44c6c03f2ea24416bee5a15f5929019;hp=434435ffa2cc8d809b3f22a4a3b51d31f8933d73;hpb=99a2dd955fba6e4cc23b77d590a033650ced9c45;p=dpdk.git

diff --git a/lib/net/rte_net.h b/lib/net/rte_net.h
index 434435ffa2..53a7f4d360 100644
--- a/lib/net/rte_net.h
+++ b/lib/net/rte_net.h
@@ -34,9 +34,6 @@ struct rte_net_hdr_lens {
  * This function skips all IPv6 extensions, returning size of
  * complete header including options and final protocol value.
  *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * @param proto
  *   Protocol field of IPv6 header.
  * @param m
@@ -51,7 +48,6 @@ struct rte_net_hdr_lens {
  *   Protocol that follows IPv6 header.
  *   -1 if an error occurs during mbuf parsing.
  */
-__rte_experimental
 int
 rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
 	int *frag);
@@ -125,11 +121,22 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
 	 * Mainly it is required to avoid fragmented headers check if
 	 * no offloads are requested.
 	 */
-	if (!(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG)))
+	if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG |
+			RTE_MBUF_F_TX_OUTER_IP_CKSUM)))
 		return 0;
 
-	if (ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))
+	if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
 		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
+		/*
+		 * prepare outer IPv4 header checksum by setting it to 0,
+		 * in order to be computed by hardware NICs.
+		 */
+		if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
+			ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+					struct rte_ipv4_hdr *, m->outer_l2_len);
+			ipv4_hdr->hdr_checksum = 0;
+		}
+	}
 
 	/*
 	 * Check if headers are fragmented.
@@ -140,16 +147,16 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
 			inner_l3_offset + m->l3_len + m->l4_len))
 		return -ENOTSUP;
 
-	if (ol_flags & PKT_TX_IPV4) {
+	if (ol_flags & RTE_MBUF_F_TX_IPV4) {
 		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
 				inner_l3_offset);
 
-		if (ol_flags & PKT_TX_IP_CKSUM)
+		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
 			ipv4_hdr->hdr_checksum = 0;
 	}
 
-	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
-		if (ol_flags & PKT_TX_IPV4) {
+	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) {
+		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
 			udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
 					m->l3_len);
 			udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
@@ -164,9 +171,9 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
 			udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
 					ol_flags);
 		}
-	} else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM ||
-			(ol_flags & PKT_TX_TCP_SEG)) {
-		if (ol_flags & PKT_TX_IPV4) {
+	} else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM ||
+			(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
 			/* non-TSO tcp or TSO */
 			tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
 					m->l3_len);
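
Note (not part of the patch): a minimal sketch of how a caller could exercise the new outer IPv4 checksum handling added above. The helper name prepare_tunnel_cksum is hypothetical; the sketch assumes the application has already filled in m->outer_l2_len, m->outer_l3_len, m->l2_len and m->l3_len for the tunnel packet, and it goes through rte_net_intel_cksum_prepare(), the existing wrapper that forwards m->ol_flags to rte_net_intel_cksum_flags_prepare().

#include <rte_mbuf.h>
#include <rte_net.h>

/* Request outer IPv4 + inner IPv4 + UDP checksum offload on a tunnel mbuf. */
static int
prepare_tunnel_cksum(struct rte_mbuf *m)
{
	m->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
		RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		RTE_MBUF_F_TX_UDP_CKSUM;

	/*
	 * With this patch, the helper also zeroes the outer IPv4 header
	 * checksum (located at m->outer_l2_len) so the NIC can compute it,
	 * in addition to zeroing the inner IPv4 checksum and seeding the
	 * UDP checksum with the pseudo-header sum.
	 */
	return rte_net_intel_cksum_prepare(m);
}

In a PMD, the same preparation is typically performed from the tx_prepare callback by calling rte_net_intel_cksum_flags_prepare(m, m->ol_flags) directly.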