X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_net%2Frte_net.c;h=d858ab15517aca99136942b79a7341816889934e;hb=be11774d453ba9fee825c1055b6602cef4f0afb9;hp=56a13e3c4bcb1dbe6b8bf2be488c3c1c7466ac3c;hpb=85ebc09bad1bbe1296bd31e787e13bcf893f40a9;p=dpdk.git

diff --git a/lib/librte_net/rte_net.c b/lib/librte_net/rte_net.c
index 56a13e3c4b..d858ab1551 100644
--- a/lib/librte_net/rte_net.c
+++ b/lib/librte_net/rte_net.c
@@ -13,6 +13,7 @@
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include <rte_gre.h>
+#include <rte_mpls.h>
 #include <rte_net.h>
 
 /* get l3 packet type from ip6 next protocol */
@@ -139,8 +140,8 @@ ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
 			[0xa] = 12,
 			[0xb] = 16,
 		};
-		const struct gre_hdr *gh;
-		struct gre_hdr gh_copy;
+		const struct rte_gre_hdr *gh;
+		struct rte_gre_hdr gh_copy;
 		uint16_t flags;
 
 		gh = rte_pktmbuf_read(m, *off, sizeof(*gh), &gh_copy);
@@ -154,16 +155,16 @@ ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
 
 		*off += opt_len[flags];
 		*proto = gh->proto;
-		if (*proto == rte_cpu_to_be_16(ETHER_TYPE_TEB))
+		if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB))
 			return RTE_PTYPE_TUNNEL_NVGRE;
 		else
 			return RTE_PTYPE_TUNNEL_GRE;
 	}
 	case IPPROTO_IPIP:
-		*proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
 		return RTE_PTYPE_TUNNEL_IP;
 	case IPPROTO_IPV6:
-		*proto = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
 		return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
 	default:
 		return 0;
@@ -178,8 +179,8 @@ ip4_hlen(const struct ipv4_hdr *hdr)
 }
 
 /* parse ipv6 extended headers, update offset and return next proto */
-static uint16_t
-skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
+int __rte_experimental
+rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
 	int *frag)
 {
 	struct ext_hdr {
@@ -201,7 +202,7 @@ skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
 			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
 				&xh_copy);
 			if (xh == NULL)
-				return 0;
+				return -1;
 			*off += (xh->len + 1) * 8;
 			proto = xh->next_hdr;
 			break;
@@ -209,7 +210,7 @@ skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
 			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
 				&xh_copy);
 			if (xh == NULL)
-				return 0;
+				return -1;
 			*off += 8;
 			proto = xh->next_hdr;
 			*frag = 1;
@@ -220,7 +221,7 @@ skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
 			return proto;
 		}
 	}
-	return 0;
+	return -1;
 }
 
 /* parse mbuf data to get packet type */
@@ -228,11 +229,12 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 	struct rte_net_hdr_lens *hdr_lens, uint32_t layers)
 {
 	struct rte_net_hdr_lens local_hdr_lens;
-	const struct ether_hdr *eh;
-	struct ether_hdr eh_copy;
+	const struct rte_ether_hdr *eh;
+	struct rte_ether_hdr eh_copy;
 	uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
 	uint32_t off = 0;
 	uint16_t proto;
+	int ret;
 
 	if (hdr_lens == NULL)
 		hdr_lens = &local_hdr_lens;
@@ -247,12 +249,12 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 	if ((layers & RTE_PTYPE_L2_MASK) == 0)
 		return 0;
 
-	if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
 		goto l3; /* fast path if packet is IPv4 */
 
-	if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
-		const struct vlan_hdr *vh;
-		struct vlan_hdr vh_copy;
+	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
+		const struct rte_vlan_hdr *vh;
+		struct rte_vlan_hdr vh_copy;
 
 		pkt_type = RTE_PTYPE_L2_ETHER_VLAN;
 		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
@@ -261,9 +263,9 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		off += sizeof(*vh);
 		hdr_lens->l2_len += sizeof(*vh);
 		proto = vh->eth_proto;
-	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
-		const struct vlan_hdr *vh;
-		struct vlan_hdr vh_copy;
+	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
+		const struct rte_vlan_hdr *vh;
+		struct rte_vlan_hdr vh_copy;
 
 		pkt_type = RTE_PTYPE_L2_ETHER_QINQ;
 		vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
@@ -273,13 +275,31 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		off += 2 * sizeof(*vh);
 		hdr_lens->l2_len += 2 * sizeof(*vh);
 		proto = vh->eth_proto;
+	} else if ((proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLS)) ||
+		(proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLSM))) {
+		unsigned int i;
+		const struct mpls_hdr *mh;
+		struct mpls_hdr mh_copy;
+
+#define MAX_MPLS_HDR 5
+		for (i = 0; i < MAX_MPLS_HDR; i++) {
+			mh = rte_pktmbuf_read(m, off + (i * sizeof(*mh)),
+				sizeof(*mh), &mh_copy);
+			if (unlikely(mh == NULL))
+				return pkt_type;
+		}
+		if (i == MAX_MPLS_HDR)
+			return pkt_type;
+		pkt_type = RTE_PTYPE_L2_ETHER_MPLS;
+		hdr_lens->l2_len += (sizeof(*mh) * i);
+		return pkt_type;
 	}
 
- l3:
+l3:
 	if ((layers & RTE_PTYPE_L3_MASK) == 0)
 		return pkt_type;
 
-	if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
 		const struct ipv4_hdr *ip4h;
 		struct ipv4_hdr ip4h_copy;
 
@@ -302,7 +322,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		}
 		proto = ip4h->next_proto_id;
 		pkt_type |= ptype_l4(proto);
-	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
 		const struct ipv6_hdr *ip6h;
 		struct ipv6_hdr ip6h_copy;
 		int frag = 0;
@@ -316,7 +336,10 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		off += hdr_lens->l3_len;
 		pkt_type |= ptype_l3_ip6(proto);
 		if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
-			proto = skip_ip6_ext(proto, m, &off, &frag);
+			ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
+			if (ret < 0)
+				return pkt_type;
+			proto = ret;
 			hdr_lens->l3_len = off - hdr_lens->l2_len;
 		}
 		if (proto == 0)
@@ -368,7 +391,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		return pkt_type;
 
 	hdr_lens->inner_l2_len = 0;
-	if (proto == rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
 		eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
 		if (unlikely(eh == NULL))
 			return pkt_type;
@@ -378,9 +401,9 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		hdr_lens->inner_l2_len = sizeof(*eh);
 	}
 
-	if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
-		const struct vlan_hdr *vh;
-		struct vlan_hdr vh_copy;
+	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
+		const struct rte_vlan_hdr *vh;
+		struct rte_vlan_hdr vh_copy;
 
 		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
 		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
@@ -390,9 +413,9 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		off += sizeof(*vh);
 		hdr_lens->inner_l2_len += sizeof(*vh);
 		proto = vh->eth_proto;
-	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
-		const struct vlan_hdr *vh;
-		struct vlan_hdr vh_copy;
+	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
+		const struct rte_vlan_hdr *vh;
+		struct rte_vlan_hdr vh_copy;
 
 		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
 		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_QINQ;
@@ -408,7 +431,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 	if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
 		return pkt_type;
 
-	if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
 		const struct ipv4_hdr *ip4h;
 		struct ipv4_hdr ip4h_copy;
 
@@ -431,7 +454,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 		}
 		proto = ip4h->next_proto_id;
 		pkt_type |= ptype_inner_l4(proto);
-	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
 		const struct ipv6_hdr *ip6h;
 		struct ipv6_hdr ip6h_copy;
 		int frag = 0;
@@ -449,7 +472,10 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 			uint32_t prev_off;
 
 			prev_off = off;
-			proto = skip_ip6_ext(proto, m, &off, &frag);
+			ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
+			if (ret < 0)
+				return pkt_type;
+			proto = ret;
 			hdr_lens->inner_l3_len += off - prev_off;
 		}
 		if (proto == 0)
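
The hunks above make two caller-visible changes: rte_net_get_ptype() now recognizes MPLS unicast/multicast frames and reports RTE_PTYPE_L2_ETHER_MPLS, and the former static helper skip_ip6_ext() is exported as the experimental rte_net_skip_ip6_ext(), which returns a negative value (instead of 0) when the IPv6 extension headers cannot be read. The snippet below is a minimal usage sketch of those two entry points; it is not part of the patch, the helper names (classify_mbuf, skip_ext_hdrs) are invented for illustration, and building against the experimental symbol assumes ALLOW_EXPERIMENTAL_API is defined.

/* Usage sketch only -- not part of the patch above. */
#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include <rte_net.h>

/* Classify a received mbuf in software and print a short summary. */
static void
classify_mbuf(const struct rte_mbuf *m)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t ptype;

	/* Parse every layer; MPLS frames now yield RTE_PTYPE_L2_ETHER_MPLS. */
	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);

	if ((ptype & RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_MPLS)
		printf("MPLS frame, l2_len=%u\n", (unsigned)hdr_lens.l2_len);
	else
		printf("ptype=0x%x l2=%u l3=%u l4=%u\n", (unsigned)ptype,
			(unsigned)hdr_lens.l2_len, (unsigned)hdr_lens.l3_len,
			(unsigned)hdr_lens.l4_len);
}

/*
 * Skip IPv6 extension headers with the now-public helper. 'proto' is the
 * next-header value taken from the fixed IPv6 header and '*off' points just
 * past that header; on success the last next-header value is returned and
 * *off is moved past the extension headers.
 */
static int
skip_ext_hdrs(const struct rte_mbuf *m, uint16_t proto, uint32_t *off)
{
	int frag = 0;
	int next = rte_net_skip_ip6_ext(proto, m, off, &frag);

	if (next < 0)	/* truncated or unreadable extension chain */
		return -1;
	return next;
}

One note on the design choice visible in the diff: the error paths of the helper used to return 0, which callers such as rte_net_get_ptype() could not tell apart from a normal "no further header" result; switching those paths to a negative return lets the caller bail out explicitly (the new "if (ret < 0) return pkt_type;" checks) while the legitimate zero result is left untouched.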