/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_gre.h>
#include <rte_net.h>
/* get l3 packet type from ip6 next protocol */
static uint32_t
ptype_l3_ip6(uint8_t ip6_proto)
{
	static const uint32_t ip6_ext_proto_map[256] = {
		[IPPROTO_HOPOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_ROUTING] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_FRAGMENT] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_ESP] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_AH] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_DSTOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
	};

	return RTE_PTYPE_L3_IPV6 + ip6_ext_proto_map[ip6_proto];
}
/* get l3 packet type from ip version and header length */
static uint32_t
ptype_l3_ip(uint8_t ipv_ihl)
{
	static const uint32_t ptype_l3_ip_proto_map[256] = {
		[0x45] = RTE_PTYPE_L3_IPV4,
		[0x46] = RTE_PTYPE_L3_IPV4_EXT,
		[0x47] = RTE_PTYPE_L3_IPV4_EXT,
		[0x48] = RTE_PTYPE_L3_IPV4_EXT,
		[0x49] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4A] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4B] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4C] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4D] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4E] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4F] = RTE_PTYPE_L3_IPV4_EXT,
	};

	return ptype_l3_ip_proto_map[ipv_ihl];
}
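/*
 * Note on ptype_l3_ip(): the index is the first byte of the IPv4 header,
 * i.e. the version (high nibble) and IHL (low nibble). 0x45 is version 4
 * with the minimal 5-word (20-byte) header; 0x46..0x4f all carry options
 * and therefore map to RTE_PTYPE_L3_IPV4_EXT. Any other value (wrong
 * version or IHL below 5) maps to 0, i.e. unknown.
 */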
/* get l4 packet type from proto */
static uint32_t
ptype_l4(uint8_t proto)
{
	static const uint32_t ptype_l4_proto[256] = {
		[IPPROTO_UDP] = RTE_PTYPE_L4_UDP,
		[IPPROTO_TCP] = RTE_PTYPE_L4_TCP,
		[IPPROTO_SCTP] = RTE_PTYPE_L4_SCTP,
	};

	return ptype_l4_proto[proto];
}
/* get inner l3 packet type from ip6 next protocol */
static uint32_t
ptype_inner_l3_ip6(uint8_t ip6_proto)
{
	static const uint32_t ptype_inner_ip6_ext_proto_map[256] = {
		[IPPROTO_HOPOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_ROUTING] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_FRAGMENT] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_ESP] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_AH] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_DSTOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
	};

	return RTE_PTYPE_INNER_L3_IPV6 +
		ptype_inner_ip6_ext_proto_map[ip6_proto];
}
/* get inner l3 packet type from ip version and header length */
static uint32_t
ptype_inner_l3_ip(uint8_t ipv_ihl)
{
	static const uint32_t ptype_inner_l3_ip_proto_map[256] = {
		[0x45] = RTE_PTYPE_INNER_L3_IPV4,
		[0x46] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x47] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x48] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x49] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4A] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4B] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4C] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4D] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4E] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4F] = RTE_PTYPE_INNER_L3_IPV4_EXT,
	};

	return ptype_inner_l3_ip_proto_map[ipv_ihl];
}
/* get inner l4 packet type from proto */
static uint32_t
ptype_inner_l4(uint8_t proto)
{
	static const uint32_t ptype_inner_l4_proto[256] = {
		[IPPROTO_UDP] = RTE_PTYPE_INNER_L4_UDP,
		[IPPROTO_TCP] = RTE_PTYPE_INNER_L4_TCP,
		[IPPROTO_SCTP] = RTE_PTYPE_INNER_L4_SCTP,
	};

	return ptype_inner_l4_proto[proto];
}
/* get the tunnel packet type if any, update proto and off. */
static uint32_t
ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
	uint32_t *off)
{
	switch (*proto) {
	case IPPROTO_GRE: {
		static const uint8_t opt_len[16] = {
			[0x0] = 4, [0x1] = 8, [0x2] = 8, [0x8] = 8,
			[0x3] = 12, [0x9] = 12, [0xa] = 12, [0xb] = 16,
		};
		const struct gre_hdr *gh;
		struct gre_hdr gh_copy;
		uint16_t flags;

		gh = rte_pktmbuf_read(m, *off, sizeof(*gh), &gh_copy);
		if (unlikely(gh == NULL))
			return 0;

		flags = rte_be_to_cpu_16(*(const uint16_t *)gh);
		flags >>= 12; /* keep the C, R, K and S flag bits */
		if (opt_len[flags] == 0)
			return 0;

		*off += opt_len[flags];
		*proto = gh->proto;
		if (*proto == rte_cpu_to_be_16(ETHER_TYPE_TEB))
			return RTE_PTYPE_TUNNEL_NVGRE;
		else
			return RTE_PTYPE_TUNNEL_GRE;
	}
	case IPPROTO_IPIP:
		*proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
		return RTE_PTYPE_TUNNEL_IP;
	case IPPROTO_IPV6:
		*proto = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
	default:
		return 0;
	}
}
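/*
 * Illustration for ptype_tunnel() with GRE, based on the opt_len table
 * above: the four flag bits kept after the shift are C (checksum),
 * R (routing, must be zero), K (key) and S (sequence). A header with the
 * key and sequence bits set gives flags == 0x3, so opt_len[0x3] == 12
 * bytes are skipped: the 4-byte base header plus a 4-byte key plus a
 * 4-byte sequence number. Any combination with R set maps to 0 and the
 * packet is rejected as a GRE tunnel.
 */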
/* get the ipv4 header length */
static uint8_t
ip4_hlen(const struct ipv4_hdr *hdr)
{
	return (hdr->version_ihl & 0xf) * 4;
}
/* parse ipv6 extended headers, update offset and return next proto */
static uint16_t
skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
	int *frag)
{
	struct ext_hdr {
		uint8_t next_hdr;
		uint8_t len;
	};
	const struct ext_hdr *xh;
	struct ext_hdr xh_copy;
	unsigned int i;

	*frag = 0;

	/* bound the walk so a malformed chain cannot loop forever */
#define MAX_EXT_HDRS 5
	for (i = 0; i < MAX_EXT_HDRS; i++) {
		switch (proto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
				&xh_copy);
			if (xh == NULL)
				return 0;
			*off += (xh->len + 1) * 8;
			proto = xh->next_hdr;
			break;
		case IPPROTO_FRAGMENT:
			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
				&xh_copy);
			if (xh == NULL)
				return 0;
			*off += 8; /* fragment header is always 8 bytes */
			proto = xh->next_hdr;
			*frag = 1;
			return proto; /* this is always the last ext hdr */
		case IPPROTO_NONE:
			return 0;
		default:
			return proto;
		}
	}
	return 0;
}
/* parse mbuf data to get packet type */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
	struct rte_net_hdr_lens *hdr_lens, uint32_t layers)
{
	struct rte_net_hdr_lens local_hdr_lens;
	const struct ether_hdr *eh;
	struct ether_hdr eh_copy;
	uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
	uint32_t off = 0;
	uint16_t proto;

	if (hdr_lens == NULL)
		hdr_lens = &local_hdr_lens;

	eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
	if (unlikely(eh == NULL))
		return 0;
	proto = eh->ether_type;
	off = sizeof(*eh);
	hdr_lens->l2_len = off;

	if ((layers & RTE_PTYPE_L2_MASK) == 0)
		return 0;

	if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
		goto l3; /* fast path if packet is IPv4 */
	if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
		const struct vlan_hdr *vh;
		struct vlan_hdr vh_copy;

		pkt_type = RTE_PTYPE_L2_ETHER_VLAN;
		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += sizeof(*vh);
		hdr_lens->l2_len += sizeof(*vh);
		proto = vh->eth_proto;
	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
		const struct vlan_hdr *vh;
		struct vlan_hdr vh_copy;

		pkt_type = RTE_PTYPE_L2_ETHER_QINQ;
		vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
			&vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += 2 * sizeof(*vh);
		hdr_lens->l2_len += 2 * sizeof(*vh);
		proto = vh->eth_proto;
	}

 l3:
	if ((layers & RTE_PTYPE_L3_MASK) == 0)
		return pkt_type;

	if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		const struct ipv4_hdr *ip4h;
		struct ipv4_hdr ip4h_copy;

		ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
		if (unlikely(ip4h == NULL))
			return pkt_type;

		pkt_type |= ptype_l3_ip(ip4h->version_ihl);
		hdr_lens->l3_len = ip4_hlen(ip4h);
		off += hdr_lens->l3_len;

		if ((layers & RTE_PTYPE_L4_MASK) == 0)
			return pkt_type;

		if (ip4h->fragment_offset & rte_cpu_to_be_16(
				IPV4_HDR_OFFSET_MASK | IPV4_HDR_MF_FLAG)) {
			pkt_type |= RTE_PTYPE_L4_FRAG;
			hdr_lens->l4_len = 0;
			return pkt_type;
		}
		proto = ip4h->next_proto_id;
		pkt_type |= ptype_l4(proto);
	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		const struct ipv6_hdr *ip6h;
		struct ipv6_hdr ip6h_copy;
		int frag = 0;

		ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
		if (unlikely(ip6h == NULL))
			return pkt_type;

		proto = ip6h->proto;
		hdr_lens->l3_len = sizeof(*ip6h);
		off += hdr_lens->l3_len;
		pkt_type |= ptype_l3_ip6(proto);
		if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
			proto = skip_ip6_ext(proto, m, &off, &frag);
			hdr_lens->l3_len = off - hdr_lens->l2_len;
		}
		if (proto == 0)
			return pkt_type;

		if ((layers & RTE_PTYPE_L4_MASK) == 0)
			return pkt_type;

		if (frag) {
			pkt_type |= RTE_PTYPE_L4_FRAG;
			hdr_lens->l4_len = 0;
			return pkt_type;
		}
		pkt_type |= ptype_l4(proto);
	}
	if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
		hdr_lens->l4_len = sizeof(struct udp_hdr);
		return pkt_type;
	} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		const struct tcp_hdr *th;
		struct tcp_hdr th_copy;

		th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
		if (unlikely(th == NULL))
			return pkt_type & (RTE_PTYPE_L2_MASK |
				RTE_PTYPE_L3_MASK);
		hdr_lens->l4_len = (th->data_off & 0xf0) >> 2;
		return pkt_type;
	} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) {
		hdr_lens->l4_len = sizeof(struct sctp_hdr);
		return pkt_type;
	} else {
		uint32_t prev_off = off;

		hdr_lens->l4_len = 0;

		if ((layers & RTE_PTYPE_TUNNEL_MASK) == 0)
			return pkt_type;

		pkt_type |= ptype_tunnel(&proto, m, &off);
		hdr_lens->tunnel_len = off - prev_off;
	}
	/* same job for inner header: we need to duplicate the code
	 * because the packet types do not have the same value.
	 */
	if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
		return pkt_type;

	hdr_lens->inner_l2_len = 0; /* no inner ethernet header by default */
	if (proto == rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
		eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
		if (unlikely(eh == NULL))
			return pkt_type;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER;
		proto = eh->ether_type;
		off += sizeof(*eh);
		hdr_lens->inner_l2_len = sizeof(*eh);
	}

	if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
		const struct vlan_hdr *vh;
		struct vlan_hdr vh_copy;

		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += sizeof(*vh);
		hdr_lens->inner_l2_len += sizeof(*vh);
		proto = vh->eth_proto;
	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
		const struct vlan_hdr *vh;
		struct vlan_hdr vh_copy;

		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_QINQ;
		vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
			&vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += 2 * sizeof(*vh);
		hdr_lens->inner_l2_len += 2 * sizeof(*vh);
		proto = vh->eth_proto;
	}
	if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
		return pkt_type;

	if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		const struct ipv4_hdr *ip4h;
		struct ipv4_hdr ip4h_copy;

		ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
		if (unlikely(ip4h == NULL))
			return pkt_type;

		pkt_type |= ptype_inner_l3_ip(ip4h->version_ihl);
		hdr_lens->inner_l3_len = ip4_hlen(ip4h);
		off += hdr_lens->inner_l3_len;

		if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
			return pkt_type;
		if (ip4h->fragment_offset &
				rte_cpu_to_be_16(IPV4_HDR_OFFSET_MASK |
					IPV4_HDR_MF_FLAG)) {
			pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
			hdr_lens->inner_l4_len = 0;
			return pkt_type;
		}
		proto = ip4h->next_proto_id;
		pkt_type |= ptype_inner_l4(proto);
	} else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		const struct ipv6_hdr *ip6h;
		struct ipv6_hdr ip6h_copy;
		int frag = 0;

		ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
		if (unlikely(ip6h == NULL))
			return pkt_type;

		proto = ip6h->proto;
		hdr_lens->inner_l3_len = sizeof(*ip6h);
		off += hdr_lens->inner_l3_len;
		pkt_type |= ptype_inner_l3_ip6(proto);
		if ((pkt_type & RTE_PTYPE_INNER_L3_MASK) ==
				RTE_PTYPE_INNER_L3_IPV6_EXT) {
			uint32_t prev_off = off;

			proto = skip_ip6_ext(proto, m, &off, &frag);
			hdr_lens->inner_l3_len += off - prev_off;
		}
		if (proto == 0)
			return pkt_type;

		if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
			return pkt_type;

		if (frag) {
			pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
			hdr_lens->inner_l4_len = 0;
			return pkt_type;
		}
		pkt_type |= ptype_inner_l4(proto);
	}
	if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
		hdr_lens->inner_l4_len = sizeof(struct udp_hdr);
	} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
			RTE_PTYPE_INNER_L4_TCP) {
		const struct tcp_hdr *th;
		struct tcp_hdr th_copy;

		th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
		if (unlikely(th == NULL))
			return pkt_type & (RTE_PTYPE_INNER_L2_MASK |
				RTE_PTYPE_INNER_L3_MASK);
		hdr_lens->inner_l4_len = (th->data_off & 0xf0) >> 2;
	} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
			RTE_PTYPE_INNER_L4_SCTP) {
		hdr_lens->inner_l4_len = sizeof(struct sctp_hdr);
	} else {
		hdr_lens->inner_l4_len = 0;
	}

	return pkt_type;
}
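/*
 * Illustrative usage sketch only, not used by the library: how an
 * application might classify a burst of received mbufs with
 * rte_net_get_ptype(). The function name and the "count TCP packets"
 * policy are hypothetical examples, not part of the API.
 */
static __rte_unused unsigned int
example_count_tcp(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	struct rte_net_hdr_lens hdr_lens;
	unsigned int nb_tcp = 0;
	uint32_t ptype;
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		/* only ask for the outer l2/l3/l4 layers */
		ptype = rte_net_get_ptype(pkts[i], &hdr_lens,
			RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK |
			RTE_PTYPE_L4_MASK);
		/* hdr_lens.l2_len + hdr_lens.l3_len gives the L4 offset */
		if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
			nb_tcp++;
	}
	return nb_tcp;
}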