/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_gre.h>
#include <rte_mpls.h>
#include <rte_net.h>

/* get l3 packet type from ip6 next protocol */
static uint32_t
ptype_l3_ip6(uint8_t ip6_proto)
{
	static const uint32_t ip6_ext_proto_map[256] = {
		[IPPROTO_HOPOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_ROUTING] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_FRAGMENT] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_ESP] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_AH] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
		[IPPROTO_DSTOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
	};

	return RTE_PTYPE_L3_IPV6 + ip6_ext_proto_map[ip6_proto];
}
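
/*
 * Worked example (added, not in the original source): the table stores the
 * *delta* between the two possible ptypes, so a single add replaces a branch:
 *   ptype_l3_ip6(IPPROTO_TCP)      -> RTE_PTYPE_L3_IPV6 + 0
 *   ptype_l3_ip6(IPPROTO_FRAGMENT) -> RTE_PTYPE_L3_IPV6 +
 *       (RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6) = RTE_PTYPE_L3_IPV6_EXT
 */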

/* get l3 packet type from ip version and header length */
static uint32_t
ptype_l3_ip(uint8_t ipv_ihl)
{
	static const uint32_t ptype_l3_ip_proto_map[256] = {
		[0x45] = RTE_PTYPE_L3_IPV4,
		[0x46] = RTE_PTYPE_L3_IPV4_EXT,
		[0x47] = RTE_PTYPE_L3_IPV4_EXT,
		[0x48] = RTE_PTYPE_L3_IPV4_EXT,
		[0x49] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4A] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4B] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4C] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4D] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4E] = RTE_PTYPE_L3_IPV4_EXT,
		[0x4F] = RTE_PTYPE_L3_IPV4_EXT,
	};

	return ptype_l3_ip_proto_map[ipv_ihl];
}
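
/*
 * Example (added, illustrative): version_ihl packs the IP version in the
 * high nibble and the header length in 32-bit words in the low nibble, so
 * 0x45 is IPv4 with the minimal 20-byte header (no options), 0x46..0x4F is
 * IPv4 with options, and anything else maps to 0, i.e. RTE_PTYPE_UNKNOWN.
 */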

/* get l4 packet type from proto */
static uint32_t
ptype_l4(uint8_t proto)
{
	static const uint32_t ptype_l4_proto[256] = {
		[IPPROTO_UDP] = RTE_PTYPE_L4_UDP,
		[IPPROTO_TCP] = RTE_PTYPE_L4_TCP,
		[IPPROTO_SCTP] = RTE_PTYPE_L4_SCTP,
	};

	return ptype_l4_proto[proto];
}
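
/*
 * Note (added): entries absent from the static table are zero-initialized,
 * so an unlisted protocol yields RTE_PTYPE_UNKNOWN, e.g.
 * ptype_l4(IPPROTO_ICMP) == 0.
 */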

/* get inner l3 packet type from ip6 next protocol */
static uint32_t
ptype_inner_l3_ip6(uint8_t ip6_proto)
{
	static const uint32_t ptype_inner_ip6_ext_proto_map[256] = {
		[IPPROTO_HOPOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_ROUTING] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_FRAGMENT] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_ESP] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_AH] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
		[IPPROTO_DSTOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
			RTE_PTYPE_INNER_L3_IPV6,
	};

	return RTE_PTYPE_INNER_L3_IPV6 +
		ptype_inner_ip6_ext_proto_map[ip6_proto];
}

/* get inner l3 packet type from ip version and header length */
static uint32_t
ptype_inner_l3_ip(uint8_t ipv_ihl)
{
	static const uint32_t ptype_inner_l3_ip_proto_map[256] = {
		[0x45] = RTE_PTYPE_INNER_L3_IPV4,
		[0x46] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x47] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x48] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x49] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4A] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4B] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4C] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4D] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4E] = RTE_PTYPE_INNER_L3_IPV4_EXT,
		[0x4F] = RTE_PTYPE_INNER_L3_IPV4_EXT,
	};

	return ptype_inner_l3_ip_proto_map[ipv_ihl];
}

/* get inner l4 packet type from proto */
static uint32_t
ptype_inner_l4(uint8_t proto)
{
	static const uint32_t ptype_inner_l4_proto[256] = {
		[IPPROTO_UDP] = RTE_PTYPE_INNER_L4_UDP,
		[IPPROTO_TCP] = RTE_PTYPE_INNER_L4_TCP,
		[IPPROTO_SCTP] = RTE_PTYPE_INNER_L4_SCTP,
	};

	return ptype_inner_l4_proto[proto];
}
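
/*
 * Note (added): the three ptype_inner_* helpers mirror ptype_l3_ip6,
 * ptype_l3_ip and ptype_l4, but return RTE_PTYPE_INNER_* values, which
 * occupy a different bit range of the packet type, so the lookup tables
 * cannot be shared.
 */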

/* get the tunnel packet type if any, update proto and off. */
static uint32_t
ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
	uint32_t *off)
{
	switch (*proto) {
	case IPPROTO_GRE: {
		/* total GRE header length indexed by the C/R/K/S flag nibble;
		 * combinations with the reserved R bit set stay 0 (invalid).
		 */
		static const uint8_t opt_len[16] = {
			[0x0] = 4, [0x1] = 8, [0x2] = 8, [0x8] = 8,
			[0x3] = 12, [0x9] = 12, [0xa] = 12,
			[0xb] = 16,
		};
		const struct rte_gre_hdr *gh;
		struct rte_gre_hdr gh_copy;
		uint16_t flags;

		gh = rte_pktmbuf_read(m, *off, sizeof(*gh), &gh_copy);
		if (unlikely(gh == NULL))
			return 0;

		flags = rte_be_to_cpu_16(*(const uint16_t *)gh);
		flags >>= 12;
		if (opt_len[flags] == 0)
			return 0;

		*off += opt_len[flags];
		*proto = gh->proto;
		if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB))
			return RTE_PTYPE_TUNNEL_NVGRE;
		else
			return RTE_PTYPE_TUNNEL_GRE;
	}
	case IPPROTO_IPIP:
		*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		return RTE_PTYPE_TUNNEL_IP;
	case IPPROTO_IPV6:
		*proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
	default:
		return 0;
	}
}
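
/*
 * Worked example (added, not in the original): after the 12-bit shift the
 * nibble holds the GRE C (0x8), K (0x2) and S (0x1) flag bits; each set
 * flag adds 4 bytes to the 4-byte base header. A GRE header with checksum
 * and key (nibble 0xa) is therefore 12 bytes, matching opt_len[0xa] above.
 */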

/* get the ipv4 header length */
static uint8_t
ip4_hlen(const struct rte_ipv4_hdr *hdr)
{
	return (hdr->version_ihl & 0xf) * 4;
}
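
/*
 * Example (added): for a minimal header, version_ihl is 0x45, so
 * ip4_hlen() returns (0x45 & 0xf) * 4 = 20 bytes; the maximum IHL of 0xf
 * gives 60 bytes.
 */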

/* parse ipv6 extended headers, update offset and return next proto */
int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
	int *frag)
{
	struct ext_hdr { uint8_t next_hdr; uint8_t len; };
	const struct ext_hdr *xh;
	struct ext_hdr xh_copy;
	unsigned int i;

	*frag = 0;

#define MAX_EXT_HDRS 5
	for (i = 0; i < MAX_EXT_HDRS; i++) {
		switch (proto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
				&xh_copy);
			if (xh == NULL)
				return -1;
			*off += (xh->len + 1) * 8;
			proto = xh->next_hdr;
			break;
		case IPPROTO_FRAGMENT:
			xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
				&xh_copy);
			if (xh == NULL)
				return -1;
			*off += 8;
			proto = xh->next_hdr;
			*frag = 1;
			return proto; /* this is always the last ext hdr */
		case IPPROTO_NONE:
			return 0;
		default:
			return proto;
		}
	}
	return -1;
}
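
/*
 * Usage sketch (added; "m" and "ip6h" are hypothetical caller variables):
 * skip the extension chain of an IPv6 packet whose extension headers start
 * right after the fixed 40-byte IPv6 header.
 *
 *	uint32_t off = sizeof(struct rte_ether_hdr) +
 *		sizeof(struct rte_ipv6_hdr);
 *	int frag;
 *	int next = rte_net_skip_ip6_ext(ip6h->proto, m, &off, &frag);
 *	if (next == IPPROTO_TCP)
 *		;	off now points at the TCP header
 */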

/* parse mbuf data to get packet type */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
	struct rte_net_hdr_lens *hdr_lens, uint32_t layers)
{
	struct rte_net_hdr_lens local_hdr_lens;
	const struct rte_ether_hdr *eh;
	struct rte_ether_hdr eh_copy;
	uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
	uint32_t off = 0;
	uint16_t proto;
	int ret;

	if (hdr_lens == NULL)
		hdr_lens = &local_hdr_lens;

	eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
	if (unlikely(eh == NULL))
		return 0;
	proto = eh->ether_type;
	off = sizeof(*eh);
	hdr_lens->l2_len = off;

	if ((layers & RTE_PTYPE_L2_MASK) == 0)
		return 0;

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		goto l3; /* fast path if packet is IPv4 */

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type = RTE_PTYPE_L2_ETHER_VLAN;
		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += sizeof(*vh);
		hdr_lens->l2_len += sizeof(*vh);
		proto = vh->eth_proto;
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type = RTE_PTYPE_L2_ETHER_QINQ;
		vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
			&vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += 2 * sizeof(*vh);
		hdr_lens->l2_len += 2 * sizeof(*vh);
		proto = vh->eth_proto;
	} else if ((proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLS)) ||
		(proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLSM))) {
		unsigned int i;
		const struct rte_mpls_hdr *mh;
		struct rte_mpls_hdr mh_copy;

#define MAX_MPLS_HDR 5
		for (i = 0; i < MAX_MPLS_HDR; i++) {
			mh = rte_pktmbuf_read(m, off + (i * sizeof(*mh)),
				sizeof(*mh), &mh_copy);
			if (unlikely(mh == NULL))
				return pkt_type;
			if (mh->bs)
				break; /* bottom of stack reached */
		}
		if (i == MAX_MPLS_HDR)
			return pkt_type;
		pkt_type = RTE_PTYPE_L2_ETHER_MPLS;
		/* i is the index of the bottom-of-stack entry, so i + 1
		 * headers were consumed.
		 */
		hdr_lens->l2_len += sizeof(*mh) * (i + 1);
		return pkt_type;
	}

l3:
	if ((layers & RTE_PTYPE_L3_MASK) == 0)
		return pkt_type;

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		const struct rte_ipv4_hdr *ip4h;
		struct rte_ipv4_hdr ip4h_copy;

		ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
		if (unlikely(ip4h == NULL))
			return pkt_type;

		pkt_type |= ptype_l3_ip(ip4h->version_ihl);
		hdr_lens->l3_len = ip4_hlen(ip4h);
		off += hdr_lens->l3_len;

		if ((layers & RTE_PTYPE_L4_MASK) == 0)
			return pkt_type;

		if (ip4h->fragment_offset & rte_cpu_to_be_16(
				RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)) {
			pkt_type |= RTE_PTYPE_L4_FRAG;
			hdr_lens->l4_len = 0;
			return pkt_type;
		}
		proto = ip4h->next_proto_id;
		pkt_type |= ptype_l4(proto);
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		const struct rte_ipv6_hdr *ip6h;
		struct rte_ipv6_hdr ip6h_copy;
		int frag = 0;

		ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
		if (unlikely(ip6h == NULL))
			return pkt_type;

		proto = ip6h->proto;
		hdr_lens->l3_len = sizeof(*ip6h);
		off += hdr_lens->l3_len;
		pkt_type |= ptype_l3_ip6(proto);
		if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
			ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
			if (ret < 0)
				return pkt_type;
			proto = ret;
			hdr_lens->l3_len = off - hdr_lens->l2_len;
		}
		if (proto == 0)
			return pkt_type;

		if ((layers & RTE_PTYPE_L4_MASK) == 0)
			return pkt_type;

		if (frag) {
			pkt_type |= RTE_PTYPE_L4_FRAG;
			hdr_lens->l4_len = 0;
			return pkt_type;
		}
		pkt_type |= ptype_l4(proto);
	}

	if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
		hdr_lens->l4_len = sizeof(struct rte_udp_hdr);
		return pkt_type;
	} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		const struct rte_tcp_hdr *th;
		struct rte_tcp_hdr th_copy;

		th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
		if (unlikely(th == NULL))
			return pkt_type & (RTE_PTYPE_L2_MASK |
				RTE_PTYPE_L3_MASK);
		hdr_lens->l4_len = (th->data_off & 0xf0) >> 2;
		return pkt_type;
	} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) {
		hdr_lens->l4_len = sizeof(struct rte_sctp_hdr);
		return pkt_type;
	} else {
		uint32_t prev_off = off;

		hdr_lens->l4_len = 0;

		if ((layers & RTE_PTYPE_TUNNEL_MASK) == 0)
			return pkt_type;

		pkt_type |= ptype_tunnel(&proto, m, &off);
		hdr_lens->tunnel_len = off - prev_off;
	}

	/* same job for inner header: we need to duplicate the code
	 * because the packet types do not have the same value.
	 */
	if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
		return pkt_type;

	hdr_lens->inner_l2_len = 0;
	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
		eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
		if (unlikely(eh == NULL))
			return pkt_type;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER;
		proto = eh->ether_type;
		off += sizeof(*eh);
		hdr_lens->inner_l2_len = sizeof(*eh);
	}

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += sizeof(*vh);
		hdr_lens->inner_l2_len += sizeof(*vh);
		proto = vh->eth_proto;
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
		const struct rte_vlan_hdr *vh;
		struct rte_vlan_hdr vh_copy;

		pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
		pkt_type |= RTE_PTYPE_INNER_L2_ETHER_QINQ;
		vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
			&vh_copy);
		if (unlikely(vh == NULL))
			return pkt_type;
		off += 2 * sizeof(*vh);
		hdr_lens->inner_l2_len += 2 * sizeof(*vh);
		proto = vh->eth_proto;
	}

	if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
		return pkt_type;

	if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		const struct rte_ipv4_hdr *ip4h;
		struct rte_ipv4_hdr ip4h_copy;

		ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
		if (unlikely(ip4h == NULL))
			return pkt_type;

		pkt_type |= ptype_inner_l3_ip(ip4h->version_ihl);
		hdr_lens->inner_l3_len = ip4_hlen(ip4h);
		off += hdr_lens->inner_l3_len;

		if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
			return pkt_type;
		if (ip4h->fragment_offset &
				rte_cpu_to_be_16(RTE_IPV4_HDR_OFFSET_MASK |
					RTE_IPV4_HDR_MF_FLAG)) {
			pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
			hdr_lens->inner_l4_len = 0;
			return pkt_type;
		}
		proto = ip4h->next_proto_id;
		pkt_type |= ptype_inner_l4(proto);
	} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		const struct rte_ipv6_hdr *ip6h;
		struct rte_ipv6_hdr ip6h_copy;
		int frag = 0;

		ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
		if (unlikely(ip6h == NULL))
			return pkt_type;

		proto = ip6h->proto;
		hdr_lens->inner_l3_len = sizeof(*ip6h);
		off += hdr_lens->inner_l3_len;
		pkt_type |= ptype_inner_l3_ip6(proto);
		if ((pkt_type & RTE_PTYPE_INNER_L3_MASK) ==
				RTE_PTYPE_INNER_L3_IPV6_EXT) {
			uint32_t prev_off = off;

			ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
			if (ret < 0)
				return pkt_type;
			proto = ret;
			hdr_lens->inner_l3_len += off - prev_off;
		}
		if (proto == 0)
			return pkt_type;

		if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
			return pkt_type;

		if (frag) {
			pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
			hdr_lens->inner_l4_len = 0;
			return pkt_type;
		}
		pkt_type |= ptype_inner_l4(proto);
	}

	if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
		hdr_lens->inner_l4_len = sizeof(struct rte_udp_hdr);
	} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
			RTE_PTYPE_INNER_L4_TCP) {
		const struct rte_tcp_hdr *th;
		struct rte_tcp_hdr th_copy;

		th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
		if (unlikely(th == NULL))
			return pkt_type & (RTE_PTYPE_INNER_L2_MASK |
				RTE_PTYPE_INNER_L3_MASK);
		hdr_lens->inner_l4_len = (th->data_off & 0xf0) >> 2;
	} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
			RTE_PTYPE_INNER_L4_SCTP) {
		hdr_lens->inner_l4_len = sizeof(struct rte_sctp_hdr);
	} else {
		hdr_lens->inner_l4_len = 0;
	}

	return pkt_type;
}
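
/*
 * Usage sketch (added, not part of the library): classify a received mbuf
 * and locate its L4 payload. "m" is assumed to hold a complete Ethernet
 * frame starting at data offset 0.
 *
 *	struct rte_net_hdr_lens hdr_lens;
 *	uint32_t ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
 *
 *	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
 *		uint32_t payload_off = hdr_lens.l2_len + hdr_lens.l3_len +
 *			hdr_lens.l4_len;
 *		;	TCP payload starts at payload_off within the mbuf
 *	}
 *
 * Passing a narrower mask, e.g. RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK,
 * stops the parser early and skips the L4 and tunnel stages.
 */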