1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
8 #include <rte_mbuf_ptype.h>
9 #include <rte_byteorder.h>
10 #include <rte_ether.h>
18 #include <rte_os_shim.h>
20 /* get l3 packet type from ip6 next protocol */
22 ptype_l3_ip6(uint8_t ip6_proto)
24 static const uint32_t ip6_ext_proto_map[256] = {
25 [IPPROTO_HOPOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
26 [IPPROTO_ROUTING] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
27 [IPPROTO_FRAGMENT] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
28 [IPPROTO_ESP] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
29 [IPPROTO_AH] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
30 [IPPROTO_DSTOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
33 return RTE_PTYPE_L3_IPV6 + ip6_ext_proto_map[ip6_proto];
36 /* get l3 packet type from ip version and header length */
38 ptype_l3_ip(uint8_t ipv_ihl)
40 static const uint32_t ptype_l3_ip_proto_map[256] = {
41 [0x45] = RTE_PTYPE_L3_IPV4,
42 [0x46] = RTE_PTYPE_L3_IPV4_EXT,
43 [0x47] = RTE_PTYPE_L3_IPV4_EXT,
44 [0x48] = RTE_PTYPE_L3_IPV4_EXT,
45 [0x49] = RTE_PTYPE_L3_IPV4_EXT,
46 [0x4A] = RTE_PTYPE_L3_IPV4_EXT,
47 [0x4B] = RTE_PTYPE_L3_IPV4_EXT,
48 [0x4C] = RTE_PTYPE_L3_IPV4_EXT,
49 [0x4D] = RTE_PTYPE_L3_IPV4_EXT,
50 [0x4E] = RTE_PTYPE_L3_IPV4_EXT,
51 [0x4F] = RTE_PTYPE_L3_IPV4_EXT,
54 return ptype_l3_ip_proto_map[ipv_ihl];
57 /* get l4 packet type from proto */
59 ptype_l4(uint8_t proto)
61 static const uint32_t ptype_l4_proto[256] = {
62 [IPPROTO_UDP] = RTE_PTYPE_L4_UDP,
63 [IPPROTO_TCP] = RTE_PTYPE_L4_TCP,
64 [IPPROTO_SCTP] = RTE_PTYPE_L4_SCTP,
67 return ptype_l4_proto[proto];
70 /* get inner l3 packet type from ip6 next protocol */
72 ptype_inner_l3_ip6(uint8_t ip6_proto)
74 static const uint32_t ptype_inner_ip6_ext_proto_map[256] = {
75 [IPPROTO_HOPOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
76 RTE_PTYPE_INNER_L3_IPV6,
77 [IPPROTO_ROUTING] = RTE_PTYPE_INNER_L3_IPV6_EXT -
78 RTE_PTYPE_INNER_L3_IPV6,
79 [IPPROTO_FRAGMENT] = RTE_PTYPE_INNER_L3_IPV6_EXT -
80 RTE_PTYPE_INNER_L3_IPV6,
81 [IPPROTO_ESP] = RTE_PTYPE_INNER_L3_IPV6_EXT -
82 RTE_PTYPE_INNER_L3_IPV6,
83 [IPPROTO_AH] = RTE_PTYPE_INNER_L3_IPV6_EXT -
84 RTE_PTYPE_INNER_L3_IPV6,
85 [IPPROTO_DSTOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
86 RTE_PTYPE_INNER_L3_IPV6,
89 return RTE_PTYPE_INNER_L3_IPV6 +
90 ptype_inner_ip6_ext_proto_map[ip6_proto];
93 /* get inner l3 packet type from ip version and header length */
95 ptype_inner_l3_ip(uint8_t ipv_ihl)
97 static const uint32_t ptype_inner_l3_ip_proto_map[256] = {
98 [0x45] = RTE_PTYPE_INNER_L3_IPV4,
99 [0x46] = RTE_PTYPE_INNER_L3_IPV4_EXT,
100 [0x47] = RTE_PTYPE_INNER_L3_IPV4_EXT,
101 [0x48] = RTE_PTYPE_INNER_L3_IPV4_EXT,
102 [0x49] = RTE_PTYPE_INNER_L3_IPV4_EXT,
103 [0x4A] = RTE_PTYPE_INNER_L3_IPV4_EXT,
104 [0x4B] = RTE_PTYPE_INNER_L3_IPV4_EXT,
105 [0x4C] = RTE_PTYPE_INNER_L3_IPV4_EXT,
106 [0x4D] = RTE_PTYPE_INNER_L3_IPV4_EXT,
107 [0x4E] = RTE_PTYPE_INNER_L3_IPV4_EXT,
108 [0x4F] = RTE_PTYPE_INNER_L3_IPV4_EXT,
111 return ptype_inner_l3_ip_proto_map[ipv_ihl];
114 /* get inner l4 packet type from proto */
116 ptype_inner_l4(uint8_t proto)
118 static const uint32_t ptype_inner_l4_proto[256] = {
119 [IPPROTO_UDP] = RTE_PTYPE_INNER_L4_UDP,
120 [IPPROTO_TCP] = RTE_PTYPE_INNER_L4_TCP,
121 [IPPROTO_SCTP] = RTE_PTYPE_INNER_L4_SCTP,
124 return ptype_inner_l4_proto[proto];
127 /* get the tunnel packet type if any, update proto and off. */
/*
 * NOTE(review): this chunk elides several lines of the function (the
 * return-type line, the dispatch switch on *proto, the opt_len
 * initializers, error paths and closing braces). Comments below only
 * describe what is visible here.
 */
129 ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
/*
 * Option-length table indexed by GRE flag bits; presumably maps each
 * C/K/S flag combination to the total GRE header size to skip, with 0
 * marking an invalid combination -- initializer lines are elided here,
 * confirm against the full source.
 */
134 static const uint8_t opt_len[16] = {
144 const struct rte_gre_hdr *gh;
145 struct rte_gre_hdr gh_copy;
/* Linearize the GRE base header out of the (possibly segmented) mbuf. */
148 gh = rte_pktmbuf_read(m, *off, sizeof(*gh), &gh_copy);
149 if (unlikely(gh == NULL))
/*
 * First 16 bits of the GRE header hold the flag/version field.
 * NOTE(review): a shift/mask narrowing 'flags' to the 16-entry table
 * index appears to be elided between these lines -- verify.
 */
152 flags = rte_be_to_cpu_16(*(const uint16_t *)gh);
/* Zero in the table means an unsupported flag combination. */
154 if (opt_len[flags] == 0)
/* Skip the base header plus whatever optional fields the flags select. */
157 *off += opt_len[flags];
/* Transparent Ethernet Bridging payload => NVGRE, otherwise plain GRE. */
159 if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB))
160 return RTE_PTYPE_TUNNEL_NVGRE;
162 return RTE_PTYPE_TUNNEL_GRE;
/* IP-in-IP: rewrite *proto so the caller re-parses the inner header. */
165 *proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
166 return RTE_PTYPE_TUNNEL_IP;
168 *proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
169 return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
175 /* parse ipv6 extended headers, update offset and return next proto */
/*
 * Walks the chain of IPv6 extension headers starting at *off, advancing
 * *off past each one, and yields the first non-extension next-header
 * value. NOTE(review): this chunk elides the return-type line, the
 * 'struct ext_hdr' definition, the switch keyword, the fragment-flag
 * output handling and the error/default paths -- comments describe only
 * what is visible.
 */
177 rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
184 const struct ext_hdr *xh;
185 struct ext_hdr xh_copy;
/* Bound the walk so a hostile header chain cannot loop forever. */
190 #define MAX_EXT_HDRS 5
191 for (i = 0; i < MAX_EXT_HDRS; i++) {
/* Variable-length extension headers sharing the generic layout. */
193 case IPPROTO_HOPOPTS:
194 case IPPROTO_ROUTING:
195 case IPPROTO_DSTOPTS:
/* Copy the generic (next_hdr, len) prefix out of the mbuf. */
196 xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
/* Hdr Ext Len counts 8-octet units, not including the first 8 octets. */
200 *off += (xh->len + 1) * 8;
201 proto = xh->next_hdr;
/* Fragment header: fixed size, and terminates the walk below. */
203 case IPPROTO_FRAGMENT:
204 xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
209 proto = xh->next_hdr;
211 return proto; /* this is always the last ext hdr */
221 /* parse mbuf data to get packet type */
/*
 * Parse the packet headers in mbuf 'm' and return an RTE_PTYPE_* bit
 * field describing the L2/L3/L4 (and, after a tunnel, inner) layers,
 * limited to the layers requested in 'layers'. Header lengths are
 * written to *hdr_lens (a local scratch struct is used when the caller
 * passes NULL). NOTE(review): many lines of this function are elided
 * from this chunk (labels, goto targets, closing braces, some error
 * returns); comments annotate only the visible lines.
 */
222 uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
223 struct rte_net_hdr_lens *hdr_lens, uint32_t layers)
225 struct rte_net_hdr_lens local_hdr_lens;
226 const struct rte_ether_hdr *eh;
227 struct rte_ether_hdr eh_copy;
/* Every parsed packet is at least an Ethernet frame. */
228 uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
233 if (hdr_lens == NULL)
234 hdr_lens = &local_hdr_lens;
/* --- outer L2: Ethernet, then optional VLAN/QinQ/MPLS tags --- */
236 eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
237 if (unlikely(eh == NULL))
239 proto = eh->ether_type;
241 hdr_lens->l2_len = off;
/* Stop early when the caller did not ask for L2 classification. */
243 if ((layers & RTE_PTYPE_L2_MASK) == 0)
246 if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
247 goto l3; /* fast path if packet is IPv4 */
249 if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
250 const struct rte_vlan_hdr *vh;
251 struct rte_vlan_hdr vh_copy;
253 pkt_type = RTE_PTYPE_L2_ETHER_VLAN;
254 vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
255 if (unlikely(vh == NULL))
258 hdr_lens->l2_len += sizeof(*vh);
259 proto = vh->eth_proto;
260 } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
261 const struct rte_vlan_hdr *vh;
262 struct rte_vlan_hdr vh_copy;
264 pkt_type = RTE_PTYPE_L2_ETHER_QINQ;
/* QinQ: read the inner tag (one VLAN header past 'off'). */
265 vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
267 if (unlikely(vh == NULL))
269 off += 2 * sizeof(*vh);
270 hdr_lens->l2_len += 2 * sizeof(*vh);
271 proto = vh->eth_proto;
272 } else if ((proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLS)) ||
273 (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLSM))) {
275 const struct rte_mpls_hdr *mh;
276 struct rte_mpls_hdr mh_copy;
/* Walk at most 5 stacked MPLS labels looking for bottom-of-stack. */
278 #define MAX_MPLS_HDR 5
279 for (i = 0; i < MAX_MPLS_HDR; i++) {
280 mh = rte_pktmbuf_read(m, off + (i * sizeof(*mh)),
281 sizeof(*mh), &mh_copy);
282 if (unlikely(mh == NULL))
/* Too many labels: give up on deeper classification. */
285 if (i == MAX_MPLS_HDR)
287 pkt_type = RTE_PTYPE_L2_ETHER_MPLS;
288 hdr_lens->l2_len += (sizeof(*mh) * i);
/* --- outer L3: IPv4 or IPv6 --- */
293 if ((layers & RTE_PTYPE_L3_MASK) == 0)
296 if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
297 const struct rte_ipv4_hdr *ip4h;
298 struct rte_ipv4_hdr ip4h_copy;
300 ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
301 if (unlikely(ip4h == NULL))
304 pkt_type |= ptype_l3_ip(ip4h->version_ihl);
305 hdr_lens->l3_len = rte_ipv4_hdr_len(ip4h);
306 off += hdr_lens->l3_len;
308 if ((layers & RTE_PTYPE_L4_MASK) == 0)
/* Any fragment-offset or more-fragments bit => L4 is a fragment. */
311 if (ip4h->fragment_offset & rte_cpu_to_be_16(
312 RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)) {
313 pkt_type |= RTE_PTYPE_L4_FRAG;
314 hdr_lens->l4_len = 0;
317 proto = ip4h->next_proto_id;
318 pkt_type |= ptype_l4(proto);
319 } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
320 const struct rte_ipv6_hdr *ip6h;
321 struct rte_ipv6_hdr ip6h_copy;
324 ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
325 if (unlikely(ip6h == NULL))
329 hdr_lens->l3_len = sizeof(*ip6h);
330 off += hdr_lens->l3_len;
331 pkt_type |= ptype_l3_ip6(proto);
/* Extension headers present: skip them to find the real L4 proto. */
332 if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
333 ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
337 hdr_lens->l3_len = off - hdr_lens->l2_len;
342 if ((layers & RTE_PTYPE_L4_MASK) == 0)
/* presumably guarded by the 'frag' flag from the ext-header walk --
 * the condition line is elided here, confirm against full source. */
346 pkt_type |= RTE_PTYPE_L4_FRAG;
347 hdr_lens->l4_len = 0;
350 pkt_type |= ptype_l4(proto);
/* --- outer L4 lengths; non-UDP/TCP/SCTP falls through to tunnels --- */
353 if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
354 hdr_lens->l4_len = sizeof(struct rte_udp_hdr);
356 } else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
357 const struct rte_tcp_hdr *th;
358 struct rte_tcp_hdr th_copy;
360 th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
/* Truncated TCP header: report only the layers parsed so far. */
361 if (unlikely(th == NULL))
362 return pkt_type & (RTE_PTYPE_L2_MASK |
/* data_off upper nibble is the header length in 32-bit words. */
364 hdr_lens->l4_len = (th->data_off & 0xf0) >> 2;
366 } else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) {
367 hdr_lens->l4_len = sizeof(struct rte_sctp_hdr);
/* --- tunnel detection (GRE/NVGRE/IP-in-IP via ptype_tunnel) --- */
370 uint32_t prev_off = off;
372 hdr_lens->l4_len = 0;
374 if ((layers & RTE_PTYPE_TUNNEL_MASK) == 0)
377 pkt_type |= ptype_tunnel(&proto, m, &off);
378 hdr_lens->tunnel_len = off - prev_off;
381 /* same job for inner header: we need to duplicate the code
382 * because the packet types do not have the same value.
/* --- inner L2: optional Ethernet (TEB) plus VLAN/QinQ tags --- */
384 if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
387 hdr_lens->inner_l2_len = 0;
388 if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
389 eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
390 if (unlikely(eh == NULL))
392 pkt_type |= RTE_PTYPE_INNER_L2_ETHER;
393 proto = eh->ether_type;
395 hdr_lens->inner_l2_len = sizeof(*eh);
398 if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
399 const struct rte_vlan_hdr *vh;
400 struct rte_vlan_hdr vh_copy;
/* Replace the plain inner-Ether type with the VLAN-tagged one. */
402 pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
403 pkt_type |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
404 vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
405 if (unlikely(vh == NULL))
408 hdr_lens->inner_l2_len += sizeof(*vh);
409 proto = vh->eth_proto;
410 } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
411 const struct rte_vlan_hdr *vh;
412 struct rte_vlan_hdr vh_copy;
414 pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
415 pkt_type |= RTE_PTYPE_INNER_L2_ETHER_QINQ;
416 vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
418 if (unlikely(vh == NULL))
420 off += 2 * sizeof(*vh);
421 hdr_lens->inner_l2_len += 2 * sizeof(*vh);
422 proto = vh->eth_proto;
/* --- inner L3: mirrors the outer logic with INNER ptype values --- */
425 if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
428 if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
429 const struct rte_ipv4_hdr *ip4h;
430 struct rte_ipv4_hdr ip4h_copy;
432 ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
433 if (unlikely(ip4h == NULL))
436 pkt_type |= ptype_inner_l3_ip(ip4h->version_ihl);
437 hdr_lens->inner_l3_len = rte_ipv4_hdr_len(ip4h);
438 off += hdr_lens->inner_l3_len;
440 if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
442 if (ip4h->fragment_offset &
443 rte_cpu_to_be_16(RTE_IPV4_HDR_OFFSET_MASK |
444 RTE_IPV4_HDR_MF_FLAG)) {
445 pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
446 hdr_lens->inner_l4_len = 0;
449 proto = ip4h->next_proto_id;
450 pkt_type |= ptype_inner_l4(proto);
451 } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
452 const struct rte_ipv6_hdr *ip6h;
453 struct rte_ipv6_hdr ip6h_copy;
456 ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
457 if (unlikely(ip6h == NULL))
461 hdr_lens->inner_l3_len = sizeof(*ip6h);
462 off += hdr_lens->inner_l3_len;
463 pkt_type |= ptype_inner_l3_ip6(proto);
464 if ((pkt_type & RTE_PTYPE_INNER_L3_MASK) ==
465 RTE_PTYPE_INNER_L3_IPV6_EXT) {
469 ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
/* 'prev_off' here presumably marks the inner L3 start -- its
 * declaration/assignment line is elided in this chunk. */
473 hdr_lens->inner_l3_len += off - prev_off;
478 if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
482 pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
483 hdr_lens->inner_l4_len = 0;
486 pkt_type |= ptype_inner_l4(proto);
/* --- inner L4 lengths --- */
489 if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
490 hdr_lens->inner_l4_len = sizeof(struct rte_udp_hdr);
491 } else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
492 RTE_PTYPE_INNER_L4_TCP) {
493 const struct rte_tcp_hdr *th;
494 struct rte_tcp_hdr th_copy;
496 th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
/* Truncated inner TCP header: keep only the inner L2/L3 bits. */
497 if (unlikely(th == NULL))
498 return pkt_type & (RTE_PTYPE_INNER_L2_MASK |
499 RTE_PTYPE_INNER_L3_MASK);
500 hdr_lens->inner_l4_len = (th->data_off & 0xf0) >> 2;
501 } else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
502 RTE_PTYPE_INNER_L4_SCTP) {
503 hdr_lens->inner_l4_len = sizeof(struct rte_sctp_hdr);
505 hdr_lens->inner_l4_len = 0;