lib/librte_net/rte_net.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_gre.h>
#include <rte_mpls.h>
#include <rte_net.h>

/* get l3 packet type from ip6 next protocol */
static uint32_t
ptype_l3_ip6(uint8_t ip6_proto)
{
        static const uint32_t ip6_ext_proto_map[256] = {
                [IPPROTO_HOPOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_ROUTING] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_FRAGMENT] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_ESP] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_AH] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_DSTOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
        };

        return RTE_PTYPE_L3_IPV6 + ip6_ext_proto_map[ip6_proto];
}

/* get l3 packet type from ip version and header length */
static uint32_t
ptype_l3_ip(uint8_t ipv_ihl)
{
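        /* index is the version_ihl byte: 0x45 is a plain 20-byte IPv4
         * header, 0x46 to 0x4f are IPv4 headers carrying options
         */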
        static const uint32_t ptype_l3_ip_proto_map[256] = {
                [0x45] = RTE_PTYPE_L3_IPV4,
                [0x46] = RTE_PTYPE_L3_IPV4_EXT,
                [0x47] = RTE_PTYPE_L3_IPV4_EXT,
                [0x48] = RTE_PTYPE_L3_IPV4_EXT,
                [0x49] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4A] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4B] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4C] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4D] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4E] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4F] = RTE_PTYPE_L3_IPV4_EXT,
        };

        return ptype_l3_ip_proto_map[ipv_ihl];
}

/* get l4 packet type from proto */
static uint32_t
ptype_l4(uint8_t proto)
{
        static const uint32_t ptype_l4_proto[256] = {
                [IPPROTO_UDP] = RTE_PTYPE_L4_UDP,
                [IPPROTO_TCP] = RTE_PTYPE_L4_TCP,
                [IPPROTO_SCTP] = RTE_PTYPE_L4_SCTP,
        };

        return ptype_l4_proto[proto];
}

/* get inner l3 packet type from ip6 next protocol */
static uint32_t
ptype_inner_l3_ip6(uint8_t ip6_proto)
{
        static const uint32_t ptype_inner_ip6_ext_proto_map[256] = {
                [IPPROTO_HOPOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_ROUTING] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_FRAGMENT] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_ESP] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_AH] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_DSTOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
        };

        return RTE_PTYPE_INNER_L3_IPV6 +
                ptype_inner_ip6_ext_proto_map[ip6_proto];
}

/* get inner l3 packet type from ip version and header length */
static uint32_t
ptype_inner_l3_ip(uint8_t ipv_ihl)
{
        static const uint32_t ptype_inner_l3_ip_proto_map[256] = {
                [0x45] = RTE_PTYPE_INNER_L3_IPV4,
                [0x46] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x47] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x48] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x49] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4A] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4B] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4C] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4D] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4E] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4F] = RTE_PTYPE_INNER_L3_IPV4_EXT,
        };

        return ptype_inner_l3_ip_proto_map[ipv_ihl];
}

/* get inner l4 packet type from proto */
static uint32_t
ptype_inner_l4(uint8_t proto)
{
        static const uint32_t ptype_inner_l4_proto[256] = {
                [IPPROTO_UDP] = RTE_PTYPE_INNER_L4_UDP,
                [IPPROTO_TCP] = RTE_PTYPE_INNER_L4_TCP,
                [IPPROTO_SCTP] = RTE_PTYPE_INNER_L4_SCTP,
        };

        return ptype_inner_l4_proto[proto];
}

/* get the tunnel packet type if any, update proto and off. */
static uint32_t
ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
        uint32_t *off)
{
        switch (*proto) {
        case IPPROTO_GRE: {
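                /* the 4 MSBs of the GRE header (the C, R, K and S flags)
                 * select the size of the fixed header plus the optional
                 * checksum, key and sequence fields; a zero entry marks an
                 * unsupported flag combination
                 */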
                static const uint8_t opt_len[16] = {
                        [0x0] = 4,
                        [0x1] = 8,
                        [0x2] = 8,
                        [0x8] = 8,
                        [0x3] = 12,
                        [0x9] = 12,
                        [0xa] = 12,
                        [0xb] = 16,
                };
                const struct rte_gre_hdr *gh;
                struct rte_gre_hdr gh_copy;
                uint16_t flags;

                gh = rte_pktmbuf_read(m, *off, sizeof(*gh), &gh_copy);
                if (unlikely(gh == NULL))
                        return 0;

                flags = rte_be_to_cpu_16(*(const uint16_t *)gh);
                flags >>= 12;
                if (opt_len[flags] == 0)
                        return 0;

                *off += opt_len[flags];
                *proto = gh->proto;
                if (*proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB))
                        return RTE_PTYPE_TUNNEL_NVGRE;
                else
                        return RTE_PTYPE_TUNNEL_GRE;
        }
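        /* for IP-in-IP, rewrite *proto to the matching ethertype so the
         * caller can go on and parse the inner L3 header
         */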
        case IPPROTO_IPIP:
                *proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
                return RTE_PTYPE_TUNNEL_IP;
        case IPPROTO_IPV6:
                *proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
                return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
        default:
                return 0;
        }
}

/* get the ipv4 header length */
static uint8_t
ip4_hlen(const struct rte_ipv4_hdr *hdr)
{
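        /* the low 4 bits (IHL) give the header length in 32-bit words */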
        return (hdr->version_ihl & 0xf) * 4;
}

/* parse ipv6 extended headers, update offset and return next proto */
int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
        int *frag)
{
        struct ext_hdr {
                uint8_t next_hdr;
                uint8_t len;
        };
        const struct ext_hdr *xh;
        struct ext_hdr xh_copy;
        unsigned int i;

        *frag = 0;

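/* limit the number of extension headers that will be walked */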
#define MAX_EXT_HDRS 5
        for (i = 0; i < MAX_EXT_HDRS; i++) {
                switch (proto) {
                case IPPROTO_HOPOPTS:
                case IPPROTO_ROUTING:
                case IPPROTO_DSTOPTS:
                        xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
                                &xh_copy);
                        if (xh == NULL)
                                return -1;
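                        /* the ext header length field is in 8-byte units
                         * and does not count the first 8 bytes
                         */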
                        *off += (xh->len + 1) * 8;
                        proto = xh->next_hdr;
                        break;
                case IPPROTO_FRAGMENT:
                        xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
                                &xh_copy);
                        if (xh == NULL)
                                return -1;
                        *off += 8;
                        proto = xh->next_hdr;
                        *frag = 1;
                        return proto; /* this is always the last ext hdr */
                case IPPROTO_NONE:
                        return 0;
                default:
                        return proto;
                }
        }
        return -1;
}

/* parse mbuf data to get packet type */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
        struct rte_net_hdr_lens *hdr_lens, uint32_t layers)
{
        struct rte_net_hdr_lens local_hdr_lens;
        const struct rte_ether_hdr *eh;
        struct rte_ether_hdr eh_copy;
        uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
        uint32_t off = 0;
        uint16_t proto;
        int ret;

        if (hdr_lens == NULL)
                hdr_lens = &local_hdr_lens;

        eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
        if (unlikely(eh == NULL))
                return 0;
        proto = eh->ether_type;
        off = sizeof(*eh);
        hdr_lens->l2_len = off;

        if ((layers & RTE_PTYPE_L2_MASK) == 0)
                return 0;

        if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
                goto l3; /* fast path if packet is IPv4 */

        if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
                const struct rte_vlan_hdr *vh;
                struct rte_vlan_hdr vh_copy;

                pkt_type = RTE_PTYPE_L2_ETHER_VLAN;
                vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += sizeof(*vh);
                hdr_lens->l2_len += sizeof(*vh);
                proto = vh->eth_proto;
        } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
                const struct rte_vlan_hdr *vh;
                struct rte_vlan_hdr vh_copy;

                pkt_type = RTE_PTYPE_L2_ETHER_QINQ;
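                /* read the inner tag of the 802.1ad pair: its ethertype is
                 * the encapsulated protocol, then skip both tags
                 */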
                vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
                        &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += 2 * sizeof(*vh);
                hdr_lens->l2_len += 2 * sizeof(*vh);
                proto = vh->eth_proto;
        } else if ((proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLS)) ||
                (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_MPLSM))) {
                unsigned int i;
                const struct rte_mpls_hdr *mh;
                struct rte_mpls_hdr mh_copy;

#define MAX_MPLS_HDR 5
                for (i = 0; i < MAX_MPLS_HDR; i++) {
                        mh = rte_pktmbuf_read(m, off + (i * sizeof(*mh)),
                                sizeof(*mh), &mh_copy);
                        if (unlikely(mh == NULL))
                                return pkt_type;
                        /* stop at the bottom-of-stack label */
                        if (mh->bs)
                                break;
                }
                if (i == MAX_MPLS_HDR)
                        return pkt_type;
                pkt_type = RTE_PTYPE_L2_ETHER_MPLS;
                hdr_lens->l2_len += (sizeof(*mh) * (i + 1));
                return pkt_type;
        }

l3:
        if ((layers & RTE_PTYPE_L3_MASK) == 0)
                return pkt_type;

        if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                const struct rte_ipv4_hdr *ip4h;
                struct rte_ipv4_hdr ip4h_copy;

                ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
                if (unlikely(ip4h == NULL))
                        return pkt_type;

                pkt_type |= ptype_l3_ip(ip4h->version_ihl);
                hdr_lens->l3_len = ip4_hlen(ip4h);
                off += hdr_lens->l3_len;

                if ((layers & RTE_PTYPE_L4_MASK) == 0)
                        return pkt_type;

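                /* a fragment (MF set or non-zero offset) may not carry the
                 * L4 header, so only report L4_FRAG
                 */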
                if (ip4h->fragment_offset & rte_cpu_to_be_16(
                                RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)) {
                        pkt_type |= RTE_PTYPE_L4_FRAG;
                        hdr_lens->l4_len = 0;
                        return pkt_type;
                }
                proto = ip4h->next_proto_id;
                pkt_type |= ptype_l4(proto);
        } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
                const struct rte_ipv6_hdr *ip6h;
                struct rte_ipv6_hdr ip6h_copy;
                int frag = 0;

                ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
                if (unlikely(ip6h == NULL))
                        return pkt_type;

                proto = ip6h->proto;
                hdr_lens->l3_len = sizeof(*ip6h);
                off += hdr_lens->l3_len;
                pkt_type |= ptype_l3_ip6(proto);
                if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
                        ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
                        if (ret < 0)
                                return pkt_type;
                        proto = ret;
                        hdr_lens->l3_len = off - hdr_lens->l2_len;
                }
                if (proto == 0)
                        return pkt_type;

                if ((layers & RTE_PTYPE_L4_MASK) == 0)
                        return pkt_type;

                if (frag) {
                        pkt_type |= RTE_PTYPE_L4_FRAG;
                        hdr_lens->l4_len = 0;
                        return pkt_type;
                }
                pkt_type |= ptype_l4(proto);
        }

        if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
                hdr_lens->l4_len = sizeof(struct rte_udp_hdr);
                return pkt_type;
        } else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
                const struct rte_tcp_hdr *th;
                struct rte_tcp_hdr th_copy;

                th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
                if (unlikely(th == NULL))
                        return pkt_type & (RTE_PTYPE_L2_MASK |
                                RTE_PTYPE_L3_MASK);
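                /* the upper nibble of data_off is the TCP header length in
                 * 32-bit words
                 */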
                hdr_lens->l4_len = (th->data_off & 0xf0) >> 2;
                return pkt_type;
        } else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) {
                hdr_lens->l4_len = sizeof(struct rte_sctp_hdr);
                return pkt_type;
        } else {
                uint32_t prev_off = off;

                hdr_lens->l4_len = 0;

                if ((layers & RTE_PTYPE_TUNNEL_MASK) == 0)
                        return pkt_type;

                pkt_type |= ptype_tunnel(&proto, m, &off);
                hdr_lens->tunnel_len = off - prev_off;
        }

        /* same job for inner header: we need to duplicate the code
         * because the packet types do not have the same value.
         */
        if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
                return pkt_type;

        hdr_lens->inner_l2_len = 0;
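        /* with transparent Ethernet bridging (TEB), a full inner Ethernet
         * header follows the tunnel header
         */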
        if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
                if (unlikely(eh == NULL))
                        return pkt_type;
                pkt_type |= RTE_PTYPE_INNER_L2_ETHER;
                proto = eh->ether_type;
                off += sizeof(*eh);
                hdr_lens->inner_l2_len = sizeof(*eh);
        }

        if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
                const struct rte_vlan_hdr *vh;
                struct rte_vlan_hdr vh_copy;

                pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
                pkt_type |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
                vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += sizeof(*vh);
                hdr_lens->inner_l2_len += sizeof(*vh);
                proto = vh->eth_proto;
        } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
                const struct rte_vlan_hdr *vh;
                struct rte_vlan_hdr vh_copy;

                pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
                pkt_type |= RTE_PTYPE_INNER_L2_ETHER_QINQ;
                vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
                        &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += 2 * sizeof(*vh);
                hdr_lens->inner_l2_len += 2 * sizeof(*vh);
                proto = vh->eth_proto;
        }

        if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
                return pkt_type;

        if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                const struct rte_ipv4_hdr *ip4h;
                struct rte_ipv4_hdr ip4h_copy;

                ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
                if (unlikely(ip4h == NULL))
                        return pkt_type;

                pkt_type |= ptype_inner_l3_ip(ip4h->version_ihl);
                hdr_lens->inner_l3_len = ip4_hlen(ip4h);
                off += hdr_lens->inner_l3_len;

                if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
                        return pkt_type;
                if (ip4h->fragment_offset &
                                rte_cpu_to_be_16(RTE_IPV4_HDR_OFFSET_MASK |
                                        RTE_IPV4_HDR_MF_FLAG)) {
                        pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
                        hdr_lens->inner_l4_len = 0;
                        return pkt_type;
                }
                proto = ip4h->next_proto_id;
                pkt_type |= ptype_inner_l4(proto);
        } else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
                const struct rte_ipv6_hdr *ip6h;
                struct rte_ipv6_hdr ip6h_copy;
                int frag = 0;

                ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
                if (unlikely(ip6h == NULL))
                        return pkt_type;

                proto = ip6h->proto;
                hdr_lens->inner_l3_len = sizeof(*ip6h);
                off += hdr_lens->inner_l3_len;
                pkt_type |= ptype_inner_l3_ip6(proto);
                if ((pkt_type & RTE_PTYPE_INNER_L3_MASK) ==
                                RTE_PTYPE_INNER_L3_IPV6_EXT) {
                        uint32_t prev_off;

                        prev_off = off;
                        ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
                        if (ret < 0)
                                return pkt_type;
                        proto = ret;
                        hdr_lens->inner_l3_len += off - prev_off;
                }
                if (proto == 0)
                        return pkt_type;

                if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
                        return pkt_type;

                if (frag) {
                        pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
                        hdr_lens->inner_l4_len = 0;
                        return pkt_type;
                }
                pkt_type |= ptype_inner_l4(proto);
        }

        if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
                hdr_lens->inner_l4_len = sizeof(struct rte_udp_hdr);
        } else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
                        RTE_PTYPE_INNER_L4_TCP) {
                const struct rte_tcp_hdr *th;
                struct rte_tcp_hdr th_copy;

                th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
                if (unlikely(th == NULL))
                        return pkt_type & (RTE_PTYPE_INNER_L2_MASK |
                                RTE_PTYPE_INNER_L3_MASK);
                hdr_lens->inner_l4_len = (th->data_off & 0xf0) >> 2;
        } else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
                        RTE_PTYPE_INNER_L4_SCTP) {
                hdr_lens->inner_l4_len = sizeof(struct rte_sctp_hdr);
        } else {
                hdr_lens->inner_l4_len = 0;
        }

        return pkt_type;
}