app/eventdev: switch sequence number to dynamic mbuf field
[dpdk.git] / lib / librte_gro / rte_gro.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_mbuf.h>
7 #include <rte_cycles.h>
8 #include <rte_ethdev.h>
9
10 #include "rte_gro.h"
11 #include "gro_tcp4.h"
12 #include "gro_udp4.h"
13 #include "gro_vxlan_tcp4.h"
14 #include "gro_vxlan_udp4.h"
15
/*
 * Per-GRO-type operation tables. Entry i of each array serves the GRO
 * type whose flag bit is (1ULL << i), so the ordering here must stay in
 * sync with the RTE_GRO_*_INDEX values used to index gro_ctx::tbls
 * below (TCP/IPv4, VXLAN TCP/IPv4, UDP/IPv4, VXLAN UDP/IPv4).
 * Remaining slots are NULL and are skipped by the callers.
 */
typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
typedef void (*gro_tbl_destroy_fn)(void *tbl);
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);

/* Constructors for the per-type reassembly tables. */
static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,
		gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, NULL};
/* Destructors, same ordering as tbl_create_fn. */
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
			gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
			gro_udp4_tbl_destroy, gro_vxlan_udp4_tbl_destroy,
			NULL};
/* Per-table packet counters, same ordering as tbl_create_fn. */
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
			gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
			gro_udp4_tbl_pkt_count, gro_vxlan_udp4_tbl_pkt_count,
			NULL};
33
/* Outer header is IPv4 (plain or with extensions) and the L4 is TCP. */
#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP))

/* Outer header is IPv4 (plain or with extensions) and the L4 is UDP. */
#define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP))

/*
 * Outer IPv4/UDP carrying a VXLAN tunnel whose inner packet is
 * IPv4 (any of the three inner-IPv4 ptypes) over TCP.
 */
#define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		 ((ptype & RTE_PTYPE_INNER_L4_TCP) == \
		  RTE_PTYPE_INNER_L4_TCP) && \
		  (((ptype & RTE_PTYPE_INNER_L3_MASK) & \
		    (RTE_PTYPE_INNER_L3_IPV4 | \
		     RTE_PTYPE_INNER_L3_IPV4_EXT | \
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0))

/*
 * Outer IPv4/UDP carrying a VXLAN tunnel whose inner packet is
 * IPv4 (any of the three inner-IPv4 ptypes) over UDP.
 */
#define IS_IPV4_VXLAN_UDP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		 ((ptype & RTE_PTYPE_INNER_L4_UDP) == \
		  RTE_PTYPE_INNER_L4_UDP) && \
		  (((ptype & RTE_PTYPE_INNER_L3_MASK) & \
		    (RTE_PTYPE_INNER_L3_IPV4 | \
		     RTE_PTYPE_INNER_L3_IPV4_EXT | \
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0))
61
/*
 * GRO context structure. It keeps the table structures, which are
 * used to merge packets, for different GRO types. Before using
 * rte_gro_reassemble(), applications need to create the GRO context
 * first.
 */
struct gro_ctx {
	/* GRO types to perform (bitmask of RTE_GRO_* flags) */
	uint64_t gro_types;
	/* reassembly tables, indexed by GRO type index (flag bit position) */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
};
74
75 void *
76 rte_gro_ctx_create(const struct rte_gro_param *param)
77 {
78         struct gro_ctx *gro_ctx;
79         gro_tbl_create_fn create_tbl_fn;
80         uint64_t gro_type_flag = 0;
81         uint64_t gro_types = 0;
82         uint8_t i;
83
84         gro_ctx = rte_zmalloc_socket(__func__,
85                         sizeof(struct gro_ctx),
86                         RTE_CACHE_LINE_SIZE,
87                         param->socket_id);
88         if (gro_ctx == NULL)
89                 return NULL;
90
91         for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
92                 gro_type_flag = 1ULL << i;
93                 if ((param->gro_types & gro_type_flag) == 0)
94                         continue;
95
96                 create_tbl_fn = tbl_create_fn[i];
97                 if (create_tbl_fn == NULL)
98                         continue;
99
100                 gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
101                                 param->max_flow_num,
102                                 param->max_item_per_flow);
103                 if (gro_ctx->tbls[i] == NULL) {
104                         /* destroy all created tables */
105                         gro_ctx->gro_types = gro_types;
106                         rte_gro_ctx_destroy(gro_ctx);
107                         return NULL;
108                 }
109                 gro_types |= gro_type_flag;
110         }
111         gro_ctx->gro_types = param->gro_types;
112
113         return gro_ctx;
114 }
115
116 void
117 rte_gro_ctx_destroy(void *ctx)
118 {
119         gro_tbl_destroy_fn destroy_tbl_fn;
120         struct gro_ctx *gro_ctx = ctx;
121         uint64_t gro_type_flag;
122         uint8_t i;
123
124         for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
125                 gro_type_flag = 1ULL << i;
126                 if ((gro_ctx->gro_types & gro_type_flag) == 0)
127                         continue;
128                 destroy_tbl_fn = tbl_destroy_fn[i];
129                 if (destroy_tbl_fn)
130                         destroy_tbl_fn(gro_ctx->tbls[i]);
131         }
132         rte_free(gro_ctx);
133 }
134
/**
 * One-shot, context-free GRO over a single burst.
 *
 * Reassembly tables for every enabled GRO type are built on the stack,
 * the burst is pushed through them, and then all tables are flushed so
 * every packet (merged or not) ends up back in pkts[]. The timestamp
 * argument of the per-type reassemble/flush helpers is passed as 0
 * because nothing outlives this call.
 *
 * @param pkts    in/out mbuf array; compacted in place on return
 * @param nb_pkts number of valid entries in pkts[]
 * @param param   GRO types to apply and table sizing limits
 * @return number of packets left in pkts[] after merging
 */
uint16_t
rte_gro_reassemble_burst(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		const struct rte_gro_param *param)
{
	/* allocate a reassembly table for TCP/IPv4 GRO */
	struct gro_tcp4_tbl tcp_tbl;
	struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* allocate a reassembly table for UDP/IPv4 GRO */
	struct gro_udp4_tbl udp_tbl;
	struct gro_udp4_flow udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_udp4_item udp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* Allocate a reassembly table for VXLAN TCP GRO */
	struct gro_vxlan_tcp4_tbl vxlan_tcp_tbl;
	struct gro_vxlan_tcp4_flow vxlan_tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_tcp4_item vxlan_tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
			= {{{0}, 0, 0} };

	/* Allocate a reassembly table for VXLAN UDP GRO */
	struct gro_vxlan_udp4_tbl vxlan_udp_tbl;
	struct gro_vxlan_udp4_flow vxlan_udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_udp4_item vxlan_udp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
			= {{{0}} };

	/* Packets no table would accept; re-appended to pkts[] at the end. */
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	uint32_t item_num;
	int32_t ret;
	uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
	uint8_t do_tcp4_gro = 0, do_vxlan_tcp_gro = 0, do_udp4_gro = 0,
		do_vxlan_udp_gro = 0;

	/* Nothing to do if none of the supported GRO types is requested. */
	if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4 |
					RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
					RTE_GRO_UDP_IPV4)) == 0))
		return nb_pkts;

	/* Get the maximum number of packets */
	item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
				param->max_item_per_flow));
	item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);

	/* Wire each enabled table to its stack arrays and mark all
	 * flow slots empty (INVALID_ARRAY_INDEX).
	 */
	if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			vxlan_tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_tcp_tbl.flows = vxlan_tcp_flows;
		vxlan_tcp_tbl.items = vxlan_tcp_items;
		vxlan_tcp_tbl.flow_num = 0;
		vxlan_tcp_tbl.item_num = 0;
		vxlan_tcp_tbl.max_flow_num = item_num;
		vxlan_tcp_tbl.max_item_num = item_num;
		do_vxlan_tcp_gro = 1;
	}

	if (param->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) {
		for (i = 0; i < item_num; i++)
			vxlan_udp_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_udp_tbl.flows = vxlan_udp_flows;
		vxlan_udp_tbl.items = vxlan_udp_items;
		vxlan_udp_tbl.flow_num = 0;
		vxlan_udp_tbl.item_num = 0;
		vxlan_udp_tbl.max_flow_num = item_num;
		vxlan_udp_tbl.max_item_num = item_num;
		do_vxlan_udp_gro = 1;
	}

	if (param->gro_types & RTE_GRO_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		tcp_tbl.flows = tcp_flows;
		tcp_tbl.items = tcp_items;
		tcp_tbl.flow_num = 0;
		tcp_tbl.item_num = 0;
		tcp_tbl.max_flow_num = item_num;
		tcp_tbl.max_item_num = item_num;
		do_tcp4_gro = 1;
	}

	if (param->gro_types & RTE_GRO_UDP_IPV4) {
		for (i = 0; i < item_num; i++)
			udp_flows[i].start_index = INVALID_ARRAY_INDEX;

		udp_tbl.flows = udp_flows;
		udp_tbl.items = udp_items;
		udp_tbl.flow_num = 0;
		udp_tbl.item_num = 0;
		udp_tbl.max_flow_num = item_num;
		udp_tbl.max_item_num = item_num;
		do_udp4_gro = 1;
	}


	/* Classify each packet by ptype; the VXLAN checks come first since
	 * a VXLAN packet's outer header also matches the plain checks'
	 * outer-IPv4 test. ret > 0: merged into an existing item;
	 * ret < 0: not processable; otherwise the packet was kept in the
	 * table and comes back out in the flush below.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/*
		 * The timestamp is ignored, since all packets
		 * will be flushed from the tables.
		 */
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_tcp_gro) {
			ret = gro_vxlan_tcp4_reassemble(pkts[i],
							&vxlan_tcp_tbl, 0);
			if (ret > 0)
				/* Merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_udp_gro) {
			ret = gro_vxlan_udp4_reassemble(pkts[i],
							&vxlan_udp_tbl, 0);
			if (ret > 0)
				/* Merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
			if (ret > 0)
				/* merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
				do_udp4_gro) {
			ret = gro_udp4_reassemble(pkts[i], &udp_tbl, 0);
			if (ret > 0)
				/* merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}

	/* Skip the flush only when every packet ended up in
	 * unprocess_pkts[] untouched, i.e. the tables are all empty and
	 * pkts[] is already correct.
	 */
	if ((nb_after_gro < nb_pkts)
		 || (unprocess_num < nb_pkts)) {
		i = 0;
		/* Flush all packets from the tables back into pkts[];
		 * 'i' accumulates the running output offset.
		 */
		if (do_vxlan_tcp_gro) {
			i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,
					0, pkts, nb_pkts);
		}

		if (do_vxlan_udp_gro) {
			i += gro_vxlan_udp4_tbl_timeout_flush(&vxlan_udp_tbl,
					0, &pkts[i], nb_pkts - i);

		}

		if (do_tcp4_gro) {
			i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}

		if (do_udp4_gro) {
			i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}
		/* Copy unprocessed packets */
		if (unprocess_num > 0) {
			memcpy(&pkts[i], unprocess_pkts,
					sizeof(struct rte_mbuf *) *
					unprocess_num);
		}
		nb_after_gro = i + unprocess_num;
	}

	return nb_after_gro;
}
311
312 uint16_t
313 rte_gro_reassemble(struct rte_mbuf **pkts,
314                 uint16_t nb_pkts,
315                 void *ctx)
316 {
317         struct rte_mbuf *unprocess_pkts[nb_pkts];
318         struct gro_ctx *gro_ctx = ctx;
319         void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl;
320         uint64_t current_time;
321         uint16_t i, unprocess_num = 0;
322         uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro;
323
324         if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
325                                         RTE_GRO_TCP_IPV4 |
326                                         RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
327                                         RTE_GRO_UDP_IPV4)) == 0))
328                 return nb_pkts;
329
330         tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
331         vxlan_tcp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
332         udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];
333         vxlan_udp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX];
334
335         do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
336                 RTE_GRO_TCP_IPV4;
337         do_vxlan_tcp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
338                 RTE_GRO_IPV4_VXLAN_TCP_IPV4;
339         do_udp4_gro = (gro_ctx->gro_types & RTE_GRO_UDP_IPV4) ==
340                 RTE_GRO_UDP_IPV4;
341         do_vxlan_udp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) ==
342                 RTE_GRO_IPV4_VXLAN_UDP_IPV4;
343
344         current_time = rte_rdtsc();
345
346         for (i = 0; i < nb_pkts; i++) {
347                 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
348                                 do_vxlan_tcp_gro) {
349                         if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tcp_tbl,
350                                                 current_time) < 0)
351                                 unprocess_pkts[unprocess_num++] = pkts[i];
352                 } else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
353                                 do_vxlan_udp_gro) {
354                         if (gro_vxlan_udp4_reassemble(pkts[i], vxlan_udp_tbl,
355                                                 current_time) < 0)
356                                 unprocess_pkts[unprocess_num++] = pkts[i];
357                 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
358                                 do_tcp4_gro) {
359                         if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
360                                                 current_time) < 0)
361                                 unprocess_pkts[unprocess_num++] = pkts[i];
362                 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
363                                 do_udp4_gro) {
364                         if (gro_udp4_reassemble(pkts[i], udp_tbl,
365                                                 current_time) < 0)
366                                 unprocess_pkts[unprocess_num++] = pkts[i];
367                 } else
368                         unprocess_pkts[unprocess_num++] = pkts[i];
369         }
370         if (unprocess_num > 0) {
371                 memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
372                                 unprocess_num);
373         }
374
375         return unprocess_num;
376 }
377
378 uint16_t
379 rte_gro_timeout_flush(void *ctx,
380                 uint64_t timeout_cycles,
381                 uint64_t gro_types,
382                 struct rte_mbuf **out,
383                 uint16_t max_nb_out)
384 {
385         struct gro_ctx *gro_ctx = ctx;
386         uint64_t flush_timestamp;
387         uint16_t num = 0;
388         uint16_t left_nb_out = max_nb_out;
389
390         gro_types = gro_types & gro_ctx->gro_types;
391         flush_timestamp = rte_rdtsc() - timeout_cycles;
392
393         if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
394                 num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
395                                 RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
396                                 flush_timestamp, out, left_nb_out);
397                 left_nb_out = max_nb_out - num;
398         }
399
400         if ((gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) && left_nb_out > 0) {
401                 num += gro_vxlan_udp4_tbl_timeout_flush(gro_ctx->tbls[
402                                 RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX],
403                                 flush_timestamp, &out[num], left_nb_out);
404                 left_nb_out = max_nb_out - num;
405         }
406
407         /* If no available space in 'out', stop flushing. */
408         if ((gro_types & RTE_GRO_TCP_IPV4) && left_nb_out > 0) {
409                 num += gro_tcp4_tbl_timeout_flush(
410                                 gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
411                                 flush_timestamp,
412                                 &out[num], left_nb_out);
413                 left_nb_out = max_nb_out - num;
414         }
415
416         /* If no available space in 'out', stop flushing. */
417         if ((gro_types & RTE_GRO_UDP_IPV4) && left_nb_out > 0) {
418                 num += gro_udp4_tbl_timeout_flush(
419                                 gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],
420                                 flush_timestamp,
421                                 &out[num], left_nb_out);
422         }
423
424         return num;
425 }
426
427 uint64_t
428 rte_gro_get_pkt_count(void *ctx)
429 {
430         struct gro_ctx *gro_ctx = ctx;
431         gro_tbl_pkt_count_fn pkt_count_fn;
432         uint64_t gro_types = gro_ctx->gro_types, flag;
433         uint64_t item_num = 0;
434         uint8_t i;
435
436         for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
437                 flag = 1ULL << i;
438                 if ((gro_types & flag) == 0)
439                         continue;
440
441                 gro_types ^= flag;
442                 pkt_count_fn = tbl_pkt_count_fn[i];
443                 if (pkt_count_fn)
444                         item_num += pkt_count_fn(gro_ctx->tbls[i]);
445         }
446
447         return item_num;
448 }