test/cycles: restore default delay callback
[dpdk.git] / lib / librte_gro / rte_gro.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_mbuf.h>
7 #include <rte_cycles.h>
8 #include <rte_ethdev.h>
9
10 #include "rte_gro.h"
11 #include "gro_tcp4.h"
12 #include "gro_vxlan_tcp4.h"
13
/* Constructor for one GRO type's reassembly table: allocates the table on
 * 'socket_id', sized for 'max_flow_num' flows of 'max_item_per_flow' items. */
typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
/* Destructor for a table returned by the matching gro_tbl_create_fn. */
typedef void (*gro_tbl_destroy_fn)(void *tbl);
/* Returns how many packets a reassembly table currently holds. */
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);

/* Dispatch tables indexed by GRO-type bit position (the same index used
 * for gro_ctx->tbls): slot 0 is TCP/IPv4, slot 1 is VXLAN-tunneled
 * TCP/IPv4; NULL entries are placeholders for unimplemented types. */
static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create, NULL};
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
			gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
			NULL};
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
			gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
			NULL};
28
/* True if 'ptype' marks a plain (non-tunneled) TCP over IPv4 packet. */
#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP))

/* True if 'ptype' marks a VXLAN-tunneled TCP/IPv4 packet: outer IPv4 +
 * UDP carrying a VXLAN tunnel whose inner packet is IPv4 (plain, with
 * extensions, or unknown-extension variant) over TCP. */
#define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		 ((ptype & RTE_PTYPE_INNER_L4_TCP) == \
		  RTE_PTYPE_INNER_L4_TCP) && \
		  (((ptype & RTE_PTYPE_INNER_L3_MASK) & \
		    (RTE_PTYPE_INNER_L3_IPV4 | \
		     RTE_PTYPE_INNER_L3_IPV4_EXT | \
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0))
42
/*
 * GRO context structure. It keeps the table structures, which are
 * used to merge packets, for different GRO types. Before using
 * rte_gro_reassemble(), applications need to create the GRO context
 * first.
 */
struct gro_ctx {
	/* GRO types to perform (bitmask of RTE_GRO_* type flags) */
	uint64_t gro_types;
	/* reassembly tables, indexed by GRO-type bit position */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
};
55
56 void *
57 rte_gro_ctx_create(const struct rte_gro_param *param)
58 {
59         struct gro_ctx *gro_ctx;
60         gro_tbl_create_fn create_tbl_fn;
61         uint64_t gro_type_flag = 0;
62         uint64_t gro_types = 0;
63         uint8_t i;
64
65         gro_ctx = rte_zmalloc_socket(__func__,
66                         sizeof(struct gro_ctx),
67                         RTE_CACHE_LINE_SIZE,
68                         param->socket_id);
69         if (gro_ctx == NULL)
70                 return NULL;
71
72         for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
73                 gro_type_flag = 1ULL << i;
74                 if ((param->gro_types & gro_type_flag) == 0)
75                         continue;
76
77                 create_tbl_fn = tbl_create_fn[i];
78                 if (create_tbl_fn == NULL)
79                         continue;
80
81                 gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
82                                 param->max_flow_num,
83                                 param->max_item_per_flow);
84                 if (gro_ctx->tbls[i] == NULL) {
85                         /* destroy all created tables */
86                         gro_ctx->gro_types = gro_types;
87                         rte_gro_ctx_destroy(gro_ctx);
88                         return NULL;
89                 }
90                 gro_types |= gro_type_flag;
91         }
92         gro_ctx->gro_types = param->gro_types;
93
94         return gro_ctx;
95 }
96
97 void
98 rte_gro_ctx_destroy(void *ctx)
99 {
100         gro_tbl_destroy_fn destroy_tbl_fn;
101         struct gro_ctx *gro_ctx = ctx;
102         uint64_t gro_type_flag;
103         uint8_t i;
104
105         for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
106                 gro_type_flag = 1ULL << i;
107                 if ((gro_ctx->gro_types & gro_type_flag) == 0)
108                         continue;
109                 destroy_tbl_fn = tbl_destroy_fn[i];
110                 if (destroy_tbl_fn)
111                         destroy_tbl_fn(gro_ctx->tbls[i]);
112         }
113         rte_free(gro_ctx);
114 }
115
/*
 * Stateless, one-shot GRO over a single burst: reassembly tables live
 * on the stack, every merged packet is flushed back into 'pkts' before
 * returning, and no state survives the call. Returns the number of
 * packets left in 'pkts' after merging.
 */
uint16_t
rte_gro_reassemble_burst(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		const struct rte_gro_param *param)
{
	/* allocate a reassembly table for TCP/IPv4 GRO */
	struct gro_tcp4_tbl tcp_tbl;
	struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* Allocate a reassembly table for VXLAN GRO */
	struct gro_vxlan_tcp4_tbl vxlan_tbl;
	struct gro_vxlan_tcp4_flow vxlan_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_tcp4_item vxlan_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {
		{{0}, 0, 0} };

	/* packets no table can handle; re-appended to 'pkts' at the end */
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	uint32_t item_num;
	int32_t ret;
	uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
	uint8_t do_tcp4_gro = 0, do_vxlan_gro = 0;

	/* nothing to do if no supported GRO type was requested */
	if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4)) == 0))
		return nb_pkts;

	/* Get the maximum number of packets (capped by the burst limit) */
	item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
				param->max_item_per_flow));
	item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);

	if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		/* mark all flow slots as free before use */
		for (i = 0; i < item_num; i++)
			vxlan_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_tbl.flows = vxlan_flows;
		vxlan_tbl.items = vxlan_items;
		vxlan_tbl.flow_num = 0;
		vxlan_tbl.item_num = 0;
		vxlan_tbl.max_flow_num = item_num;
		vxlan_tbl.max_item_num = item_num;
		do_vxlan_gro = 1;
	}

	if (param->gro_types & RTE_GRO_TCP_IPV4) {
		/* mark all flow slots as free before use */
		for (i = 0; i < item_num; i++)
			tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		tcp_tbl.flows = tcp_flows;
		tcp_tbl.items = tcp_items;
		tcp_tbl.flow_num = 0;
		tcp_tbl.item_num = 0;
		tcp_tbl.max_flow_num = item_num;
		tcp_tbl.max_item_num = item_num;
		do_tcp4_gro = 1;
	}

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * The timestamp is ignored, since all packets
		 * will be flushed from the tables.
		 */
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_gro) {
			/* ret > 0: merged; ret == 0: stored in the table;
			 * ret < 0: unmergeable, keep it aside. */
			ret = gro_vxlan_tcp4_reassemble(pkts[i], &vxlan_tbl, 0);
			if (ret > 0)
				/* Merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
			if (ret > 0)
				/* merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}

	/* at least one merge happened: compact 'pkts' from the tables */
	if (nb_after_gro < nb_pkts) {
		i = 0;
		/* Flush all packets from the tables
		 * (timestamp 0 expires every entry); 'i' becomes the
		 * running write offset into 'pkts'. */
		if (do_vxlan_gro) {
			i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tbl,
					0, pkts, nb_pkts);
		}
		if (do_tcp4_gro) {
			i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}
		/* Copy unprocessed packets */
		if (unprocess_num > 0) {
			memcpy(&pkts[i], unprocess_pkts,
					sizeof(struct rte_mbuf *) *
					unprocess_num);
		}
	}

	return nb_after_gro;
}
219
220 uint16_t
221 rte_gro_reassemble(struct rte_mbuf **pkts,
222                 uint16_t nb_pkts,
223                 void *ctx)
224 {
225         struct rte_mbuf *unprocess_pkts[nb_pkts];
226         struct gro_ctx *gro_ctx = ctx;
227         void *tcp_tbl, *vxlan_tbl;
228         uint64_t current_time;
229         uint16_t i, unprocess_num = 0;
230         uint8_t do_tcp4_gro, do_vxlan_gro;
231
232         if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
233                                         RTE_GRO_TCP_IPV4)) == 0))
234                 return nb_pkts;
235
236         tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
237         vxlan_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
238
239         do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
240                 RTE_GRO_TCP_IPV4;
241         do_vxlan_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
242                 RTE_GRO_IPV4_VXLAN_TCP_IPV4;
243
244         current_time = rte_rdtsc();
245
246         for (i = 0; i < nb_pkts; i++) {
247                 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
248                                 do_vxlan_gro) {
249                         if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tbl,
250                                                 current_time) < 0)
251                                 unprocess_pkts[unprocess_num++] = pkts[i];
252                 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
253                                 do_tcp4_gro) {
254                         if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
255                                                 current_time) < 0)
256                                 unprocess_pkts[unprocess_num++] = pkts[i];
257                 } else
258                         unprocess_pkts[unprocess_num++] = pkts[i];
259         }
260         if (unprocess_num > 0) {
261                 memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
262                                 unprocess_num);
263         }
264
265         return unprocess_num;
266 }
267
268 uint16_t
269 rte_gro_timeout_flush(void *ctx,
270                 uint64_t timeout_cycles,
271                 uint64_t gro_types,
272                 struct rte_mbuf **out,
273                 uint16_t max_nb_out)
274 {
275         struct gro_ctx *gro_ctx = ctx;
276         uint64_t flush_timestamp;
277         uint16_t num = 0;
278
279         gro_types = gro_types & gro_ctx->gro_types;
280         flush_timestamp = rte_rdtsc() - timeout_cycles;
281
282         if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
283                 num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
284                                 RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
285                                 flush_timestamp, out, max_nb_out);
286                 max_nb_out -= num;
287         }
288
289         /* If no available space in 'out', stop flushing. */
290         if ((gro_types & RTE_GRO_TCP_IPV4) && max_nb_out > 0) {
291                 num += gro_tcp4_tbl_timeout_flush(
292                                 gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
293                                 flush_timestamp,
294                                 &out[num], max_nb_out);
295         }
296
297         return num;
298 }
299
300 uint64_t
301 rte_gro_get_pkt_count(void *ctx)
302 {
303         struct gro_ctx *gro_ctx = ctx;
304         gro_tbl_pkt_count_fn pkt_count_fn;
305         uint64_t gro_types = gro_ctx->gro_types, flag;
306         uint64_t item_num = 0;
307         uint8_t i;
308
309         for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
310                 flag = 1ULL << i;
311                 if ((gro_types & flag) == 0)
312                         continue;
313
314                 gro_types ^= flag;
315                 pkt_count_fn = tbl_pkt_count_fn[i];
316                 if (pkt_count_fn)
317                         item_num += pkt_count_fn(gro_ctx->tbls[i]);
318         }
319
320         return item_num;
321 }