1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_malloc.h>
6 #include <rte_cycles.h>
7 #include <rte_ethdev.h>
12 #include "gro_vxlan_tcp4.h"
13 #include "gro_vxlan_udp4.h"
/*
 * Function-pointer signatures for the per-GRO-type reassembly-table
 * operations: create a table on a NUMA socket with given capacity,
 * destroy a table, and count the packets currently held in a table.
 * One implementation of each exists per supported GRO type and is
 * registered in the lookup tables that follow.
 */
15 typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
16 uint16_t max_flow_num,
17 uint16_t max_item_per_flow);
18 typedef void (*gro_tbl_destroy_fn)(void *tbl);
19 typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);
/*
 * Per-GRO-type operation dispatch tables.  The initializer order
 * (TCP/IPv4, VXLAN-TCP/IPv4, UDP/IPv4, VXLAN-UDP/IPv4) must match the
 * RTE_GRO_*_INDEX constants used to index gro_ctx->tbls[]; remaining
 * slots up to RTE_GRO_TYPE_MAX_NUM are NULL (unsupported types).
 */
21 static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
22 gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,
23 gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, NULL};
24 static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
25 gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
26 gro_udp4_tbl_destroy, gro_vxlan_udp4_tbl_destroy,
28 static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
29 gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
30 gro_udp4_tbl_pkt_count, gro_vxlan_udp4_tbl_pkt_count,
/*
 * Packet-type predicates used to steer each mbuf to the matching
 * reassembly path.  A packet qualifies for plain TCP/IPv4 or
 * UDP/IPv4 GRO only when it is IPv4, carries the matching L4
 * protocol, and is not tunnel-encapsulated (tunneled packets are
 * handled by the VXLAN predicates below).
 */
33 #define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
34 ((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \
35 (RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))
37 #define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
38 ((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
39 (RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))
/*
 * Match a VXLAN-encapsulated TCP/IPv4 packet: outer IPv4 + outer UDP
 * (the VXLAN transport), the VXLAN tunnel flag, inner TCP, and an
 * inner L3 that is IPv4 in any of its three ptype variants
 * (plain, EXT, or EXT_UNKNOWN).
 */
41 #define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
42 ((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
43 ((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
44 RTE_PTYPE_TUNNEL_VXLAN) && \
45 ((ptype & RTE_PTYPE_INNER_L4_TCP) == \
46 RTE_PTYPE_INNER_L4_TCP) && \
47 (((ptype & RTE_PTYPE_INNER_L3_MASK) == \
48 RTE_PTYPE_INNER_L3_IPV4) || \
49 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
50 RTE_PTYPE_INNER_L3_IPV4_EXT) || \
51 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
52 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))
/*
 * Same as IS_IPV4_VXLAN_TCP4_PKT but for an inner UDP payload:
 * outer IPv4 + outer UDP, VXLAN tunnel flag, inner UDP, and inner
 * IPv4 in any of its three ptype variants.
 */
54 #define IS_IPV4_VXLAN_UDP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
55 ((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
56 ((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
57 RTE_PTYPE_TUNNEL_VXLAN) && \
58 ((ptype & RTE_PTYPE_INNER_L4_UDP) == \
59 RTE_PTYPE_INNER_L4_UDP) && \
60 (((ptype & RTE_PTYPE_INNER_L3_MASK) == \
61 RTE_PTYPE_INNER_L3_IPV4) || \
62 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
63 RTE_PTYPE_INNER_L3_IPV4_EXT) || \
64 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
65 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))
68 * GRO context structure. It keeps the table structures, which are
69 * used to merge packets, for different GRO types. Before using
70 * rte_gro_reassemble(), applications need to create the GRO context
74 /* GRO types to perform */
76 /* reassembly tables */
77 void *tbls[RTE_GRO_TYPE_MAX_NUM];
/* NOTE(review): tbls[i] is allocated by tbl_create_fn[i] in
 * rte_gro_ctx_create() and freed by tbl_destroy_fn[i] in
 * rte_gro_ctx_destroy(); only slots whose type bit is set in
 * gro_types are valid. */
/*
 * Create a GRO context for the GRO types requested in
 * param->gro_types, allocating one reassembly table per requested
 * (and supported) type via tbl_create_fn[].  If any table allocation
 * fails, gro_ctx->gro_types is set to the subset of types created so
 * far and rte_gro_ctx_destroy() tears down exactly those tables.
 */
81 rte_gro_ctx_create(const struct rte_gro_param *param)
83 struct gro_ctx *gro_ctx;
84 gro_tbl_create_fn create_tbl_fn;
85 uint64_t gro_type_flag = 0;
86 uint64_t gro_types = 0;
/* Zeroed socket-local allocation leaves all table slots NULL. */
89 gro_ctx = rte_zmalloc_socket(__func__,
90 sizeof(struct gro_ctx),
96 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
97 gro_type_flag = 1ULL << i;
/* Skip GRO types the caller did not request. */
98 if ((param->gro_types & gro_type_flag) == 0)
/* Skip requested types that have no implementation registered. */
101 create_tbl_fn = tbl_create_fn[i];
102 if (create_tbl_fn == NULL)
105 gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
107 param->max_item_per_flow);
108 if (gro_ctx->tbls[i] == NULL) {
109 /* destroy all created tables */
110 gro_ctx->gro_types = gro_types;
111 rte_gro_ctx_destroy(gro_ctx);
/* Record each successfully created type for cleanup tracking. */
114 gro_types |= gro_type_flag;
116 gro_ctx->gro_types = param->gro_types;
/*
 * Destroy a GRO context: walk the context's type bitmask and release
 * each allocated reassembly table through its type-specific
 * destructor from tbl_destroy_fn[].
 * NOTE(review): the NULL-check of ctx and the final rte_free() of the
 * context itself are not visible in this chunk -- confirm they exist
 * in the elided lines.
 */
122 rte_gro_ctx_destroy(void *ctx)
124 gro_tbl_destroy_fn destroy_tbl_fn;
125 struct gro_ctx *gro_ctx = ctx;
126 uint64_t gro_type_flag;
129 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
130 gro_type_flag = 1ULL << i;
/* Only types recorded in gro_types have a table to free. */
131 if ((gro_ctx->gro_types & gro_type_flag) == 0)
133 destroy_tbl_fn = tbl_destroy_fn[i];
135 destroy_tbl_fn(gro_ctx->tbls[i]);
/*
 * Lightweight burst-mode GRO: merge nb_pkts mbufs using short-lived
 * reassembly tables allocated on the stack (no GRO context needed),
 * then immediately flush every table back into pkts[].  Packets whose
 * ptype matches no enabled GRO type are passed through and re-packed
 * at the tail of pkts[].
 * NOTE(review): the return statement is elided from this chunk; from
 * the nb_after_gro bookkeeping it presumably returns the number of
 * packets after merging -- confirm against the full source.
 */
141 rte_gro_reassemble_burst(struct rte_mbuf **pkts,
143 const struct rte_gro_param *param)
145 /* allocate a reassembly table for TCP/IPv4 GRO */
146 struct gro_tcp4_tbl tcp_tbl;
147 struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
148 struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
150 /* allocate a reassembly table for UDP/IPv4 GRO */
151 struct gro_udp4_tbl udp_tbl;
152 struct gro_udp4_flow udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
153 struct gro_udp4_item udp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
155 /* Allocate a reassembly table for VXLAN TCP GRO */
156 struct gro_vxlan_tcp4_tbl vxlan_tcp_tbl;
157 struct gro_vxlan_tcp4_flow vxlan_tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
158 struct gro_vxlan_tcp4_item vxlan_tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
161 /* Allocate a reassembly table for VXLAN UDP GRO */
162 struct gro_vxlan_udp4_tbl vxlan_udp_tbl;
163 struct gro_vxlan_udp4_flow vxlan_udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
164 struct gro_vxlan_udp4_item vxlan_udp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
167 struct rte_mbuf *unprocess_pkts[nb_pkts];
170 uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
171 uint8_t do_tcp4_gro = 0, do_vxlan_tcp_gro = 0, do_udp4_gro = 0,
172 do_vxlan_udp_gro = 0;
/* Nothing to do when none of the supported GRO types is enabled. */
174 if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
176 RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
177 RTE_GRO_UDP_IPV4)) == 0))
180 /* Get the maximum number of packets */
181 item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
182 param->max_item_per_flow));
183 item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);
/*
 * Initialize a stack table for each enabled GRO type: capacity is
 * item_num flows/items, and every flow slot starts invalid.
 */
185 if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
186 for (i = 0; i < item_num; i++)
187 vxlan_tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
189 vxlan_tcp_tbl.flows = vxlan_tcp_flows;
190 vxlan_tcp_tbl.items = vxlan_tcp_items;
191 vxlan_tcp_tbl.flow_num = 0;
192 vxlan_tcp_tbl.item_num = 0;
193 vxlan_tcp_tbl.max_flow_num = item_num;
194 vxlan_tcp_tbl.max_item_num = item_num;
195 do_vxlan_tcp_gro = 1;
198 if (param->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) {
199 for (i = 0; i < item_num; i++)
200 vxlan_udp_flows[i].start_index = INVALID_ARRAY_INDEX;
202 vxlan_udp_tbl.flows = vxlan_udp_flows;
203 vxlan_udp_tbl.items = vxlan_udp_items;
204 vxlan_udp_tbl.flow_num = 0;
205 vxlan_udp_tbl.item_num = 0;
206 vxlan_udp_tbl.max_flow_num = item_num;
207 vxlan_udp_tbl.max_item_num = item_num;
208 do_vxlan_udp_gro = 1;
211 if (param->gro_types & RTE_GRO_TCP_IPV4) {
212 for (i = 0; i < item_num; i++)
213 tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
215 tcp_tbl.flows = tcp_flows;
216 tcp_tbl.items = tcp_items;
217 tcp_tbl.flow_num = 0;
218 tcp_tbl.item_num = 0;
219 tcp_tbl.max_flow_num = item_num;
220 tcp_tbl.max_item_num = item_num;
224 if (param->gro_types & RTE_GRO_UDP_IPV4) {
225 for (i = 0; i < item_num; i++)
226 udp_flows[i].start_index = INVALID_ARRAY_INDEX;
228 udp_tbl.flows = udp_flows;
229 udp_tbl.items = udp_items;
230 udp_tbl.flow_num = 0;
231 udp_tbl.item_num = 0;
232 udp_tbl.max_flow_num = item_num;
233 udp_tbl.max_item_num = item_num;
/*
 * Classify each packet by its ptype and feed it into the matching
 * table; packets that fail classification or cannot be inserted are
 * collected in unprocess_pkts[].
 */
238 for (i = 0; i < nb_pkts; i++) {
240 * The timestamp is ignored, since all packets
241 * will be flushed from the tables.
243 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
245 ret = gro_vxlan_tcp4_reassemble(pkts[i],
248 /* Merge successfully */
251 unprocess_pkts[unprocess_num++] = pkts[i];
252 } else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
254 ret = gro_vxlan_udp4_reassemble(pkts[i],
257 /* Merge successfully */
260 unprocess_pkts[unprocess_num++] = pkts[i];
261 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
263 ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
265 /* merge successfully */
268 unprocess_pkts[unprocess_num++] = pkts[i];
269 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
271 ret = gro_udp4_reassemble(pkts[i], &udp_tbl, 0);
273 /* merge successfully */
276 unprocess_pkts[unprocess_num++] = pkts[i];
278 unprocess_pkts[unprocess_num++] = pkts[i];
/* Repack pkts[] only if some packet was merged or set aside. */
281 if ((nb_after_gro < nb_pkts)
282 || (unprocess_num < nb_pkts)) {
284 /* Flush all packets from the tables */
285 if (do_vxlan_tcp_gro) {
286 i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,
290 if (do_vxlan_udp_gro) {
291 i += gro_vxlan_udp4_tbl_timeout_flush(&vxlan_udp_tbl,
292 0, &pkts[i], nb_pkts - i);
297 i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
298 &pkts[i], nb_pkts - i);
302 i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,
303 &pkts[i], nb_pkts - i);
305 /* Copy unprocessed packets */
306 if (unprocess_num > 0) {
307 memcpy(&pkts[i], unprocess_pkts,
308 sizeof(struct rte_mbuf *) *
311 nb_after_gro = i + unprocess_num;
/*
 * Reassemble a burst of packets against the long-lived tables held in
 * the GRO context.  Merged packets remain inside the context tables
 * until rte_gro_timeout_flush() retrieves them; packets that cannot
 * be processed are compacted to the front of pkts[] and their count
 * is returned.
 */
318 rte_gro_reassemble(struct rte_mbuf **pkts,
322 struct rte_mbuf *unprocess_pkts[nb_pkts];
323 struct gro_ctx *gro_ctx = ctx;
324 void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl;
325 uint64_t current_time;
326 uint16_t i, unprocess_num = 0;
327 uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro;
/* Nothing to do when the context enables no supported GRO type. */
329 if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
331 RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
332 RTE_GRO_UDP_IPV4)) == 0))
/* Cache the per-type table pointers from the context. */
335 tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
336 vxlan_tcp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
337 udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];
338 vxlan_udp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX];
/* Precompute which GRO types this context enables. */
340 do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
342 do_vxlan_tcp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
343 RTE_GRO_IPV4_VXLAN_TCP_IPV4;
344 do_udp4_gro = (gro_ctx->gro_types & RTE_GRO_UDP_IPV4) ==
346 do_vxlan_udp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) ==
347 RTE_GRO_IPV4_VXLAN_UDP_IPV4;
/* NOTE(review): current_time is presumably passed to each
 * *_reassemble() call (argument lines elided) so flushing can honor
 * the timeout -- confirm against the full source. */
349 current_time = rte_rdtsc();
351 for (i = 0; i < nb_pkts; i++) {
352 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
354 if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tcp_tbl,
356 unprocess_pkts[unprocess_num++] = pkts[i];
357 } else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
359 if (gro_vxlan_udp4_reassemble(pkts[i], vxlan_udp_tbl,
361 unprocess_pkts[unprocess_num++] = pkts[i];
362 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
364 if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
366 unprocess_pkts[unprocess_num++] = pkts[i];
367 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
369 if (gro_udp4_reassemble(pkts[i], udp_tbl,
371 unprocess_pkts[unprocess_num++] = pkts[i];
373 unprocess_pkts[unprocess_num++] = pkts[i];
/* Compact the unprocessed packets to the front of pkts[]. */
375 if (unprocess_num > 0) {
376 memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
380 return unprocess_num;
/*
 * Flush packets that have stayed in the context tables longer than
 * timeout_cycles into out[] (up to max_nb_out), visiting the enabled
 * types in a fixed order: VXLAN-TCP, VXLAN-UDP, TCP, UDP.  A later
 * type is only flushed while out[] still has room.
 * NOTE(review): 'num' is first assigned inside the VXLAN-TCP branch;
 * its zero-initializing declaration is presumably in the elided lines
 * of this chunk -- confirm.
 */
384 rte_gro_timeout_flush(void *ctx,
385 uint64_t timeout_cycles,
387 struct rte_mbuf **out,
390 struct gro_ctx *gro_ctx = ctx;
391 uint64_t flush_timestamp;
393 uint16_t left_nb_out = max_nb_out;
/* Only flush types that are both requested and enabled in ctx. */
395 gro_types = gro_types & gro_ctx->gro_types;
/* Anything inserted before this timestamp has timed out. */
396 flush_timestamp = rte_rdtsc() - timeout_cycles;
398 if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
399 num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
400 RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
401 flush_timestamp, out, left_nb_out);
402 left_nb_out = max_nb_out - num;
405 if ((gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) && left_nb_out > 0) {
406 num += gro_vxlan_udp4_tbl_timeout_flush(gro_ctx->tbls[
407 RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX],
408 flush_timestamp, &out[num], left_nb_out);
409 left_nb_out = max_nb_out - num;
412 /* If no available space in 'out', stop flushing. */
413 if ((gro_types & RTE_GRO_TCP_IPV4) && left_nb_out > 0) {
414 num += gro_tcp4_tbl_timeout_flush(
415 gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
417 &out[num], left_nb_out);
418 left_nb_out = max_nb_out - num;
421 /* If no available space in 'out', stop flushing. */
422 if ((gro_types & RTE_GRO_UDP_IPV4) && left_nb_out > 0) {
423 num += gro_udp4_tbl_timeout_flush(
424 gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],
426 &out[num], left_nb_out);
433 rte_gro_get_pkt_count(void *ctx)
435 struct gro_ctx *gro_ctx = ctx;
436 gro_tbl_pkt_count_fn pkt_count_fn;
437 uint64_t gro_types = gro_ctx->gro_types, flag;
438 uint64_t item_num = 0;
441 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
443 if ((gro_types & flag) == 0)
447 pkt_count_fn = tbl_pkt_count_fn[i];
449 item_num += pkt_count_fn(gro_ctx->tbls[i]);