/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "rte_gro.h"
#include "gro_tcp4.h"
#include "gro_udp4.h"
#include "gro_vxlan_tcp4.h"
#include "gro_vxlan_udp4.h"

/* Callback prototypes for creating/destroying a reassembly table and
 * for counting the packets it currently buffers.
 */
typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
typedef void (*gro_tbl_destroy_fn)(void *tbl);
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);

static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,
		gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, NULL};
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
		gro_udp4_tbl_destroy, gro_vxlan_udp4_tbl_destroy,
		NULL};
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
		gro_udp4_tbl_pkt_count, gro_vxlan_udp4_tbl_pkt_count,
		NULL};

#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \
		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))

#define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))

#define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_TCP) == \
		 RTE_PTYPE_INNER_L4_TCP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))

#define IS_IPV4_VXLAN_UDP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_UDP) == \
		 RTE_PTYPE_INNER_L4_UDP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))

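/*
 * For example, a plain TCP/IPv4 packet classified by the PMD as
 * (RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP)
 * satisfies IS_IPV4_TCP_PKT(), whereas any tunnel bit set in the
 * packet type makes RTE_ETH_IS_TUNNEL_PKT() non-zero, so such a
 * packet can only match the VXLAN-specific predicates above.
 */
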
/*
 * GRO context structure. It keeps the table structures, which are
 * used to merge packets, for different GRO types. Before using
 * rte_gro_reassemble(), applications need to create the GRO context
 * first.
 */
struct gro_ctx {
	/* GRO types to perform */
	uint64_t gro_types;
	/* reassembly tables */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
};

void *
rte_gro_ctx_create(const struct rte_gro_param *param)
{
	struct gro_ctx *gro_ctx;
	gro_tbl_create_fn create_tbl_fn;
	uint64_t gro_type_flag = 0;
	uint64_t gro_types = 0;
	uint8_t i;

	gro_ctx = rte_zmalloc_socket(__func__,
			sizeof(struct gro_ctx),
			RTE_CACHE_LINE_SIZE,
			param->socket_id);
	if (gro_ctx == NULL)
		return NULL;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((param->gro_types & gro_type_flag) == 0)
			continue;

		create_tbl_fn = tbl_create_fn[i];
		if (create_tbl_fn == NULL)
			continue;

		gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
				param->max_flow_num,
				param->max_item_per_flow);
		if (gro_ctx->tbls[i] == NULL) {
			/* destroy all created tables */
			gro_ctx->gro_types = gro_types;
			rte_gro_ctx_destroy(gro_ctx);
			return NULL;
		}
		gro_types |= gro_type_flag;
	}
	gro_ctx->gro_types = param->gro_types;

	return gro_ctx;
}

void
rte_gro_ctx_destroy(void *ctx)
{
	gro_tbl_destroy_fn destroy_tbl_fn;
	struct gro_ctx *gro_ctx = ctx;
	uint64_t gro_type_flag;
	uint8_t i;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((gro_ctx->gro_types & gro_type_flag) == 0)
			continue;
		destroy_tbl_fn = tbl_destroy_fn[i];
		if (destroy_tbl_fn)
			destroy_tbl_fn(gro_ctx->tbls[i]);
	}
	rte_free(gro_ctx);
}

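/*
 * Usage sketch (illustrative, not part of the library): create a
 * context for TCP/IPv4 GRO on the caller's socket and release it when
 * done. The flow and item limits are arbitrary application choices.
 *
 *	struct rte_gro_param param = {
 *		.gro_types = RTE_GRO_TCP_IPV4,
 *		.max_flow_num = 1024,
 *		.max_item_per_flow = 32,
 *		.socket_id = rte_socket_id(),
 *	};
 *	void *gro_ctx = rte_gro_ctx_create(&param);
 *	if (gro_ctx == NULL)
 *		rte_exit(EXIT_FAILURE, "failed to create GRO context\n");
 *	...
 *	rte_gro_ctx_destroy(gro_ctx);
 */
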
uint16_t
rte_gro_reassemble_burst(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		const struct rte_gro_param *param)
{
	/* allocate a reassembly table for TCP/IPv4 GRO */
	struct gro_tcp4_tbl tcp_tbl;
	struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* allocate a reassembly table for UDP/IPv4 GRO */
	struct gro_udp4_tbl udp_tbl;
	struct gro_udp4_flow udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_udp4_item udp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* Allocate a reassembly table for VXLAN TCP GRO */
	struct gro_vxlan_tcp4_tbl vxlan_tcp_tbl;
	struct gro_vxlan_tcp4_flow vxlan_tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_tcp4_item vxlan_tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
			= {{{0}, 0, 0} };

	/* Allocate a reassembly table for VXLAN UDP GRO */
	struct gro_vxlan_udp4_tbl vxlan_udp_tbl;
	struct gro_vxlan_udp4_flow vxlan_udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_udp4_item vxlan_udp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
			= {{{0}} };

	struct rte_mbuf *unprocess_pkts[nb_pkts];
	uint32_t item_num;
	int32_t ret;
	uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
	uint8_t do_tcp4_gro = 0, do_vxlan_tcp_gro = 0, do_udp4_gro = 0,
		do_vxlan_udp_gro = 0;

	if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4 |
					RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
					RTE_GRO_UDP_IPV4)) == 0))
		return nb_pkts;

	/* Get the maximum number of packets */
	item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
				param->max_item_per_flow));
	item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);

	if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			vxlan_tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_tcp_tbl.flows = vxlan_tcp_flows;
		vxlan_tcp_tbl.items = vxlan_tcp_items;
		vxlan_tcp_tbl.flow_num = 0;
		vxlan_tcp_tbl.item_num = 0;
		vxlan_tcp_tbl.max_flow_num = item_num;
		vxlan_tcp_tbl.max_item_num = item_num;
		do_vxlan_tcp_gro = 1;
	}

	if (param->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) {
		for (i = 0; i < item_num; i++)
			vxlan_udp_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_udp_tbl.flows = vxlan_udp_flows;
		vxlan_udp_tbl.items = vxlan_udp_items;
		vxlan_udp_tbl.flow_num = 0;
		vxlan_udp_tbl.item_num = 0;
		vxlan_udp_tbl.max_flow_num = item_num;
		vxlan_udp_tbl.max_item_num = item_num;
		do_vxlan_udp_gro = 1;
	}

	if (param->gro_types & RTE_GRO_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		tcp_tbl.flows = tcp_flows;
		tcp_tbl.items = tcp_items;
		tcp_tbl.flow_num = 0;
		tcp_tbl.item_num = 0;
		tcp_tbl.max_flow_num = item_num;
		tcp_tbl.max_item_num = item_num;
		do_tcp4_gro = 1;
	}

	if (param->gro_types & RTE_GRO_UDP_IPV4) {
		for (i = 0; i < item_num; i++)
			udp_flows[i].start_index = INVALID_ARRAY_INDEX;

		udp_tbl.flows = udp_flows;
		udp_tbl.items = udp_items;
		udp_tbl.flow_num = 0;
		udp_tbl.item_num = 0;
		udp_tbl.max_flow_num = item_num;
		udp_tbl.max_item_num = item_num;
		do_udp4_gro = 1;
	}

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * The timestamp is ignored, since all packets
		 * will be flushed from the tables.
		 */
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_tcp_gro) {
			ret = gro_vxlan_tcp4_reassemble(pkts[i],
							&vxlan_tcp_tbl, 0);
			if (ret > 0)
				/* Merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_udp_gro) {
			ret = gro_vxlan_udp4_reassemble(pkts[i],
							&vxlan_udp_tbl, 0);
			if (ret > 0)
				/* Merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
			if (ret > 0)
				/* merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
				do_udp4_gro) {
			ret = gro_udp4_reassemble(pkts[i], &udp_tbl, 0);
			if (ret > 0)
				/* merge successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}

	if ((nb_after_gro < nb_pkts)
		 || (unprocess_num < nb_pkts)) {
		i = 0;
		/* Flush all packets from the tables */
		if (do_vxlan_tcp_gro) {
			i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,
					0, pkts, nb_pkts);
		}

		if (do_vxlan_udp_gro) {
			i += gro_vxlan_udp4_tbl_timeout_flush(&vxlan_udp_tbl,
					0, &pkts[i], nb_pkts - i);
		}

		if (do_tcp4_gro) {
			i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}

		if (do_udp4_gro) {
			i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}
		/* Copy unprocessed packets */
		if (unprocess_num > 0) {
			memcpy(&pkts[i], unprocess_pkts,
					sizeof(struct rte_mbuf *) *
					unprocess_num);
		}
		nb_after_gro = i + unprocess_num;
	}

	return nb_after_gro;
}

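/*
 * Usage sketch (illustrative): lightweight mode. The tables live on
 * the stack and all packets are flushed before the call returns, so
 * no GRO context is needed; 'port_id', 'queue_id' and 'param' are
 * hypothetical application values.
 *
 *	struct rte_mbuf *pkts[RTE_GRO_MAX_BURST_ITEM_NUM];
 *	uint16_t nb;
 *
 *	nb = rte_eth_rx_burst(port_id, queue_id, pkts,
 *			RTE_GRO_MAX_BURST_ITEM_NUM);
 *	nb = rte_gro_reassemble_burst(pkts, nb, &param);
 *
 * After the call, pkts[0..nb-1] holds the merged and unmergeable
 * packets, ready for further processing or transmission.
 */
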
uint16_t
rte_gro_reassemble(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *ctx)
{
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	struct gro_ctx *gro_ctx = ctx;
	void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl;
	uint64_t current_time;
	uint16_t i, unprocess_num = 0;
	uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro;

	if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4 |
					RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
					RTE_GRO_UDP_IPV4)) == 0))
		return nb_pkts;

	tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
	vxlan_tcp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
	udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];
	vxlan_udp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX];

	do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
		RTE_GRO_TCP_IPV4;
	do_vxlan_tcp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
		RTE_GRO_IPV4_VXLAN_TCP_IPV4;
	do_udp4_gro = (gro_ctx->gro_types & RTE_GRO_UDP_IPV4) ==
		RTE_GRO_UDP_IPV4;
	do_vxlan_udp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) ==
		RTE_GRO_IPV4_VXLAN_UDP_IPV4;

	current_time = rte_rdtsc();

	for (i = 0; i < nb_pkts; i++) {
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_tcp_gro) {
			if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tcp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_udp_gro) {
			if (gro_vxlan_udp4_reassemble(pkts[i], vxlan_udp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
				do_udp4_gro) {
			if (gro_udp4_reassemble(pkts[i], udp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}
	if (unprocess_num > 0) {
		memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
				unprocess_num);
	}

	return unprocess_num;
}

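/*
 * Note that, unlike rte_gro_reassemble_burst(), this function keeps
 * merged packets inside the context tables and returns only the
 * packets it could not process, compacted to the front of 'pkts'.
 * Merged packets must be retrieved later with rte_gro_timeout_flush().
 */
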
uint16_t
rte_gro_timeout_flush(void *ctx,
		uint64_t timeout_cycles,
		uint64_t gro_types,
		struct rte_mbuf **out,
		uint16_t max_nb_out)
{
	struct gro_ctx *gro_ctx = ctx;
	uint64_t flush_timestamp;
	uint16_t num = 0;
	uint16_t left_nb_out = max_nb_out;

	gro_types = gro_types & gro_ctx->gro_types;
	flush_timestamp = rte_rdtsc() - timeout_cycles;

	if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
				RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
				flush_timestamp, out, left_nb_out);
		left_nb_out = max_nb_out - num;
	}

	if ((gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) && left_nb_out > 0) {
		num += gro_vxlan_udp4_tbl_timeout_flush(gro_ctx->tbls[
				RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX],
				flush_timestamp, &out[num], left_nb_out);
		left_nb_out = max_nb_out - num;
	}

	/* If no available space in 'out', stop flushing. */
	if ((gro_types & RTE_GRO_TCP_IPV4) && left_nb_out > 0) {
		num += gro_tcp4_tbl_timeout_flush(
				gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
				flush_timestamp,
				&out[num], left_nb_out);
		left_nb_out = max_nb_out - num;
	}

	/* If no available space in 'out', stop flushing. */
	if ((gro_types & RTE_GRO_UDP_IPV4) && left_nb_out > 0) {
		num += gro_udp4_tbl_timeout_flush(
				gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],
				flush_timestamp,
				&out[num], left_nb_out);
	}

	return num;
}

uint64_t
rte_gro_get_pkt_count(void *ctx)
{
	struct gro_ctx *gro_ctx = ctx;
	gro_tbl_pkt_count_fn pkt_count_fn;
	uint64_t gro_types = gro_ctx->gro_types, flag;
	uint64_t item_num = 0;
	uint8_t i;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
		flag = 1ULL << i;
		if ((gro_types & flag) == 0)
			continue;

		gro_types ^= flag;
		pkt_count_fn = tbl_pkt_count_fn[i];
		if (pkt_count_fn)
			item_num += pkt_count_fn(gro_ctx->tbls[i]);
	}

	return item_num;
}

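/*
 * Usage sketch (illustrative): heavyweight-mode RX loop combining
 * rte_gro_reassemble() with a periodic flush of packets older than
 * roughly 10 us. 'gro_ctx' comes from rte_gro_ctx_create(); 'port_id',
 * 'queue_id' and the burst sizes are hypothetical application values.
 *
 *	struct rte_mbuf *pkts[64], *flushed[64];
 *	uint64_t timeout = rte_get_tsc_hz() / 100000;
 *	uint16_t nb, nb_flushed;
 *
 *	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 64);
 *	nb = rte_gro_reassemble(pkts, nb, gro_ctx);
 *	... process the nb unmerged packets left in pkts ...
 *	nb_flushed = rte_gro_timeout_flush(gro_ctx, timeout,
 *			RTE_GRO_TCP_IPV4, flushed, 64);
 *	... process flushed[0..nb_flushed-1] ...
 *
 * rte_gro_get_pkt_count() can be polled to check how many packets are
 * still buffered in the context tables.
 */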