1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_malloc.h>
7 #include <rte_cycles.h>
8 #include <rte_ethdev.h>
13 #include "gro_vxlan_tcp4.h"
14 #include "gro_vxlan_udp4.h"
/* Allocate a reassembly table for one GRO type on the given NUMA socket,
 * sized by flow count and items per flow; returns the table or NULL. */
16 typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
17 uint16_t max_flow_num,
18 uint16_t max_item_per_flow);
/* Free a reassembly table created by the matching create function. */
19 typedef void (*gro_tbl_destroy_fn)(void *tbl);
/* Report how many packets a reassembly table currently holds. */
20 typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);
/* Per-GRO-type dispatch tables. The slot order must match the
 * RTE_GRO_*_INDEX values used to index gro_ctx->tbls[] (see
 * rte_gro_reassemble() below): TCP/IPv4, VXLAN-TCP/IPv4, UDP/IPv4,
 * VXLAN-UDP/IPv4, then NULL padding up to RTE_GRO_TYPE_MAX_NUM. */
22 static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
23 gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,
24 gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, NULL};
25 static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
26 gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
/* NOTE(review): the trailing "NULL};" line of this initializer is
 * elided in this extract -- confirm against the full source. */
27 gro_udp4_tbl_destroy, gro_vxlan_udp4_tbl_destroy,
29 static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
30 gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
/* NOTE(review): trailing "NULL};" line also elided here. */
31 gro_udp4_tbl_pkt_count, gro_vxlan_udp4_tbl_pkt_count,
/* Plain IPv4/TCP packet: outer L3 is IPv4 and outer L4 is TCP. */
34 #define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
35 ((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP))
/* Plain IPv4/UDP packet: outer L3 is IPv4 and outer L4 is UDP. */
37 #define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
38 ((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP))
/* VXLAN-encapsulated TCP/IPv4: outer IPv4+UDP carrying a VXLAN tunnel
 * whose inner packet is IPv4 (any of the three inner-IPv4 ptypes) + TCP. */
40 #define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
41 ((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
42 ((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
43 RTE_PTYPE_TUNNEL_VXLAN) && \
44 ((ptype & RTE_PTYPE_INNER_L4_TCP) == \
45 RTE_PTYPE_INNER_L4_TCP) && \
46 (((ptype & RTE_PTYPE_INNER_L3_MASK) & \
47 (RTE_PTYPE_INNER_L3_IPV4 | \
48 RTE_PTYPE_INNER_L3_IPV4_EXT | \
49 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0))
/* VXLAN-encapsulated UDP/IPv4: same outer test as above, but the
 * inner L4 must be UDP. */
51 #define IS_IPV4_VXLAN_UDP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
52 ((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
53 ((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
54 RTE_PTYPE_TUNNEL_VXLAN) && \
55 ((ptype & RTE_PTYPE_INNER_L4_UDP) == \
56 RTE_PTYPE_INNER_L4_UDP) && \
57 (((ptype & RTE_PTYPE_INNER_L3_MASK) & \
58 (RTE_PTYPE_INNER_L3_IPV4 | \
59 RTE_PTYPE_INNER_L3_IPV4_EXT | \
60 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0))
/* NOTE(review): this extract elides the opening of the doc comment
 * below, the `struct gro_ctx {` line, and the `gro_types` member that
 * rte_gro_ctx_create()/destroy() read and write -- confirm against
 * the full source. */
63 * GRO context structure. It keeps the table structures, which are
64 * used to merge packets, for different GRO types. Before using
65 * rte_gro_reassemble(), applications need to create the GRO context
69 /* GRO types to perform */
71 /* reassembly tables */
/* One table per GRO type, indexed by the RTE_GRO_*_INDEX constants. */
72 void *tbls[RTE_GRO_TYPE_MAX_NUM];
/*
 * Create a GRO context holding one reassembly table per GRO type
 * requested in param->gro_types. The context itself is zero-allocated
 * on param->socket_id. If any table creation fails, the tables built
 * so far are released through rte_gro_ctx_destroy().
 * NOTE(review): the extract elides the return-type line, the
 * remaining rte_zmalloc_socket() arguments, the NULL check on the
 * allocation and the return statements -- presumably the context
 * pointer (or NULL on failure) is returned; confirm against the
 * full source.
 */
76 rte_gro_ctx_create(const struct rte_gro_param *param)
78 struct gro_ctx *gro_ctx;
79 gro_tbl_create_fn create_tbl_fn;
80 uint64_t gro_type_flag = 0;
/* Tracks which tables were actually created, so a failure path can
 * destroy exactly those. */
81 uint64_t gro_types = 0;
84 gro_ctx = rte_zmalloc_socket(__func__,
85 sizeof(struct gro_ctx),
91 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
92 gro_type_flag = 1ULL << i;
/* Skip GRO types the caller did not request. */
93 if ((param->gro_types & gro_type_flag) == 0)
96 create_tbl_fn = tbl_create_fn[i];
/* Slots past the implemented GRO types are NULL. */
97 if (create_tbl_fn == NULL)
100 gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
102 param->max_item_per_flow);
103 if (gro_ctx->tbls[i] == NULL) {
104 /* destroy all created tables */
105 gro_ctx->gro_types = gro_types;
106 rte_gro_ctx_destroy(gro_ctx);
109 gro_types |= gro_type_flag;
/* On success, record the full requested type mask. */
111 gro_ctx->gro_types = param->gro_types;
/*
 * Destroy a GRO context: walk the gro_types bitmask and call the
 * per-type destroy function on each table that was created.
 * NOTE(review): the extract elides the loop's skip statement, the
 * final rte_free() of the context, and the surrounding braces --
 * confirm against the full source.
 */
117 rte_gro_ctx_destroy(void *ctx)
119 gro_tbl_destroy_fn destroy_tbl_fn;
120 struct gro_ctx *gro_ctx = ctx;
121 uint64_t gro_type_flag;
124 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
125 gro_type_flag = 1ULL << i;
/* Only tables recorded in gro_types were created. */
126 if ((gro_ctx->gro_types & gro_type_flag) == 0)
128 destroy_tbl_fn = tbl_destroy_fn[i];
130 destroy_tbl_fn(gro_ctx->tbls[i]);
/*
 * One-shot GRO over a single burst: reassembly tables live on the
 * stack, every packet is offered to the matching table, and then all
 * tables are flushed back into pkts[] before returning, so no state
 * survives the call. Timestamps are passed as 0 throughout because
 * everything is flushed immediately.
 * NOTE(review): this extract elides the nb_pkts parameter line,
 * several `continue`/`goto`-style skip statements, the per-type
 * "&& do_*_gro" guards in the classification chain, and the return
 * statements -- presumably nb_after_gro (packet count after merging)
 * is returned; confirm against the full source.
 */
136 rte_gro_reassemble_burst(struct rte_mbuf **pkts,
138 const struct rte_gro_param *param)
140 /* allocate a reassembly table for TCP/IPv4 GRO */
141 struct gro_tcp4_tbl tcp_tbl;
142 struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
143 struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
145 /* allocate a reassembly table for UDP/IPv4 GRO */
146 struct gro_udp4_tbl udp_tbl;
147 struct gro_udp4_flow udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
148 struct gro_udp4_item udp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
150 /* Allocate a reassembly table for VXLAN TCP GRO */
151 struct gro_vxlan_tcp4_tbl vxlan_tcp_tbl;
152 struct gro_vxlan_tcp4_flow vxlan_tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
153 struct gro_vxlan_tcp4_item vxlan_tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
156 /* Allocate a reassembly table for VXLAN UDP GRO */
157 struct gro_vxlan_udp4_tbl vxlan_udp_tbl;
158 struct gro_vxlan_udp4_flow vxlan_udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
159 struct gro_vxlan_udp4_item vxlan_udp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
/* VLA sized by the burst; holds packets that could not be merged. */
162 struct rte_mbuf *unprocess_pkts[nb_pkts];
165 uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
166 uint8_t do_tcp4_gro = 0, do_vxlan_tcp_gro = 0, do_udp4_gro = 0,
167 do_vxlan_udp_gro = 0;
/* Nothing to do if none of the supported GRO types is requested. */
169 if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
171 RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
172 RTE_GRO_UDP_IPV4)) == 0))
175 /* Get the maximum number of packets */
176 item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
177 param->max_item_per_flow));
178 item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);
/* Initialize each requested table over its stack arrays; flows start
 * empty (INVALID_ARRAY_INDEX marks an unused flow slot). */
180 if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
181 for (i = 0; i < item_num; i++)
182 vxlan_tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
184 vxlan_tcp_tbl.flows = vxlan_tcp_flows;
185 vxlan_tcp_tbl.items = vxlan_tcp_items;
186 vxlan_tcp_tbl.flow_num = 0;
187 vxlan_tcp_tbl.item_num = 0;
188 vxlan_tcp_tbl.max_flow_num = item_num;
189 vxlan_tcp_tbl.max_item_num = item_num;
190 do_vxlan_tcp_gro = 1;
193 if (param->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) {
194 for (i = 0; i < item_num; i++)
195 vxlan_udp_flows[i].start_index = INVALID_ARRAY_INDEX;
197 vxlan_udp_tbl.flows = vxlan_udp_flows;
198 vxlan_udp_tbl.items = vxlan_udp_items;
199 vxlan_udp_tbl.flow_num = 0;
200 vxlan_udp_tbl.item_num = 0;
201 vxlan_udp_tbl.max_flow_num = item_num;
202 vxlan_udp_tbl.max_item_num = item_num;
203 do_vxlan_udp_gro = 1;
206 if (param->gro_types & RTE_GRO_TCP_IPV4) {
207 for (i = 0; i < item_num; i++)
208 tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
210 tcp_tbl.flows = tcp_flows;
211 tcp_tbl.items = tcp_items;
212 tcp_tbl.flow_num = 0;
213 tcp_tbl.item_num = 0;
214 tcp_tbl.max_flow_num = item_num;
215 tcp_tbl.max_item_num = item_num;
219 if (param->gro_types & RTE_GRO_UDP_IPV4) {
220 for (i = 0; i < item_num; i++)
221 udp_flows[i].start_index = INVALID_ARRAY_INDEX;
223 udp_tbl.flows = udp_flows;
224 udp_tbl.items = udp_items;
225 udp_tbl.flow_num = 0;
226 udp_tbl.item_num = 0;
227 udp_tbl.max_flow_num = item_num;
228 udp_tbl.max_item_num = item_num;
/* Classify each packet by ptype (most specific first: tunneled before
 * plain) and feed it to the matching table; packets that are not
 * merge candidates are set aside in unprocess_pkts[]. */
233 for (i = 0; i < nb_pkts; i++) {
235 * The timestamp is ignored, since all packets
236 * will be flushed from the tables.
238 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
240 ret = gro_vxlan_tcp4_reassemble(pkts[i],
243 /* Merge successfully */
246 unprocess_pkts[unprocess_num++] = pkts[i];
247 } else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
249 ret = gro_vxlan_udp4_reassemble(pkts[i],
252 /* Merge successfully */
255 unprocess_pkts[unprocess_num++] = pkts[i];
256 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
258 ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
260 /* merge successfully */
263 unprocess_pkts[unprocess_num++] = pkts[i];
264 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
266 ret = gro_udp4_reassemble(pkts[i], &udp_tbl, 0);
268 /* merge successfully */
271 unprocess_pkts[unprocess_num++] = pkts[i];
273 unprocess_pkts[unprocess_num++] = pkts[i];
/* Something was merged or set aside: rebuild pkts[] by flushing every
 * table (timestamp 0 flushes everything) and appending the
 * unprocessed packets. */
276 if ((nb_after_gro < nb_pkts)
277 || (unprocess_num < nb_pkts)) {
279 /* Flush all packets from the tables */
280 if (do_vxlan_tcp_gro) {
281 i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,
285 if (do_vxlan_udp_gro) {
286 i += gro_vxlan_udp4_tbl_timeout_flush(&vxlan_udp_tbl,
287 0, &pkts[i], nb_pkts - i);
292 i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
293 &pkts[i], nb_pkts - i);
297 i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,
298 &pkts[i], nb_pkts - i);
300 /* Copy unprocessed packets */
301 if (unprocess_num > 0) {
302 memcpy(&pkts[i], unprocess_pkts,
303 sizeof(struct rte_mbuf *) *
306 nb_after_gro = i + unprocess_num;
/*
 * Stateful GRO: offer each packet to the persistent tables held in the
 * GRO context, timestamped with rte_rdtsc(). Merged/queued packets
 * stay in the tables until rte_gro_timeout_flush(); packets that are
 * not candidates are compacted to the front of pkts[] and their count
 * returned.
 * NOTE(review): the extract elides the nb_pkts/ctx parameter lines,
 * the "&& do_*_gro" guards and the reassemble-call argument lines in
 * the classification chain -- confirm against the full source.
 */
313 rte_gro_reassemble(struct rte_mbuf **pkts,
317 struct rte_mbuf *unprocess_pkts[nb_pkts];
318 struct gro_ctx *gro_ctx = ctx;
319 void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl;
320 uint64_t current_time;
321 uint16_t i, unprocess_num = 0;
322 uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro;
/* Bail out if the context was not created with any supported type. */
324 if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
326 RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
327 RTE_GRO_UDP_IPV4)) == 0))
/* Resolve the persistent per-type tables from the context. */
330 tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
331 vxlan_tcp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
332 udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];
333 vxlan_udp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX];
335 do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
337 do_vxlan_tcp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
338 RTE_GRO_IPV4_VXLAN_TCP_IPV4;
339 do_udp4_gro = (gro_ctx->gro_types & RTE_GRO_UDP_IPV4) ==
341 do_vxlan_udp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) ==
342 RTE_GRO_IPV4_VXLAN_UDP_IPV4;
/* One timestamp for the whole burst; used later by timeout flushing. */
344 current_time = rte_rdtsc();
/* Most specific match first (tunneled before plain), same ordering as
 * rte_gro_reassemble_burst(). A negative reassemble result means the
 * packet could not be processed and is handed back to the caller. */
346 for (i = 0; i < nb_pkts; i++) {
347 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
349 if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tcp_tbl,
351 unprocess_pkts[unprocess_num++] = pkts[i];
352 } else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
354 if (gro_vxlan_udp4_reassemble(pkts[i], vxlan_udp_tbl,
356 unprocess_pkts[unprocess_num++] = pkts[i];
357 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
359 if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
361 unprocess_pkts[unprocess_num++] = pkts[i];
362 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
364 if (gro_udp4_reassemble(pkts[i], udp_tbl,
366 unprocess_pkts[unprocess_num++] = pkts[i];
368 unprocess_pkts[unprocess_num++] = pkts[i];
/* Return the unprocessed packets at the head of pkts[]. */
370 if (unprocess_num > 0) {
371 memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
375 return unprocess_num;
/*
 * Flush packets older than timeout_cycles from the context's tables
 * into out[], at most max_nb_out total. Each per-type flush appends
 * after the previous one (&out[num]) and left_nb_out tracks remaining
 * space so later types are skipped once out[] is full.
 * NOTE(review): the extract elides the gro_types parameter line, the
 * num declaration, the timestamp argument lines of the last two
 * flush calls, and the final return (presumably num) -- confirm
 * against the full source.
 */
379 rte_gro_timeout_flush(void *ctx,
380 uint64_t timeout_cycles,
382 struct rte_mbuf **out,
385 struct gro_ctx *gro_ctx = ctx;
386 uint64_t flush_timestamp;
388 uint16_t left_nb_out = max_nb_out;
/* Only flush types both requested by the caller and present in ctx. */
390 gro_types = gro_types & gro_ctx->gro_types;
/* Entries stamped at or before this instant have timed out. */
391 flush_timestamp = rte_rdtsc() - timeout_cycles;
393 if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
394 num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
395 RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
396 flush_timestamp, out, left_nb_out);
397 left_nb_out = max_nb_out - num;
400 if ((gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) && left_nb_out > 0) {
401 num += gro_vxlan_udp4_tbl_timeout_flush(gro_ctx->tbls[
402 RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX],
403 flush_timestamp, &out[num], left_nb_out);
404 left_nb_out = max_nb_out - num;
407 /* If no available space in 'out', stop flushing. */
408 if ((gro_types & RTE_GRO_TCP_IPV4) && left_nb_out > 0) {
409 num += gro_tcp4_tbl_timeout_flush(
410 gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
412 &out[num], left_nb_out);
413 left_nb_out = max_nb_out - num;
416 /* If no available space in 'out', stop flushing. */
417 if ((gro_types & RTE_GRO_UDP_IPV4) && left_nb_out > 0) {
418 num += gro_udp4_tbl_timeout_flush(
419 gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],
421 &out[num], left_nb_out);
428 rte_gro_get_pkt_count(void *ctx)
430 struct gro_ctx *gro_ctx = ctx;
431 gro_tbl_pkt_count_fn pkt_count_fn;
432 uint64_t gro_types = gro_ctx->gro_types, flag;
433 uint64_t item_num = 0;
436 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
438 if ((gro_types & flag) == 0)
442 pkt_count_fn = tbl_pkt_count_fn[i];
444 item_num += pkt_count_fn(gro_ctx->tbls[i]);