1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_malloc.h>
7 #include <rte_cycles.h>
8 #include <rte_ethdev.h>
/* Constructor signature for a per-GRO-type reassembly table. */
typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
/* Destructor signature for a per-GRO-type reassembly table. */
typedef void (*gro_tbl_destroy_fn)(void *tbl);
/* Returns the number of packets currently buffered in a table. */
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);
/*
 * Dispatch tables indexed by GRO type. Only the TCP/IPv4 slot is
 * populated in this view; unused slots stay NULL so callers must
 * check before invoking.
 */
static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, NULL};
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_destroy, NULL};
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_pkt_count, NULL};
/*
 * True iff the mbuf packet_type marks an IPv4 packet whose L4
 * protocol is TCP. The argument is fully parenthesized so that
 * expression arguments (e.g. `a | b`) expand with the intended
 * precedence inside the bitwise test.
 */
#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		(((ptype) & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP))
 * GRO context structure. It keeps the table structures, which are
 * used to merge packets, for different GRO types. Before using
 * rte_gro_reassemble(), applications need to create the GRO context
	/* GRO types to perform */
	/* reassembly tables, one slot per GRO type */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
/*
 * Create a GRO context: allocate a zeroed gro_ctx on the requested
 * NUMA socket, then build one reassembly table for each GRO type
 * enabled in param->gro_types. On a table-creation failure the
 * already-built tables are torn down via rte_gro_ctx_destroy().
 */
rte_gro_ctx_create(const struct rte_gro_param *param)
	struct gro_ctx *gro_ctx;
	gro_tbl_create_fn create_tbl_fn;
	uint64_t gro_type_flag = 0;
	uint64_t gro_types = 0;
	/* zero-initialized allocation keyed by function name */
	gro_ctx = rte_zmalloc_socket(__func__,
			sizeof(struct gro_ctx),
	/* walk every possible GRO type bit */
	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((param->gro_types & gro_type_flag) == 0)
		/* skip types with no registered constructor */
		create_tbl_fn = tbl_create_fn[i];
		if (create_tbl_fn == NULL)
		gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
				param->max_item_per_flow);
		if (gro_ctx->tbls[i] == NULL) {
			/* destroy all created tables */
			gro_ctx->gro_types = gro_types;
			rte_gro_ctx_destroy(gro_ctx);
		/* record this type as successfully created */
		gro_types |= gro_type_flag;
	gro_ctx->gro_types = param->gro_types;
/*
 * Free a GRO context: for every GRO type enabled in the context,
 * invoke the matching table destructor from tbl_destroy_fn[].
 */
rte_gro_ctx_destroy(void *ctx)
	gro_tbl_destroy_fn destroy_tbl_fn;
	struct gro_ctx *gro_ctx = ctx;
	uint64_t gro_type_flag;
	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((gro_ctx->gro_types & gro_type_flag) == 0)
		/* NOTE(review): destroy_tbl_fn is presumably non-NULL
		 * whenever the type bit is set — confirm against the
		 * elided lines */
		destroy_tbl_fn = tbl_destroy_fn[i];
		destroy_tbl_fn(gro_ctx->tbls[i]);
/*
 * Merge a single burst of packets using a temporary on-stack
 * TCP/IPv4 table; nothing is retained between calls. Packets that
 * are not TCP/IPv4 or fail to merge are appended to pkts[] after
 * the merged ones.
 */
rte_gro_reassemble_burst(struct rte_mbuf **pkts,
		const struct rte_gro_param *param)
	/* allocate a reassembly table for TCP/IPv4 GRO */
	struct gro_tcp4_tbl tcp_tbl;
	struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
	/* this entry point only performs TCP/IPv4 GRO */
	if (unlikely((param->gro_types & RTE_GRO_TCP_IPV4) == 0))
	/* Get the maximum number of packets */
	item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
			param->max_item_per_flow));
	item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);
	/* mark every flow slot empty before use */
	for (i = 0; i < item_num; i++)
		tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
	/* point the table at the stack-allocated arrays */
	tcp_tbl.flows = tcp_flows;
	tcp_tbl.items = tcp_items;
	tcp_tbl.flow_num = 0;
	tcp_tbl.item_num = 0;
	tcp_tbl.max_flow_num = item_num;
	tcp_tbl.max_item_num = item_num;
	for (i = 0; i < nb_pkts; i++) {
		if (IS_IPV4_TCP_PKT(pkts[i]->packet_type)) {
			 * The timestamp is ignored, since all packets
			 * will be flushed from the tables.
			ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
			/* merge successfully */
				unprocess_pkts[unprocess_num++] = pkts[i];
			/* non-TCP/IPv4 packets pass through untouched */
			unprocess_pkts[unprocess_num++] = pkts[i];
	if (nb_after_gro < nb_pkts) {
		/* Flush all packets from the tables */
		i = gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0, pkts, nb_pkts);
		/* Copy unprocessed packets */
		if (unprocess_num > 0) {
			memcpy(&pkts[i], unprocess_pkts,
					sizeof(struct rte_mbuf *) *
/*
 * Merge packets into the persistent tables held by the GRO
 * context. Unlike the burst variant, merged packets stay buffered
 * until rte_gro_timeout_flush(); only unprocessed packets are
 * returned in pkts[].
 */
rte_gro_reassemble(struct rte_mbuf **pkts,
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	struct gro_ctx *gro_ctx = ctx;
	uint64_t current_time;
	uint16_t i, unprocess_num = 0;
	/* only TCP/IPv4 GRO is handled here */
	if (unlikely((gro_ctx->gro_types & RTE_GRO_TCP_IPV4) == 0))
	tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
	/* timestamp used for later timeout-based flushing */
	current_time = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		if (IS_IPV4_TCP_PKT(pkts[i]->packet_type)) {
			if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
				unprocess_pkts[unprocess_num++] = pkts[i];
			/* non-TCP/IPv4 packets are returned as-is */
			unprocess_pkts[unprocess_num++] = pkts[i];
	/* compact the unprocessed packets to the front of pkts[] */
	if (unprocess_num > 0) {
		memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
	return unprocess_num;
/*
 * Flush packets that have sat in the context's tables longer than
 * timeout_cycles (measured in TSC cycles) into out[].
 */
rte_gro_timeout_flush(void *ctx,
		uint64_t timeout_cycles,
		struct rte_mbuf **out,
	struct gro_ctx *gro_ctx = ctx;
	uint64_t flush_timestamp;
	/* only flush types this context actually enabled */
	gro_types = gro_types & gro_ctx->gro_types;
	/* packets inserted before this TSC stamp have timed out */
	flush_timestamp = rte_rdtsc() - timeout_cycles;
	if (gro_types & RTE_GRO_TCP_IPV4) {
		return gro_tcp4_tbl_timeout_flush(
				gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
/*
 * Count packets currently buffered across all of the context's
 * reassembly tables by summing each type's pkt_count callback.
 */
rte_gro_get_pkt_count(void *ctx)
	struct gro_ctx *gro_ctx = ctx;
	gro_tbl_pkt_count_fn pkt_count_fn;
	uint64_t gro_types = gro_ctx->gro_types, flag;
	uint64_t item_num = 0;
	/* the `&& gro_types` guard stops once all enabled bits are seen */
	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
		if ((gro_types & flag) == 0)
		/* NOTE(review): pkt_count_fn is presumably checked for
		 * NULL in the elided lines — confirm */
		pkt_count_fn = tbl_pkt_count_fn[i];
		item_num += pkt_count_fn(gro_ctx->tbls[i]);