/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "gro_tcp4.h"
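
/*
 * TCP/IPv4 GRO (Generic Receive Offload) reassembly.
 *
 * The reassembly table keeps two flat arrays: 'flows', keyed by the
 * Ethernet addresses, IPv4 addresses, TCP ports and the TCP ACK number,
 * and 'items', which hold the mbufs waiting to be merged. Items that
 * belong to the same flow are chained through next_pkt_idx, and
 * INVALID_ARRAY_INDEX marks an empty slot in either array.
 */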

void *
gro_tcp4_tbl_create(uint16_t socket_id,
        uint16_t max_flow_num,
        uint16_t max_item_per_flow)
{
    struct gro_tcp4_tbl *tbl;
    size_t size;
    uint32_t entries_num, i;

    entries_num = max_flow_num * max_item_per_flow;
    entries_num = RTE_MIN(entries_num, GRO_TCP4_TBL_MAX_ITEM_NUM);

    tbl = rte_zmalloc_socket(__func__, sizeof(struct gro_tcp4_tbl),
            RTE_CACHE_LINE_SIZE, socket_id);
    if (tbl == NULL)
        return NULL;

    size = sizeof(struct gro_tcp4_item) * entries_num;
    tbl->items = rte_zmalloc_socket(__func__, size,
            RTE_CACHE_LINE_SIZE, socket_id);
    if (tbl->items == NULL) {
        rte_free(tbl);
        return NULL;
    }
    tbl->max_item_num = entries_num;

    size = sizeof(struct gro_tcp4_flow) * entries_num;
    tbl->flows = rte_zmalloc_socket(__func__, size,
            RTE_CACHE_LINE_SIZE, socket_id);
    if (tbl->flows == NULL) {
        rte_free(tbl->items);
        rte_free(tbl);
        return NULL;
    }
    /* INVALID_ARRAY_INDEX indicates an empty flow */
    for (i = 0; i < entries_num; i++)
        tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
    tbl->max_flow_num = entries_num;

    return tbl;
}

void
gro_tcp4_tbl_destroy(void *tbl)
{
    struct gro_tcp4_tbl *tcp_tbl = tbl;

    if (tcp_tbl) {
        rte_free(tcp_tbl->items);
        rte_free(tcp_tbl->flows);
    }
    rte_free(tcp_tbl);
}

/*
 * merge two TCP/IPv4 packets without updating checksums.
 * If cmp is larger than 0, append the new packet to the
 * original packet. Otherwise, pre-pend the new packet to
 * the original packet.
 */
static inline int
merge_two_tcp4_packets(struct gro_tcp4_item *item,
        struct rte_mbuf *pkt,
        int cmp,
        uint32_t sent_seq,
        uint16_t ip_id)
{
    struct rte_mbuf *pkt_head, *pkt_tail, *lastseg;
    uint16_t hdr_len;

    if (cmp > 0) {
        pkt_head = item->firstseg;
        pkt_tail = pkt;
    } else {
        pkt_head = pkt;
        pkt_tail = item->firstseg;
    }

    /* check if the IPv4 packet length is greater than the max value */
    hdr_len = pkt_head->l2_len + pkt_head->l3_len + pkt_head->l4_len;
    if (unlikely(pkt_head->pkt_len - pkt_head->l2_len + pkt_tail->pkt_len -
            hdr_len > MAX_IPV4_PKT_LENGTH))
        return 0;

    /* remove the packet header for the tail packet */
    rte_pktmbuf_adj(pkt_tail, hdr_len);

    /* chain two packets together */
    if (cmp > 0) {
        item->lastseg->next = pkt;
        item->lastseg = rte_pktmbuf_lastseg(pkt);
        /* update IP ID to the larger value */
        item->ip_id = ip_id;
    } else {
        lastseg = rte_pktmbuf_lastseg(pkt);
        lastseg->next = item->firstseg;
        item->firstseg = pkt;
        /* update sent_seq to the smaller value */
        item->sent_seq = sent_seq;
    }
    item->nb_merged++;

    /* update mbuf metadata for the merged packet */
    pkt_head->nb_segs += pkt_tail->nb_segs;
    pkt_head->pkt_len += pkt_tail->pkt_len;

    return 1;
}
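
/*
 * Example for merge_two_tcp4_packets() above: appending a 1514-byte
 * frame (l2_len 14, l3_len 20, l4_len 20) strips its 54-byte header
 * with rte_pktmbuf_adj() and grows the merged mbuf chain by the
 * remaining 1460 bytes of TCP payload; checksums are not updated.
 */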

/*
 * Check if two TCP/IPv4 packets are neighbors.
 */
static inline int
check_seq_option(struct gro_tcp4_item *item,
        struct tcp_hdr *tcph,
        uint32_t sent_seq,
        uint16_t ip_id,
        uint16_t tcp_hl,
        uint16_t tcp_dl,
        uint8_t is_atomic)
{
    struct rte_mbuf *pkt_orig = item->firstseg;
    struct ipv4_hdr *iph_orig;
    struct tcp_hdr *tcph_orig;
    uint16_t len, tcp_hl_orig;

    iph_orig = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) +
            pkt_orig->l2_len);
    tcph_orig = (struct tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len);
    tcp_hl_orig = pkt_orig->l4_len;

    /* Check if TCP option fields equal */
    len = RTE_MAX(tcp_hl, tcp_hl_orig) - sizeof(struct tcp_hdr);
    if ((tcp_hl != tcp_hl_orig) ||
            ((len > 0) && (memcmp(tcph + 1, tcph_orig + 1,
                    len) != 0)))
        return 0;

    /* Don't merge packets whose DF bits are different */
    if (unlikely(item->is_atomic ^ is_atomic))
        return 0;

    /* check if the two packets are neighbors */
    len = pkt_orig->pkt_len - pkt_orig->l2_len - pkt_orig->l3_len -
            tcp_hl_orig;
    if ((sent_seq == item->sent_seq + len) && (is_atomic ||
            (ip_id == item->ip_id + 1)))
        /* append the new packet */
        return 1;
    else if ((sent_seq + tcp_dl == item->sent_seq) && (is_atomic ||
            (ip_id + item->nb_merged == item->ip_id)))
        /* pre-pend the new packet */
        return -1;

    return 0;
}
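
/*
 * Example for check_seq_option() above: if the stored item starts at
 * sent_seq 1000 with 1460 bytes of payload, a packet whose sequence
 * number is 2460 is appended (return 1), and a packet whose sequence
 * number plus payload length equals 1000 is pre-pended (return -1),
 * assuming the DF bit is set; otherwise the IPv4 ID must also be
 * contiguous. Any other sequence number is not a neighbor (return 0).
 */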

static inline uint32_t
find_an_empty_item(struct gro_tcp4_tbl *tbl)
{
    uint32_t i;
    uint32_t max_item_num = tbl->max_item_num;

    for (i = 0; i < max_item_num; i++)
        if (tbl->items[i].firstseg == NULL)
            return i;
    return INVALID_ARRAY_INDEX;
}

static inline uint32_t
find_an_empty_flow(struct gro_tcp4_tbl *tbl)
{
    uint32_t i;
    uint32_t max_flow_num = tbl->max_flow_num;

    for (i = 0; i < max_flow_num; i++)
        if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
            return i;
    return INVALID_ARRAY_INDEX;
}
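
/*
 * Items that belong to one flow form a singly linked list through
 * next_pkt_idx: insert_new_item() links a new item right after
 * prev_idx when prev_idx is valid, and delete_item() unlinks it.
 */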

static inline uint32_t
insert_new_item(struct gro_tcp4_tbl *tbl,
        struct rte_mbuf *pkt,
        uint64_t start_time,
        uint32_t prev_idx,
        uint32_t sent_seq,
        uint16_t ip_id,
        uint8_t is_atomic)
{
    uint32_t item_idx;

    item_idx = find_an_empty_item(tbl);
    if (item_idx == INVALID_ARRAY_INDEX)
        return INVALID_ARRAY_INDEX;

    tbl->items[item_idx].firstseg = pkt;
    tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
    tbl->items[item_idx].start_time = start_time;
    tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
    tbl->items[item_idx].sent_seq = sent_seq;
    tbl->items[item_idx].ip_id = ip_id;
    tbl->items[item_idx].nb_merged = 1;
    tbl->items[item_idx].is_atomic = is_atomic;
    tbl->item_num++;

    /* if the previous packet exists, chain them together. */
    if (prev_idx != INVALID_ARRAY_INDEX) {
        tbl->items[item_idx].next_pkt_idx =
            tbl->items[prev_idx].next_pkt_idx;
        tbl->items[prev_idx].next_pkt_idx = item_idx;
    }

    return item_idx;
}

static inline uint32_t
delete_item(struct gro_tcp4_tbl *tbl, uint32_t item_idx,
        uint32_t prev_item_idx)
{
    uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;

    /* NULL indicates an empty item */
    tbl->items[item_idx].firstseg = NULL;
    tbl->item_num--;
    if (prev_item_idx != INVALID_ARRAY_INDEX)
        tbl->items[prev_item_idx].next_pkt_idx = next_idx;

    return next_idx;
}

static inline uint32_t
insert_new_flow(struct gro_tcp4_tbl *tbl,
        struct tcp4_flow_key *src,
        uint32_t item_idx)
{
    struct tcp4_flow_key *dst;
    uint32_t flow_idx;

    flow_idx = find_an_empty_flow(tbl);
    if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
        return INVALID_ARRAY_INDEX;

    dst = &(tbl->flows[flow_idx].key);

    ether_addr_copy(&(src->eth_saddr), &(dst->eth_saddr));
    ether_addr_copy(&(src->eth_daddr), &(dst->eth_daddr));
    dst->ip_src_addr = src->ip_src_addr;
    dst->ip_dst_addr = src->ip_dst_addr;
    dst->recv_ack = src->recv_ack;
    dst->src_port = src->src_port;
    dst->dst_port = src->dst_port;

    tbl->flows[flow_idx].start_index = item_idx;
    tbl->flow_num++;

    return flow_idx;
}

/*
 * Check if two TCP/IPv4 packets belong to the same flow.
 */
static inline int
is_same_tcp4_flow(struct tcp4_flow_key k1, struct tcp4_flow_key k2)
{
    return (is_same_ether_addr(&k1.eth_saddr, &k2.eth_saddr) &&
            is_same_ether_addr(&k1.eth_daddr, &k2.eth_daddr) &&
            (k1.ip_src_addr == k2.ip_src_addr) &&
            (k1.ip_dst_addr == k2.ip_dst_addr) &&
            (k1.recv_ack == k2.recv_ack) &&
            (k1.src_port == k2.src_port) &&
            (k1.dst_port == k2.dst_port));
}

/*
 * Update the IPv4 total_length field of the flushed packet.
 */
static inline void
update_header(struct gro_tcp4_item *item)
{
    struct ipv4_hdr *ipv4_hdr;
    struct rte_mbuf *pkt = item->firstseg;

    ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
            pkt->l2_len);
    ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
            pkt->l2_len);
}
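
/*
 * gro_tcp4_reassemble() below returns 1 when the input packet is merged
 * into an existing item, 0 when it is stored in the table as a new item
 * (and possibly a new flow), and -1 when it cannot be processed (flags
 * other than ACK, no payload, or a full table); in the -1 case the
 * caller keeps the original mbuf.
 */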

int32_t
gro_tcp4_reassemble(struct rte_mbuf *pkt,
        struct gro_tcp4_tbl *tbl,
        uint64_t start_time)
{
    struct ether_hdr *eth_hdr;
    struct ipv4_hdr *ipv4_hdr;
    struct tcp_hdr *tcp_hdr;
    uint32_t sent_seq;
    uint16_t tcp_dl, ip_id, hdr_len, frag_off;
    uint8_t is_atomic;

    struct tcp4_flow_key key;
    uint32_t cur_idx, prev_idx, item_idx;
    uint32_t i, max_flow_num, remaining_flow_num;
    int cmp;
    uint8_t find;

    eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
    ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
    tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
    hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;

    /*
     * Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE
     * or CWR set.
     */
    if (tcp_hdr->tcp_flags != TCP_ACK_FLAG)
        return -1;
    /*
     * Don't process the packet whose payload length is less than or
     * equal to 0.
     */
    tcp_dl = pkt->pkt_len - hdr_len;
    if (tcp_dl <= 0)
        return -1;

    /*
     * Save IPv4 ID for the packet whose DF bit is 0. For the packet
     * whose DF bit is 1, IPv4 ID is ignored.
     */
    frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
    is_atomic = (frag_off & IPV4_HDR_DF_FLAG) == IPV4_HDR_DF_FLAG;
    ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);
    sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

    ether_addr_copy(&(eth_hdr->s_addr), &(key.eth_saddr));
    ether_addr_copy(&(eth_hdr->d_addr), &(key.eth_daddr));
    key.ip_src_addr = ipv4_hdr->src_addr;
    key.ip_dst_addr = ipv4_hdr->dst_addr;
    key.src_port = tcp_hdr->src_port;
    key.dst_port = tcp_hdr->dst_port;
    key.recv_ack = tcp_hdr->recv_ack;

    /* Search for a matched flow. */
    max_flow_num = tbl->max_flow_num;
    remaining_flow_num = tbl->flow_num;
    find = 0;
    for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
        if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
            if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
                find = 1;
                break;
            }
            remaining_flow_num--;
        }
    }

    /*
     * Fail to find a matched flow. Insert a new flow and store the
     * packet into the flow.
     */
    if (find == 0) {
        item_idx = insert_new_item(tbl, pkt, start_time,
                INVALID_ARRAY_INDEX, sent_seq, ip_id,
                is_atomic);
        if (item_idx == INVALID_ARRAY_INDEX)
            return -1;
        if (insert_new_flow(tbl, &key, item_idx) ==
                INVALID_ARRAY_INDEX) {
            /*
             * Fail to insert a new flow, so delete the
             * stored packet.
             */
            delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
            return -1;
        }
        return 0;
    }

    /*
     * Check all packets in the flow and try to find a neighbor for
     * the input packet.
     */
    cur_idx = tbl->flows[i].start_index;
    prev_idx = cur_idx;
    do {
        cmp = check_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
                sent_seq, ip_id, pkt->l4_len, tcp_dl,
                is_atomic);
        if (cmp) {
            if (merge_two_tcp4_packets(&(tbl->items[cur_idx]),
                    pkt, cmp, sent_seq, ip_id))
                return 1;
            /*
             * Fail to merge the two packets, as the packet
             * length is greater than the max value. Store
             * the packet into the flow.
             */
            if (insert_new_item(tbl, pkt, start_time, prev_idx,
                    sent_seq, ip_id, is_atomic) ==
                    INVALID_ARRAY_INDEX)
                return -1;
            return 0;
        }
        prev_idx = cur_idx;
        cur_idx = tbl->items[cur_idx].next_pkt_idx;
    } while (cur_idx != INVALID_ARRAY_INDEX);

    /* Fail to find a neighbor, so store the packet into the flow. */
    if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
            ip_id, is_atomic) == INVALID_ARRAY_INDEX)
        return -1;

    return 0;
}
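
/*
 * gro_tcp4_tbl_timeout_flush() below outputs every packet whose
 * start_time is not later than flush_timestamp, finalizing the IPv4
 * total_length of merged packets via update_header(), and returns the
 * number of mbufs written to out[].
 */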

uint16_t
gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
        uint64_t flush_timestamp,
        struct rte_mbuf **out,
        uint16_t nb_out)
{
    uint16_t k = 0;
    uint32_t i, j;
    uint32_t max_flow_num = tbl->max_flow_num;

    for (i = 0; i < max_flow_num; i++) {
        if (unlikely(tbl->flow_num == 0))
            return k;

        j = tbl->flows[i].start_index;
        while (j != INVALID_ARRAY_INDEX) {
            if (tbl->items[j].start_time <= flush_timestamp) {
                out[k++] = tbl->items[j].firstseg;
                if (tbl->items[j].nb_merged > 1)
                    update_header(&(tbl->items[j]));
                /*
                 * Delete the packet and get the next
                 * packet in the flow.
                 */
                j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
                tbl->flows[i].start_index = j;
                if (j == INVALID_ARRAY_INDEX)
                    tbl->flow_num--;

                if (unlikely(k == nb_out))
                    return k;
            } else
                /*
                 * The remaining packets in this flow are not
                 * timed out, so go on to check other flows.
                 */
                break;
        }
    }
    return k;
}

uint32_t
gro_tcp4_tbl_pkt_count(void *tbl)
{
    struct gro_tcp4_tbl *gro_tbl = tbl;

    return gro_tbl ? gro_tbl->item_num : 0;
}
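
/*
 * Illustrative usage sketch. Applications normally go through the
 * public rte_gro API, which drives these functions internally;
 * rx_pkts/nb_rx below are placeholders for the application's receive
 * burst and the table sizes are arbitrary example values:
 *
 *    struct gro_tcp4_tbl *tbl;
 *    struct rte_mbuf *flush_out[32];
 *    uint16_t i, n;
 *
 *    tbl = gro_tcp4_tbl_create(rte_socket_id(), 16, 8);
 *    for (i = 0; i < nb_rx; i++) {
 *        if (gro_tcp4_reassemble(rx_pkts[i], tbl, rte_rdtsc()) < 0) {
 *            // packet was not processed; transmit it as-is
 *        }
 *    }
 *    n = gro_tcp4_tbl_timeout_flush(tbl, rte_rdtsc(), flush_out,
 *            RTE_DIM(flush_out));
 *    // transmit the n flushed packets, then release the table
 *    gro_tcp4_tbl_destroy(tbl);
 */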