/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_udp.h>

#include "gro_vxlan_tcp4.h"
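
/*
 * Allocate a VxLAN/TCP4 reassembly table on the given NUMA socket. The
 * table holds at most max_flow_num * max_item_per_flow packet items,
 * capped by GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM. Returns NULL when memory
 * allocation fails.
 */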
void *
gro_vxlan_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_vxlan_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM);

	tbl = rte_zmalloc_socket(__func__, sizeof(struct gro_vxlan_tcp4_tbl),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_vxlan_tcp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__, size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_vxlan_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__, size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}

	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}
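
/* Free the table together with its item and flow arrays; NULL is accepted. */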
void
gro_vxlan_tcp4_tbl_destroy(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *vxlan_tbl = tbl;

	if (vxlan_tbl) {
		rte_free(vxlan_tbl->items);
		rte_free(vxlan_tbl->flows);
	}
	rte_free(vxlan_tbl);
}
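
/*
 * Linearly scan the item array for an unused slot (firstseg == NULL marks
 * an empty item). find_an_empty_flow() below does the same for flow slots.
 */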
static inline uint32_t
find_an_empty_item(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_item_num = tbl->max_item_num, i;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].inner_item.firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
find_an_empty_flow(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_flow_num = tbl->max_flow_num, i;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}
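
/*
 * Store a packet in an empty item slot and record its inner and outer
 * IPv4 IDs, TCP sequence number and DF ("atomic") flags. If prev_idx is
 * valid, the new item is linked into the flow's packet list after it.
 */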
static inline uint32_t
insert_new_item(struct gro_vxlan_tcp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (unlikely(item_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].inner_item.firstseg = pkt;
	tbl->items[item_idx].inner_item.lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].inner_item.start_time = start_time;
	tbl->items[item_idx].inner_item.next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].inner_item.sent_seq = sent_seq;
	tbl->items[item_idx].inner_item.ip_id = ip_id;
	tbl->items[item_idx].inner_item.nb_merged = 1;
	tbl->items[item_idx].inner_item.is_atomic = is_atomic;
	tbl->items[item_idx].outer_ip_id = outer_ip_id;
	tbl->items[item_idx].outer_is_atomic = outer_is_atomic;
	tbl->item_num++;

	/* If the previous packet exists, chain the new one with it. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].inner_item.next_pkt_idx =
			tbl->items[prev_idx].inner_item.next_pkt_idx;
		tbl->items[prev_idx].inner_item.next_pkt_idx = item_idx;
	}

	return item_idx;
}
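
/*
 * Mark an item slot as empty (firstseg = NULL) and unlink it from its
 * flow's packet list. Returns the index of the next packet in the list.
 */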
static inline uint32_t
delete_item(struct gro_vxlan_tcp4_tbl *tbl,
		uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].inner_item.next_pkt_idx;

	/* NULL indicates an empty item. */
	tbl->items[item_idx].inner_item.firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].inner_item.next_pkt_idx = next_idx;

	return next_idx;
}
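
/*
 * Allocate a flow slot and copy the full inner and outer flow key (MAC
 * and IPv4 addresses, TCP/UDP ports and the VxLAN header) into it. The
 * flow's packet list starts at item_idx.
 */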
static inline uint32_t
insert_new_flow(struct gro_vxlan_tcp4_tbl *tbl,
		struct vxlan_tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct vxlan_tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	ether_addr_copy(&(src->inner_key.eth_saddr),
			&(dst->inner_key.eth_saddr));
	ether_addr_copy(&(src->inner_key.eth_daddr),
			&(dst->inner_key.eth_daddr));
	dst->inner_key.ip_src_addr = src->inner_key.ip_src_addr;
	dst->inner_key.ip_dst_addr = src->inner_key.ip_dst_addr;
	dst->inner_key.recv_ack = src->inner_key.recv_ack;
	dst->inner_key.src_port = src->inner_key.src_port;
	dst->inner_key.dst_port = src->inner_key.dst_port;

	dst->vxlan_hdr.vx_flags = src->vxlan_hdr.vx_flags;
	dst->vxlan_hdr.vx_vni = src->vxlan_hdr.vx_vni;
	ether_addr_copy(&(src->outer_eth_saddr), &(dst->outer_eth_saddr));
	ether_addr_copy(&(src->outer_eth_daddr), &(dst->outer_eth_daddr));
	dst->outer_ip_src_addr = src->outer_ip_src_addr;
	dst->outer_ip_dst_addr = src->outer_ip_dst_addr;
	dst->outer_src_port = src->outer_src_port;
	dst->outer_dst_port = src->outer_dst_port;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}
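
/*
 * Two packets belong to the same flow only if all outer fields (MAC and
 * IPv4 addresses, UDP ports, VxLAN flags and VNI) and the inner TCP/IPv4
 * key are equal.
 */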
static inline int
is_same_vxlan_tcp4_flow(struct vxlan_tcp4_flow_key k1,
		struct vxlan_tcp4_flow_key k2)
{
	return (is_same_ether_addr(&k1.outer_eth_saddr, &k2.outer_eth_saddr) &&
			is_same_ether_addr(&k1.outer_eth_daddr,
				&k2.outer_eth_daddr) &&
			(k1.outer_ip_src_addr == k2.outer_ip_src_addr) &&
			(k1.outer_ip_dst_addr == k2.outer_ip_dst_addr) &&
			(k1.outer_src_port == k2.outer_src_port) &&
			(k1.outer_dst_port == k2.outer_dst_port) &&
			(k1.vxlan_hdr.vx_flags == k2.vxlan_hdr.vx_flags) &&
			(k1.vxlan_hdr.vx_vni == k2.vxlan_hdr.vx_vni) &&
			is_same_tcp4_flow(k1.inner_key, k2.inner_key));
}
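
/*
 * Decide whether the new packet is a neighbor of the stored one: the
 * inner TCP sequence number and IPv4 ID must be contiguous, and, when the
 * outer DF bit is not set, the outer IPv4 IDs must be consecutive as
 * well. Returns 1 to append, -1 to prepend, and 0 when no merge applies.
 */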
static inline int
check_vxlan_seq_option(struct gro_vxlan_tcp4_item *item,
		struct tcp_hdr *tcp_hdr,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint16_t tcp_hl,
		uint16_t tcp_dl,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	int cmp;
	uint16_t l2_offset;

	/* Don't merge packets whose outer DF bits are different. */
	if (unlikely(item->outer_is_atomic ^ outer_is_atomic))
		return 0;

	l2_offset = pkt->outer_l2_len + pkt->outer_l3_len;
	cmp = check_seq_option(&item->inner_item, tcp_hdr, sent_seq, ip_id,
			tcp_hl, tcp_dl, l2_offset, is_atomic);
	if ((cmp > 0) && (outer_is_atomic ||
				(outer_ip_id == item->outer_ip_id + 1)))
		/* Append the new packet. */
		return 1;
	else if ((cmp < 0) && (outer_is_atomic ||
				(outer_ip_id + item->inner_item.nb_merged ==
				 item->outer_ip_id)))
		/* Prepend the new packet. */
		return -1;

	return 0;
}
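
/*
 * Try to merge the new packet into the stored inner TCP/IPv4 packet. On
 * success, the stored outer IPv4 ID is advanced when the packet was
 * appended (cmp > 0).
 */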
static inline int
merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,
		struct rte_mbuf *pkt,
		int cmp,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id)
{
	if (merge_two_tcp4_packets(&item->inner_item, pkt, cmp, sent_seq,
				ip_id, pkt->outer_l2_len +
				pkt->outer_l3_len)) {
		/* Keep the larger of the two outer IPv4 IDs. */
		item->outer_ip_id = cmp > 0 ? outer_ip_id : item->outer_ip_id;
		return 1;
	}

	return 0;
}
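
/*
 * After merging, rewrite the length fields of the outer IPv4, outer UDP
 * and inner IPv4 headers of the first segment so they cover the whole
 * reassembled packet.
 */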
static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
	struct ipv4_hdr *ipv4_hdr;
	struct udp_hdr *udp_hdr;
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	uint16_t len;

	/* Update the outer IPv4 header. */
	len = pkt->pkt_len - pkt->outer_l2_len;
	ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->outer_l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);

	/* Update the outer UDP header. */
	len -= pkt->outer_l3_len;
	udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
	udp_hdr->dgram_len = rte_cpu_to_be_16(len);

	/* Update the inner IPv4 header. */
	len -= pkt->l2_len;
	ipv4_hdr = (struct ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);
}
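
/*
 * Try to merge the incoming packet into an existing flow. Returns 1 when
 * the packet was merged into a stored packet, 0 when it was stored in the
 * table as a new item, and -1 when it cannot be processed (unexpected TCP
 * flags, empty payload, or no free table entry), in which case the caller
 * should deliver the packet unmodified.
 */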
int32_t
gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct ether_hdr *outer_eth_hdr, *eth_hdr;
	struct ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
	struct tcp_hdr *tcp_hdr;
	struct udp_hdr *udp_hdr;
	struct vxlan_hdr *vxlan_hdr;
	uint32_t sent_seq;
	uint16_t tcp_dl, frag_off, outer_ip_id, ip_id;
	uint8_t outer_is_atomic, is_atomic;

	struct vxlan_tcp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint16_t hdr_len;
	uint8_t find;

	outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	outer_ipv4_hdr = (struct ipv4_hdr *)((char *)outer_eth_hdr +
			pkt->outer_l2_len);
	udp_hdr = (struct udp_hdr *)((char *)outer_ipv4_hdr +
			pkt->outer_l3_len);
	vxlan_hdr = (struct vxlan_hdr *)((char *)udp_hdr +
			sizeof(struct udp_hdr));
	eth_hdr = (struct ether_hdr *)((char *)vxlan_hdr +
			sizeof(struct vxlan_hdr));
	ipv4_hdr = (struct ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);

	/*
	 * Don't process the packet which has FIN, SYN, RST, PSH, URG,
	 * ECE or CWR set.
	 */
	if (tcp_hdr->tcp_flags != TCP_ACK_FLAG)
		return -1;

	hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len +
		pkt->l3_len + pkt->l4_len;
	/*
	 * Don't process the packet whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

	/*
	 * Save IPv4 ID for the packet whose DF bit is 0. For the packet
	 * whose DF bit is 1, IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(outer_ipv4_hdr->fragment_offset);
	outer_is_atomic = (frag_off & IPV4_HDR_DF_FLAG) == IPV4_HDR_DF_FLAG;
	outer_ip_id = outer_is_atomic ? 0 :
		rte_be_to_cpu_16(outer_ipv4_hdr->packet_id);
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & IPV4_HDR_DF_FLAG) == IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);

	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

	ether_addr_copy(&(eth_hdr->s_addr), &(key.inner_key.eth_saddr));
	ether_addr_copy(&(eth_hdr->d_addr), &(key.inner_key.eth_daddr));
	key.inner_key.ip_src_addr = ipv4_hdr->src_addr;
	key.inner_key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.inner_key.recv_ack = tcp_hdr->recv_ack;
	key.inner_key.src_port = tcp_hdr->src_port;
	key.inner_key.dst_port = tcp_hdr->dst_port;

	key.vxlan_hdr.vx_flags = vxlan_hdr->vx_flags;
	key.vxlan_hdr.vx_vni = vxlan_hdr->vx_vni;
	ether_addr_copy(&(outer_eth_hdr->s_addr), &(key.outer_eth_saddr));
	ether_addr_copy(&(outer_eth_hdr->d_addr), &(key.outer_eth_daddr));
	key.outer_ip_src_addr = outer_ipv4_hdr->src_addr;
	key.outer_ip_dst_addr = outer_ipv4_hdr->dst_addr;
	key.outer_src_port = udp_hdr->src_port;
	key.outer_dst_port = udp_hdr->dst_port;

	/* Search for a matched flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_vxlan_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * Can't find a matched flow. Insert a new flow and store the
	 * packet into the flow.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, sent_seq, outer_ip_id,
				ip_id, outer_is_atomic, is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so
			 * delete the inserted packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/* Check all packets in the flow and try to find a neighbor. */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = check_vxlan_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
				sent_seq, outer_ip_id, ip_id, pkt->l4_len,
				tcp_dl, outer_is_atomic, is_atomic);
		if (cmp) {
			if (merge_two_vxlan_tcp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, sent_seq,
						outer_ip_id, ip_id))
				return 1;
			/*
			 * Can't merge the two packets, as the merged
			 * packet length would exceed the max value.
			 * Insert the packet into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						sent_seq, outer_ip_id,
						ip_id, outer_is_atomic,
						is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].inner_item.next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* Can't find a neighbor. Insert the packet into the flow. */
	if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
				outer_ip_id, ip_id, outer_is_atomic,
				is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}
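
/*
 * Flush packets whose start_time is not newer than flush_timestamp into
 * the out array. Packets built from more than one merged segment
 * (nb_merged > 1) get their outer and inner headers rewritten first.
 * Returns the number of packets flushed.
 */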
uint16_t
gro_vxlan_tcp4_tbl_timeout_flush(struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].inner_item.start_time <=
					flush_timestamp) {
				out[k++] = tbl->items[j].inner_item.firstseg;
				if (tbl->items[j].inner_item.nb_merged > 1)
					update_vxlan_header(&(tbl->items[j]));
				/*
				 * Delete the item and get the next packet
				 * index.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in the flow are
				 * not timed out. Go to check other flows.
				 */
				break;
		}
	}

	return k;
}
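
/* Return the number of packets currently held in the reassembly table. */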
uint32_t
gro_vxlan_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}