/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef __SAD_H__
#define __SAD_H__

#include <rte_ipsec_sad.h>

#define SA_CACHE_SZ	128
#define SPI2IDX(spi, mask)	((spi) & (mask))
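
/*
 * Per-lcore cache of recently matched SAs, direct-mapped by the low
 * bits of the SPI. mask is (cache size - 1); a mask of 0 means the
 * cache is disabled.
 */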
struct ipsec_sad_cache {
	struct ipsec_sa **v4;
	struct ipsec_sa **v6;
	uint32_t mask;
};

RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache);
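
/* SAD management API: create the IPv4/IPv6 lookup tables, add an SA
 * to them, and size the per-lcore cache.
 */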
int ipsec_sad_create(const char *name, struct ipsec_sad *sad,
	int socket_id, struct ipsec_sa_cnt *sa_cnt);

int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa);

int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent);
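
/*
 * Compare an SA against the packet's outer IP addresses: transport
 * mode SAs match regardless of addresses, tunnel mode SAs must match
 * both tunnel endpoints.
 */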
static inline int
cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
	struct rte_ipv6_hdr *ipv6)
{
	int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags);
	if ((sa_type == TRANSPORT) ||
			/* IPv4 tunnel */
			(is_v4 && (sa_type == IP4_TUNNEL) &&
			(sa->src.ip.ip4 == ipv4->src_addr) &&
			(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
			/* IPv6 tunnel */
			(!is_v4 && (sa_type == IP6_TUNNEL) &&
			(!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
			(!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
		return 1;

	return 0;
}
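
/* store an SA in its direct-mapped cache slot, evicting any previous entry */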
static inline void
sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask)
{
	uint32_t cache_idx;

	/* SAD cache is disabled */
	if (mask == 0)
		return;

	cache_idx = SPI2IDX(sa->spi, mask);
	sa_cache[cache_idx] = sa;
}
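
/*
 * Find an SA for each packet in the burst: consult the per-lcore
 * cache first, then do bulk SAD lookups for the misses, split by
 * address family, and refill the cache with the results. sa[i] is
 * set to NULL when no SA matches pkts[i].
 */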
static inline void
sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
	void *sa[], uint16_t nb_pkts)
{
	uint32_t i;
	uint32_t nb_v4 = 0, nb_v6 = 0;
	struct rte_esp_hdr *esp;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_ipsec_sadv4_key v4[nb_pkts];
	struct rte_ipsec_sadv6_key v6[nb_pkts];
	int v4_idxes[nb_pkts];
	int v6_idxes[nb_pkts];
	const union rte_ipsec_sad_key *keys_v4[nb_pkts];
	const union rte_ipsec_sad_key *keys_v6[nb_pkts];
	void *v4_res[nb_pkts];
	void *v6_res[nb_pkts];
	uint32_t spi, cache_idx;
	struct ipsec_sad_cache *cache;
	struct ipsec_sa *cached_sa;
	int is_ipv4;

	cache = &RTE_PER_LCORE(sad_cache);

	/* split received packets by address family into two arrays */
	for (i = 0; i < nb_pkts; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
		ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
		esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
				pkts[i]->l3_len);

		is_ipv4 = pkts[i]->packet_type & RTE_PTYPE_L3_IPV4;
		spi = rte_be_to_cpu_32(esp->spi);
		cache_idx = SPI2IDX(spi, cache->mask);

		if (is_ipv4) {
			cached_sa = (cache->mask != 0) ?
				cache->v4[cache_idx] : NULL;
			/* check SAD cache entry */
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) {
					/* cache hit */
					sa[i] = cached_sa;
					continue;
				}
			}
			/*
			 * cache miss,
			 * preparing sad key to proceed with sad lookup
			 */
			v4[nb_v4].spi = esp->spi;
			v4[nb_v4].dip = ipv4->dst_addr;
			v4[nb_v4].sip = ipv4->src_addr;
			keys_v4[nb_v4] = (const union rte_ipsec_sad_key *)
						&v4[nb_v4];
			v4_idxes[nb_v4++] = i;
		} else {
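			/* IPv6: same cache check and key preparation */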
			cached_sa = (cache->mask != 0) ?
				cache->v6[cache_idx] : NULL;
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) {
					/* cache hit */
					sa[i] = cached_sa;
					continue;
				}
			}
			v6[nb_v6].spi = esp->spi;
			memcpy(v6[nb_v6].dip, ipv6->dst_addr,
					sizeof(ipv6->dst_addr));
			memcpy(v6[nb_v6].sip, ipv6->src_addr,
					sizeof(ipv6->src_addr));
			keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
						&v6[nb_v6];
			v6_idxes[nb_v6++] = i;
		}
	}
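
	/* bulk lookups for all cache misses, per address family */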
	if (nb_v4 != 0)
		rte_ipsec_sad_lookup(sad->sad_v4, keys_v4, v4_res, nb_v4);
	if (nb_v6 != 0)
		rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6);
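
	/*
	 * verify that each returned SA really matches the packet's
	 * addresses (cmp_sa_key re-checks the tunnel endpoints) and
	 * refill the per-lcore cache on match
	 */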
	for (i = 0; i < nb_v4; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]],
			struct rte_ipv4_hdr *);
		if ((v4_res[i] != NULL) &&
				(cmp_sa_key(v4_res[i], 1, ipv4, NULL))) {
			sa[v4_idxes[i]] = v4_res[i];
			sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i],
				cache->mask);
		} else
			sa[v4_idxes[i]] = NULL;
	}
	for (i = 0; i < nb_v6; i++) {
		ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]],
			struct rte_ipv6_hdr *);
		if ((v6_res[i] != NULL) &&
				(cmp_sa_key(v6_res[i], 0, NULL, ipv6))) {
			sa[v6_idxes[i]] = v6_res[i];
			sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i],
				cache->mask);
		} else
			sa[v6_idxes[i]] = NULL;
	}
}
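
/*
 * Minimal usage sketch (hypothetical caller, not part of this header):
 * assumes pkts[] was received and classified so that packet_type and
 * l3_len are set, and sa_cnt was filled while parsing the SA config.
 *
 *	struct ipsec_sad sad;
 *	void *sa[MAX_PKT_BURST];
 *
 *	ipsec_sad_create("sad", &sad, rte_socket_id(), &sa_cnt);
 *	ipsec_sad_lcore_cache_init(SA_CACHE_SZ);
 *	...
 *	sad_lookup(&sad, pkts, sa, nb_rx);
 *	(sa[i] == NULL means no inbound SA matched pkts[i])
 */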

#endif /* __SAD_H__ */