1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
8 #include <rte_ipsec_sad.h>
/* Default number of entries in the per-lcore SA cache (power of two). */
10 #define SA_CACHE_SZ 128
/* Map an SPI to a cache slot; mask is expected to be (cache size - 1). */
11 #define SPI2IDX(spi, mask) ((spi) & (mask))
/* Per-lcore cache of recently matched SAs, indexed by SPI2IDX(spi, mask).
 * NOTE(review): the struct body (original lines 14-18) is elided here --
 * sad_lookup() below reads fields ->mask, ->v4[] and ->v6[]; confirm the
 * full definition in the complete file.
 */
13 struct ipsec_sad_cache {
19 RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache);
/* Pair of librte_ipsec SAD handles, one per address family.
 * NOTE(review): enclosing "struct ipsec_sad {" line is elided from this view.
 */
22 struct rte_ipsec_sad *sad_v4;
23 struct rte_ipsec_sad *sad_v6;
/* Create the v4/v6 SAD tables on the given NUMA socket, sized from sa_cnt.
 * Returns 0 on success, negative on error (presumably -errno style -- confirm
 * against the implementation).
 */
26 int ipsec_sad_create(const char *name, struct ipsec_sad *sad,
27 	int socket_id, struct ipsec_sa_cnt *sa_cnt);
/* Insert one SA into the appropriate (v4 or v6) SAD table. */
29 int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa);
/* Allocate/initialize this lcore's SA cache with nb_cache_ent entries. */
31 int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent);
/* Check whether the SA's tunnel endpoints match the packet's IP header.
 *
 * A TRANSPORT-mode SA matches unconditionally (no tunnel addresses to
 * compare); a tunnel-mode SA must match family (is_v4 selects which header
 * pointer is valid) and both source and destination addresses.
 *
 * NOTE(review): the return statements (original lines ~47-51) are elided
 * from this view -- presumably returns nonzero on match, 0 otherwise;
 * confirm in the full file.
 */
34 cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
35 	struct rte_ipv6_hdr *ipv6)
37 	int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags);
38 	if ((sa_type == TRANSPORT) ||
	/* IPv4 tunnel: compare 32-bit src/dst addresses directly. */
40 		(is_v4 && (sa_type == IP4_TUNNEL) &&
41 		(sa->src.ip.ip4 == ipv4->src_addr) &&
42 		(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
	/* IPv6 tunnel: compare the full 16-byte src/dst addresses. */
44 		(!is_v4 && (sa_type == IP6_TUNNEL) &&
45 		(!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
46 		(!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
53 sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask)
57 /* SAD cache is disabled */
61 cache_idx = SPI2IDX(sa->spi, mask);
62 sa_cache[cache_idx] = sa;
/* Resolve the inbound SA for each packet in a burst.
 *
 * Flow: (1) for each packet, try the per-lcore SPI-indexed cache; on a hit
 * with matching key the SA is taken directly, otherwise the packet's
 * SPI/addresses are queued into a per-family key array; (2) bulk-lookup the
 * queued keys in the v4 and v6 SAD tables; (3) validate each result with
 * cmp_sa_key() (the SAD is SPI-keyed, so addresses must be re-checked) and
 * refresh the cache on success. sa[i] is set to NULL when no valid SA is
 * found for pkts[i].
 *
 * NOTE(review): several interior lines are elided from this view (cache-hit
 * "sa[i] = ...; continue;" paths, the &v4[nb_v4]/&v6[nb_v6] key-address
 * lines, loop/brace closers) -- the comments below describe the visible
 * code only; confirm control flow against the full file.
 */
66 sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
67 	void *sa[], uint16_t nb_pkts)
70 	uint32_t nb_v4 = 0, nb_v6 = 0;
71 	struct rte_esp_hdr *esp;
72 	struct rte_ipv4_hdr *ipv4;
73 	struct rte_ipv6_hdr *ipv6;
	/* VLA scratch arrays sized by the burst; nb_pkts is bounded by the
	 * caller's burst size (stack usage grows with it).
	 */
74 	struct rte_ipsec_sadv4_key v4[nb_pkts];
75 	struct rte_ipsec_sadv6_key v6[nb_pkts];
76 	int v4_idxes[nb_pkts];
77 	int v6_idxes[nb_pkts];
78 	const union rte_ipsec_sad_key *keys_v4[nb_pkts];
79 	const union rte_ipsec_sad_key *keys_v6[nb_pkts];
80 	void *v4_res[nb_pkts];
81 	void *v6_res[nb_pkts];
82 	uint32_t spi, cache_idx;
83 	struct ipsec_sad_cache *cache;
84 	struct ipsec_sa *cached_sa;
87 	cache = &RTE_PER_LCORE(sad_cache);
89 	/* split received packets by address family into two arrays */
90 	for (i = 0; i < nb_pkts; i++) {
		/* Both header views point at the same L3 start; only the one
		 * matching the packet's family is dereferenced below.
		 */
91 		ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
92 		ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
93 		esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
96 		is_ipv4 = pkts[i]->packet_type & RTE_PTYPE_L3_IPV4;
		/* SPI is carried big-endian on the wire; cache is indexed by
		 * host order.
		 */
97 		spi = rte_be_to_cpu_32(esp->spi);
98 		cache_idx = SPI2IDX(spi, cache->mask);
		/* mask == 0 means the cache is disabled -- skip the probe. */
101 			cached_sa = (cache->mask != 0) ?
102 				cache->v4[cache_idx] : NULL;
103 			/* check SAD cache entry */
104 			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
105 				if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) {
113 			 * preparing sad key to proceed with sad lookup
			/* Key keeps the SPI in network byte order, matching what
			 * ipsec_sad_add stored.
			 */
115 			v4[nb_v4].spi = esp->spi;
116 			v4[nb_v4].dip = ipv4->dst_addr;
117 			v4[nb_v4].sip = ipv4->src_addr;
118 			keys_v4[nb_v4] = (const union rte_ipsec_sad_key *)
120 			v4_idxes[nb_v4++] = i;
122 			cached_sa = (cache->mask != 0) ?
123 				cache->v6[cache_idx] : NULL;
124 			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
125 				if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) {
130 			v6[nb_v6].spi = esp->spi;
131 			memcpy(v6[nb_v6].dip, ipv6->dst_addr,
132 					sizeof(ipv6->dst_addr));
133 			memcpy(v6[nb_v6].sip, ipv6->src_addr,
134 					sizeof(ipv6->src_addr));
135 			keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
137 			v6_idxes[nb_v6++] = i;
	/* Bulk lookups for whatever missed the cache. NOTE(review): the
	 * "if (nb_v4 != 0)" / "if (nb_v6 != 0)" guard lines appear to be
	 * elided here -- confirm in the full file.
	 */
142 		rte_ipsec_sad_lookup(sad->sad_v4, keys_v4, v4_res, nb_v4);
144 		rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6);
146 	for (i = 0; i < nb_v4; i++) {
147 		ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]],
148 			struct rte_ipv4_hdr *);
		/* SAD hit is SPI-based; re-verify the tunnel addresses before
		 * accepting the SA, then refresh the cache slot.
		 */
149 		if ((v4_res[i] != NULL) &&
150 				(cmp_sa_key(v4_res[i], 1, ipv4, NULL))) {
151 			sa[v4_idxes[i]] = v4_res[i];
152 			sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i],
155 			sa[v4_idxes[i]] = NULL;
157 	for (i = 0; i < nb_v6; i++) {
158 		ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]],
159 			struct rte_ipv6_hdr *);
160 		if ((v6_res[i] != NULL) &&
161 				(cmp_sa_key(v6_res[i], 0, NULL, ipv6))) {
162 			sa[v6_idxes[i]] = v6_res[i];
163 			sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i],
166 			sa[v6_idxes[i]] = NULL;
170 #endif /* __SAD_H__ */