Introduce SAD cache.
The most recent SAs are stored in a per-lcore cache. The cache is a flat
array containing SAs indexed by SPI.
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Anoob Joseph <anoobj@marvell.com>
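For context, the lookup fast path can be pictured with the following minimal,
self-contained sketch. It is not code from this patch: the names toy_sa,
toy_cache, toy_lookup and full_sad_lookup are invented for illustration (the
real implementation lives in sad.h/sad.c below), and the real cache
additionally re-checks the tunnel addresses via cmp_sa_key() before trusting
a hit.

#include <stddef.h>
#include <stdint.h>

struct toy_sa {
	uint32_t spi;
	/* ... the rest of the SA would live here ... */
};

struct toy_cache {
	struct toy_sa **ent;	/* flat array of cached SA pointers */
	uint32_t mask;		/* number of entries - 1, entries is a power of two */
};

/* Map an SPI to a cache slot, mirroring the patch's SPI2IDX() macro. */
static inline uint32_t
spi2idx(uint32_t spi, uint32_t mask)
{
	return spi & mask;
}

/* Stand-in for the full (slow path) SAD lookup. */
static struct toy_sa *
full_sad_lookup(uint32_t spi)
{
	(void)spi;
	return NULL;
}

static struct toy_sa *
toy_lookup(struct toy_cache *c, uint32_t spi)
{
	struct toy_sa *sa;

	if (c->mask != 0) {
		sa = c->ent[spi2idx(spi, c->mask)];
		/* Different SPIs can map to the same slot, so verify the SPI
		 * (the patch also verifies the addresses via cmp_sa_key()). */
		if (sa != NULL && sa->spi == spi)
			return sa;	/* cache hit */
	}

	/* Cache miss: do the full lookup and remember the result. */
	sa = full_sad_lookup(spi);
	if (sa != NULL && c->mask != 0)
		c->ent[spi2idx(spi, c->mask)] = sa;
	return sa;
}

Because the array size is a power of two, the index computation reduces to a
single AND, and distinct SPIs that collide simply overwrite each other's slot,
which is harmless since a miss falls back to the full SAD lookup.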
./build/ipsec-secgw [EAL options] --
-p PORTMASK -P -u PORTMASK -j FRAMESIZE
-l -w REPLAY_WINDOW_SIZE -e -a
+ -c SAD_CACHE_SIZE
--config (port,queue,lcore)[,(port,queue,lcore)]
--single-sa SAIDX
--rxoffload MASK
* ``-a``: enables Security Association sequence number atomic behavior
(available only with librte_ipsec code path).
+* ``-c``: specifies the SAD cache size. The most recent SAs are stored in a
+  per-lcore cache. The cache is a flat array containing SAs indexed by SPI.
+  A zero value disables the cache.
+  Default value: 128.
+
* ``--config (port,queue,lcore)[,(port,queue,lcore)]``: determines which queues
from which ports are mapped to which cores.
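As a sizing aid for the ``-c`` option described above, the per-lcore memory
cost can be estimated from what ipsec_sad_lcore_cache_init() below allocates:
two pointer arrays (IPv4 and IPv6), each with the requested entry count
rounded up to a power of two. The helper name sad_cache_mem_per_lcore() is
invented for this sketch.

#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>	/* rte_align32pow2() */

/*
 * Rough per-lcore footprint of the SAD cache: two flat arrays (v4 and v6)
 * of SA pointers, each rounded up to a power-of-two number of entries,
 * matching what ipsec_sad_lcore_cache_init() allocates.
 */
static inline size_t
sad_cache_mem_per_lcore(uint32_t nb_cache_ent)
{
	uint32_t elems = rte_align32pow2(nb_cache_ent);

	return 2 * (size_t)elems * sizeof(void *);
}

For example, the default of 128 entries costs 2 * 128 * 8 = 2048 bytes per
lcore on a 64-bit build.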
#include "ipsec.h"
#include "parser.h"
+#include "sad.h"
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
/* application wide librte_ipsec/SA parameters */
-struct app_sa_prm app_sa_prm = {.enable = 0};
+struct app_sa_prm app_sa_prm = {
+ .enable = 0,
+ .cache_sz = SA_CACHE_SZ
+ };
static const char *cfgfile;
struct lcore_rx_queue {
uint16_t portid;
uint8_t queueid;
struct lcore_conf *qconf;
- int32_t socket_id;
+ int32_t rc, socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
/ US_PER_S * BURST_TX_DRAIN_US;
struct lcore_rx_queue *rxql;
qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+ rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC,
+			"SAD cache init on lcore %u failed with code: %d\n",
+ lcore_id, rc);
+ return rc;
+ }
+
if (qconf->nb_rx_queue == 0) {
RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
lcore_id);
" [-w REPLAY_WINDOW_SIZE]"
" [-e]"
" [-a]"
+ " [-c]"
" -f CONFIG_FILE"
" --config (port,queue,lcore)[,(port,queue,lcore)]"
" [--single-sa SAIDX]"
" size for each SA\n"
" -e enables ESN\n"
" -a enables SA SQN atomic behaviour\n"
+ " -c specifies inbound SAD cache size,\n"
+ " zero value disables the cache (default value: 128)\n"
" -f CONFIG_FILE: Configuration file\n"
" --config (port,queue,lcore): Rx queue configuration\n"
" --single-sa SAIDX: Use single SA index for outbound traffic,\n"
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
+ while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:",
lgopts, &option_index)) != EOF) {
switch (opt) {
app_sa_prm.enable = 1;
app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
break;
+ case 'c':
+ ret = parse_decimal(optarg);
+ if (ret < 0) {
+ printf("Invalid SA cache size: %s\n", optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ app_sa_prm.cache_sz = ret;
+ break;
case CMD_LINE_OPT_CONFIG_NUM:
ret = parse_config(optarg);
if (ret) {
uint32_t enable; /* use librte_ipsec API for ipsec pkt processing */
uint32_t window_size; /* replay window size */
uint32_t enable_esn; /* enable/disable ESN support */
+ uint32_t cache_sz; /* per lcore SA cache size */
uint64_t flags; /* rte_ipsec_sa_prm.flags */
};
return NULL;
}
- sa_ctx = rte_malloc(NULL, sizeof(struct sa_ctx) +
+ sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
if (sa_ctx == NULL) {
void *sa_arr[], uint16_t nb_pkts)
{
uint32_t i;
- struct ip *ip;
- uint32_t *src4_addr;
- uint8_t *src6_addr;
void *result_sa;
struct ipsec_sa *sa;
intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
result_sa = (void *)intsa;
}
-
- ip = rte_pktmbuf_mtod(pkts[i], struct ip *);
- switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
- case IP4_TUNNEL:
- src4_addr = RTE_PTR_ADD(ip,
- offsetof(struct ip, ip_src));
- if ((ip->ip_v == IPVERSION) &&
- (sa->src.ip.ip4 == *src4_addr) &&
- (sa->dst.ip.ip4 == *(src4_addr + 1)))
- sa_arr[i] = result_sa;
- else
- sa_arr[i] = NULL;
- break;
- case IP6_TUNNEL:
- src6_addr = RTE_PTR_ADD(ip,
- offsetof(struct ip6_hdr, ip6_src));
- if ((ip->ip_v == IP6_VERSION) &&
- !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
- !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
- sa_arr[i] = result_sa;
- else
- sa_arr[i] = NULL;
- break;
- case TRANSPORT:
- sa_arr[i] = result_sa;
- }
+ sa_arr[i] = result_sa;
}
}
*/
#include <rte_errno.h>
+#include <rte_malloc.h>
#include "ipsec.h"
#include "sad.h"
+RTE_DEFINE_PER_LCORE(struct ipsec_sad_cache, sad_cache) = {
+ .v4 = NULL,
+ .v6 = NULL,
+ .mask = 0,
+};
+
int
ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa)
{
return 0;
}
+/*
+ * Init per lcore SAD cache.
+ * Must be called by every processing lcore.
+ */
+int
+ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent)
+{
+ uint32_t cache_elem;
+ size_t cache_mem_sz;
+ struct ipsec_sad_cache *cache;
+
+ cache = &RTE_PER_LCORE(sad_cache);
+
+ cache_elem = rte_align32pow2(nb_cache_ent);
+ cache_mem_sz = sizeof(struct ipsec_sa *) * cache_elem;
+
+ if (cache_mem_sz != 0) {
+ cache->v4 = rte_zmalloc_socket(NULL, cache_mem_sz,
+ RTE_CACHE_LINE_SIZE, rte_socket_id());
+ if (cache->v4 == NULL)
+ return -rte_errno;
+
+ cache->v6 = rte_zmalloc_socket(NULL, cache_mem_sz,
+ RTE_CACHE_LINE_SIZE, rte_socket_id());
+ if (cache->v6 == NULL)
+ return -rte_errno;
+
+ cache->mask = cache_elem - 1;
+ }
+
+ return 0;
+}
+
int
ipsec_sad_create(const char *name, struct ipsec_sad *sad,
int socket_id, struct ipsec_sa_cnt *sa_cnt)
#include <rte_ipsec_sad.h>
+#define SA_CACHE_SZ 128
+#define SPI2IDX(spi, mask) ((spi) & (mask))
+
+struct ipsec_sad_cache {
+ struct ipsec_sa **v4;
+ struct ipsec_sa **v6;
+ uint32_t mask;
+};
+
+RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache);
+
struct ipsec_sad {
struct rte_ipsec_sad *sad_v4;
struct rte_ipsec_sad *sad_v6;
int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa);
+int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent);
+
+static inline int
+cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
+ struct rte_ipv6_hdr *ipv6)
+{
+ int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags);
+ if ((sa_type == TRANSPORT) ||
+ /* IPv4 check */
+ (is_v4 && (sa_type == IP4_TUNNEL) &&
+ (sa->src.ip.ip4 == ipv4->src_addr) &&
+ (sa->dst.ip.ip4 == ipv4->dst_addr)) ||
+ /* IPv6 check */
+ (!is_v4 && (sa_type == IP6_TUNNEL) &&
+ (!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
+ (!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
+ return 1;
+
+ return 0;
+}
+
static inline void
-sad_lookup(const struct ipsec_sad *sad, struct rte_mbuf *pkts[],
+sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask)
+{
+ uint32_t cache_idx;
+
+ /* SAD cache is disabled */
+ if (mask == 0)
+ return;
+
+ cache_idx = SPI2IDX(sa->spi, mask);
+ sa_cache[cache_idx] = sa;
+}
+
+static inline void
+sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
void *sa[], uint16_t nb_pkts)
{
uint32_t i;
const union rte_ipsec_sad_key *keys_v6[nb_pkts];
void *v4_res[nb_pkts];
void *v6_res[nb_pkts];
+ uint32_t spi, cache_idx;
+ struct ipsec_sad_cache *cache;
+ struct ipsec_sa *cached_sa;
+ int is_ipv4;
+
+ cache = &RTE_PER_LCORE(sad_cache);
/* split received packets by address family into two arrays */
for (i = 0; i < nb_pkts; i++) {
ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
+ ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
pkts[i]->l3_len);
- if ((ipv4->version_ihl >> 4) == IPVERSION) {
+
+ is_ipv4 = ((ipv4->version_ihl >> 4) == IPVERSION);
+ spi = rte_be_to_cpu_32(esp->spi);
+ cache_idx = SPI2IDX(spi, cache->mask);
+
+ if (is_ipv4) {
+ cached_sa = (cache->mask != 0) ?
+ cache->v4[cache_idx] : NULL;
+ /* check SAD cache entry */
+ if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
+ if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) {
+ /* cache hit */
+ sa[i] = cached_sa;
+ continue;
+ }
+ }
+			/*
+			 * Cache miss: prepare the SAD key to proceed
+			 * with the SAD lookup.
+			 */
v4[nb_v4].spi = esp->spi;
v4[nb_v4].dip = ipv4->dst_addr;
v4[nb_v4].sip = ipv4->src_addr;
&v4[nb_v4];
v4_idxes[nb_v4++] = i;
} else {
- ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
+ cached_sa = (cache->mask != 0) ?
+ cache->v6[cache_idx] : NULL;
+ if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
+ if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) {
+ sa[i] = cached_sa;
+ continue;
+ }
+ }
v6[nb_v6].spi = esp->spi;
memcpy(v6[nb_v6].dip, ipv6->dst_addr,
sizeof(ipv6->dst_addr));
if (nb_v6 != 0)
rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6);
- for (i = 0; i < nb_v4; i++)
- sa[v4_idxes[i]] = v4_res[i];
-
- for (i = 0; i < nb_v6; i++)
- sa[v6_idxes[i]] = v6_res[i];
+ for (i = 0; i < nb_v4; i++) {
+ ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]],
+ struct rte_ipv4_hdr *);
+ if ((v4_res[i] != NULL) &&
+ (cmp_sa_key(v4_res[i], 1, ipv4, NULL))) {
+ sa[v4_idxes[i]] = v4_res[i];
+ sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i],
+ cache->mask);
+ } else
+ sa[v4_idxes[i]] = NULL;
+ }
+ for (i = 0; i < nb_v6; i++) {
+ ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]],
+ struct rte_ipv6_hdr *);
+ if ((v6_res[i] != NULL) &&
+ (cmp_sa_key(v6_res[i], 0, NULL, ipv6))) {
+ sa[v6_idxes[i]] = v6_res[i];
+ sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i],
+ cache->mask);
+ } else
+ sa[v6_idxes[i]] = NULL;
+ }
}
#endif /* __SAD_H__ */