The ipsec-secgw application does not work in IPv4 transport mode,
nor in IPv6 transport and tunnel modes.
IPv6 tunnel mode does not work because the security association
pattern fields are assigned as if they were IPv4 when the inline
crypto session is created.
In both IPv4 and IPv6 transport modes, the iteration through the
security capabilities stops at the first tunnel capability, which
causes the session to be created as tunnel instead of transport.
Another issue is that the config file does not provide source and
destination IP addresses for transport mode, which the NIC requires
to perform inline crypto. The default addresses stored in the
security association (all zeroes) are used instead, which causes
packets to be dropped.
To fix this, the code in create_session() is reorganized to behave
appropriately for the given protocol (IPv4/IPv6). The iteration
through the security capabilities is also changed to check for the
expected mode rather than only tunnel.
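
For illustration, the capability lookup now matches against the mode
already set in the session configuration instead of a hard-coded
tunnel mode; a minimal sketch (loop framing assumed, match condition
as in the diff below):

	while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
		if (sec_cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC &&
				sec_cap->ipsec.mode == sess_conf.ipsec.mode &&
				sec_cap->ipsec.direction == sa->direction)
			break;
		sec_cap++;
	}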
To resolve the missing addresses, they are stored in the security
association, as is done for tunnel mode; the difference is that they
are obtained from the SP rules instead of the config file. The
sp[4/6]_spi_present() functions are used to find the addresses based
on the SPI value, and the addresses are then stored in the
corresponding SA rule. This approach assumes that every SP rule used
for inline crypto has valid addresses; ranges of addresses are not
supported.
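
In outline, the IPv4 path works as below (a sketch using names from
the diff; the IPv6 branch and the address validity checks are
trimmed):

	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	/* Find the SP rule referencing this SPI and copy out its
	 * addresses and mask lengths. */
	int proto = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);

	if (proto == IPPROTO_IPIP &&
			mask[0] == IP4_FULL_MASK && mask[1] == IP4_FULL_MASK) {
		sa->flags |= IP4_TRANSPORT;
		sa->src.ip.ip4 = ip_addr[0].ip.ip4;
		sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
	}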
New flags are added to the ipsec_sa structure to distinguish between
the IPv4 and IPv6 transport modes. Because of that, all checks done
on these flags have to be changed so that they keep working as
expected.
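
For example, a transport SA over IPv4 ends up carrying both the
TRANSPORT and IP4_TRANSPORT bits, so the checks become mask tests
(macros as defined in the diff below):

	sa->flags = TRANSPORT;
	sa->flags |= IP4_TRANSPORT;	/* set once the SP rule is found */

	IS_TRANSPORT(sa->flags);	/* true for any transport SA */
	IS_IP4(sa->flags);		/* true for IP4_TUNNEL or IP4_TRANSPORT */
	WITHOUT_TRANSPORT_VERSION(sa->flags);	/* yields TRANSPORT, for switches */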
Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
Fixes: 9a0752f498d2 ("net/ixgbe: enable inline IPsec")
Cc: stable@dpdk.org
Signed-off-by: Mariusz Drost <mariuszx.drost@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Tested-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
}
}
- if (unlikely(sa->flags == TRANSPORT)) {
+ if (unlikely(IS_TRANSPORT(sa->flags))) {
ip = rte_pktmbuf_mtod(m, struct ip *);
ip4 = (struct ip *)rte_pktmbuf_adj(m,
sizeof(struct rte_esp_hdr) + sa->iv_len);
ip4 = rte_pktmbuf_mtod(m, struct ip *);
if (likely(ip4->ip_v == IPVERSION)) {
- if (unlikely(sa->flags == TRANSPORT)) {
+ if (unlikely(IS_TRANSPORT(sa->flags))) {
ip_hdr_len = ip4->ip_hl * 4;
nlp = ip4->ip_p;
} else
nlp = IPPROTO_IPIP;
} else if (ip4->ip_v == IP6_VERSION) {
- if (unlikely(sa->flags == TRANSPORT)) {
+ if (unlikely(IS_TRANSPORT(sa->flags))) {
/* XXX No option headers supported */
ip_hdr_len = sizeof(struct ip6_hdr);
ip6 = (struct ip6_hdr *)ip4;
ip_hdr_len + 2, sa->block_size);
pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
- RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
- sa->flags == TRANSPORT);
+ RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags));
- if (likely(sa->flags == IP4_TUNNEL))
+ if (likely(IS_IP4_TUNNEL(sa->flags)))
ip_hdr_len = sizeof(struct ip);
- else if (sa->flags == IP6_TUNNEL)
+ else if (IS_IP6_TUNNEL(sa->flags))
ip_hdr_len = sizeof(struct ip6_hdr);
- else if (sa->flags != TRANSPORT) {
+ else if (!IS_TRANSPORT(sa->flags)) {
RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
sa->flags);
return -EINVAL;
rte_prefetch0(padding);
}
- switch (sa->flags) {
+ switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
&sa->src, &sa->dst);
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
struct rte_security_ipsec_tunnel_param *tunnel =
&ipsec->tunnel;
- if (sa->flags == IP4_TUNNEL) {
+ if (IS_IP4_TUNNEL(sa->flags)) {
tunnel->type =
RTE_SECURITY_IPSEC_TUNNEL_IPV4;
tunnel->ipv4.ttl = IPDEFTTL;
.options = { 0 },
.direction = sa->direction,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
- .mode = (sa->flags == IP4_TUNNEL ||
- sa->flags == IP6_TUNNEL) ?
+ .mode = (IS_TUNNEL(sa->flags)) ?
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
} },
sec_cap->protocol ==
RTE_SECURITY_PROTOCOL_IPSEC &&
sec_cap->ipsec.mode ==
- RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sess_conf.ipsec.mode &&
sec_cap->ipsec.direction == sa->direction)
break;
sec_cap++;
sa->security_ctx = ctx;
sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
- sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
- sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
- if (sa->flags & IP6_TUNNEL) {
+ if (IS_IP6(sa->flags)) {
+ sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
sa->pattern[1].spec = &sa->ipv6_spec;
+
memcpy(sa->ipv6_spec.hdr.dst_addr,
sa->dst.ip.ip6.ip6_b, 16);
memcpy(sa->ipv6_spec.hdr.src_addr,
sa->src.ip.ip6.ip6_b, 16);
- } else {
+ } else if (IS_IP4(sa->flags)) {
+ sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
sa->pattern[1].spec = &sa->ipv4_spec;
+
sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
}
sec_cap->protocol ==
RTE_SECURITY_PROTOCOL_IPSEC &&
sec_cap->ipsec.mode ==
- RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sess_conf.ipsec.mode &&
sec_cap->ipsec.direction == sa->direction)
break;
sec_cap++;
#define IP4_TUNNEL (1 << 0)
#define IP6_TUNNEL (1 << 1)
#define TRANSPORT (1 << 2)
+#define IP4_TRANSPORT (1 << 3)
+#define IP6_TRANSPORT (1 << 4)
struct ip_addr src;
struct ip_addr dst;
uint8_t cipher_key[MAX_KEY_SIZE];
uint8_t buf[32];
} __rte_cache_aligned;
+#define IS_TRANSPORT(flags) ((flags) & TRANSPORT)
+
+#define IS_TUNNEL(flags) ((flags) & (IP4_TUNNEL | IP6_TUNNEL))
+
+#define IS_IP4(flags) ((flags) & (IP4_TUNNEL | IP4_TRANSPORT))
+
+#define IS_IP6(flags) ((flags) & (IP6_TUNNEL | IP6_TRANSPORT))
+
+#define IS_IP4_TUNNEL(flags) ((flags) & IP4_TUNNEL)
+
+#define IS_IP6_TUNNEL(flags) ((flags) & IP6_TUNNEL)
+
+/*
+ * Macro for getting the ipsec_sa flags without the protocol version
+ * used for transport (the IP4_TRANSPORT and IP6_TRANSPORT flags).
+ */
+#define WITHOUT_TRANSPORT_VERSION(flags) \
+ ((flags) & (IP4_TUNNEL | \
+ IP6_TUNNEL | \
+ TRANSPORT))
+
struct cdev_qp {
uint16_t id;
uint16_t qp;
* or -ENOENT otherwise.
*/
int
-sp4_spi_present(uint32_t spi, int inbound);
+sp4_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2],
+ uint32_t mask[2]);
int
-sp6_spi_present(uint32_t spi, int inbound);
+sp6_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2],
+ uint32_t mask[2]);
/*
* Search through SA entries for given SPI.
#define IPDEFTTL 64
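+/* Bit length of a full IPv4/IPv6 address: an SP rule must match a
+ * single host (full mask) for its addresses to be copied into the SA. */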
+#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
+
+#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
+
struct supported_cipher_algo {
const char *keyword;
enum rte_crypto_cipher_algorithm algo;
if (status->status < 0)
return;
- if (rule->flags == IP4_TUNNEL) {
+ if (IS_IP4_TUNNEL(rule->flags)) {
struct in_addr ip;
APP_CHECK(parse_ipv4_addr(tokens[ti],
return;
rule->src.ip.ip4 = rte_bswap32(
(uint32_t)ip.s_addr);
- } else if (rule->flags == IP6_TUNNEL) {
+ } else if (IS_IP6_TUNNEL(rule->flags)) {
struct in6_addr ip;
APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
return;
memcpy(rule->src.ip.ip6.ip6_b,
ip.s6_addr, 16);
- } else if (rule->flags == TRANSPORT) {
+ } else if (IS_TRANSPORT(rule->flags)) {
APP_CHECK(0, status, "unrecognized input "
"\"%s\"", tokens[ti]);
return;
if (status->status < 0)
return;
- if (rule->flags == IP4_TUNNEL) {
+ if (IS_IP4_TUNNEL(rule->flags)) {
struct in_addr ip;
APP_CHECK(parse_ipv4_addr(tokens[ti],
return;
rule->dst.ip.ip4 = rte_bswap32(
(uint32_t)ip.s_addr);
- } else if (rule->flags == IP6_TUNNEL) {
+ } else if (IS_IP6_TUNNEL(rule->flags)) {
struct in6_addr ip;
APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
if (status->status < 0)
return;
memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
- } else if (rule->flags == TRANSPORT) {
+ } else if (IS_TRANSPORT(rule->flags)) {
APP_CHECK(0, status, "unrecognized "
"input \"%s\"", tokens[ti]);
return;
printf("mode:");
- switch (sa->flags) {
+ switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
printf("IP4Tunnel ");
uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
return 0;
}
+/*
+ * Helper function, tries to determine the next_proto for an SPI
+ * by searching through the SP rules.
+ */
+static int
+get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
+ struct ip_addr ip_addr[2], uint32_t mask[2])
+{
+ int32_t rc4, rc6;
+
+ rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ ip_addr, mask);
+ rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ ip_addr, mask);
+
+ if (rc4 >= 0) {
+ if (rc6 >= 0) {
+ RTE_LOG(ERR, IPSEC,
+ "%s: SPI %u used simultaeously by "
+ "IPv4(%d) and IPv6 (%d) SP rules\n",
+ __func__, spi, rc4, rc6);
+ return -EINVAL;
+ } else
+ return IPPROTO_IPIP;
+ } else if (rc6 < 0) {
+ RTE_LOG(ERR, IPSEC,
+ "%s: SPI %u is not used by any SP rule\n",
+ __func__, spi);
+ return -EINVAL;
+ } else
+ return IPPROTO_IPV6;
+}
+
+/*
+ * Helper function for getting source and destination IP addresses
+ * from the SP. Needed for inline crypto transport mode, as addresses
+ * are not provided in the config file for that mode. It checks
+ * whether an SP for the current SA exists and, based on the protocol
+ * returned, stores the appropriate addresses from the SP into the SA.
+ */
+static int
+sa_add_address_inline_crypto(struct ipsec_sa *sa)
+{
+ int protocol;
+ struct ip_addr ip_addr[2];
+ uint32_t mask[2];
+
+ protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
+ if (protocol < 0)
+ return protocol;
+ else if (protocol == IPPROTO_IPIP) {
+ sa->flags |= IP4_TRANSPORT;
+ if (mask[0] == IP4_FULL_MASK &&
+ mask[1] == IP4_FULL_MASK &&
+ ip_addr[0].ip.ip4 != 0 &&
+ ip_addr[1].ip.ip4 != 0) {
+
+ sa->src.ip.ip4 = ip_addr[0].ip.ip4;
+ sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
+ } else {
+ RTE_LOG(ERR, IPSEC,
+ "%s: No valid address or mask entry in"
+ " IPv4 SP rule for SPI %u\n",
+ __func__, sa->spi);
+ return -EINVAL;
+ }
+ } else if (protocol == IPPROTO_IPV6) {
+ sa->flags |= IP6_TRANSPORT;
+ if (mask[0] == IP6_FULL_MASK &&
+ mask[1] == IP6_FULL_MASK &&
+ (ip_addr[0].ip.ip6.ip6[0] != 0 ||
+ ip_addr[0].ip.ip6.ip6[1] != 0) &&
+ (ip_addr[1].ip.ip6.ip6[0] != 0 ||
+ ip_addr[1].ip.ip6.ip6[1] != 0)) {
+
+ sa->src.ip.ip6 = ip_addr[0].ip.ip6;
+ sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
+ } else {
+ RTE_LOG(ERR, IPSEC,
+ "%s: No valid address or mask entry in"
+ " IPv6 SP rule for SPI %u\n",
+ __func__, sa->spi);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
struct ipsec_sa *sa;
uint32_t i, idx;
uint16_t iv_length, aad_length;
+ int inline_status;
/* for ESN upper 32 bits of SQN also need to be part of AAD */
aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
- switch (sa->flags) {
+ switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
+ break;
+ case TRANSPORT:
+ if (sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ inline_status =
+ sa_add_address_inline_crypto(sa);
+ if (inline_status < 0)
+ return inline_status;
+ }
+ break;
}
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
prm->replay_win_sz = app_prm->window_size;
}
-/*
- * Helper function, tries to determine next_proto for SPI
- * by searching though SP rules.
- */
-static int
-get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir)
-{
- int32_t rc4, rc6;
-
- rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
- rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
-
- if (rc4 >= 0) {
- if (rc6 >= 0) {
- RTE_LOG(ERR, IPSEC,
- "%s: SPI %u used simultaeously by "
- "RTE_IPV4(%d) and IPv6 (%d) SP rules\n",
- __func__, spi, rc4, rc6);
- return -EINVAL;
- } else
- return IPPROTO_IPIP;
- } else if (rc6 < 0) {
- RTE_LOG(ERR, IPSEC,
- "%s: SPI %u is not used by any SP rule\n",
- __func__, spi);
- return -EINVAL;
- } else
- return IPPROTO_IPV6;
-}
-
static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
* probably not the optimal way, but there seems nothing
* better right now.
*/
- rc = get_spi_proto(ss->spi, ss->direction);
+ rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
if (rc < 0)
return rc;
prm->ipsec_xform.salt = ss->salt;
prm->ipsec_xform.direction = ss->direction;
prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
- prm->ipsec_xform.mode = (ss->flags == TRANSPORT) ?
+ prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
- if (ss->flags == IP4_TUNNEL) {
+ if (IS_IP4_TUNNEL(ss->flags)) {
prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
prm->tun.hdr_len = sizeof(*v4);
prm->tun.next_proto = rc;
prm->tun.hdr = v4;
- } else if (ss->flags == IP6_TUNNEL) {
+ } else if (IS_IP6_TUNNEL(ss->flags)) {
prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
prm->tun.hdr_len = sizeof(*v6);
prm->tun.next_proto = rc;
.proto = IPPROTO_ESP,
};
- if (lsa->flags == IP6_TUNNEL) {
+ if (IS_IP6_TUNNEL(lsa->flags)) {
memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
}
if (rte_be_to_cpu_32(esp->spi) != sa->spi)
return;
- switch (sa->flags) {
+ switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
if ((ip->ip_v == IPVERSION) &&
#define MAX_ACL_RULE_NUM 1024
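+/* Extract addresses and mask lengths from the IPv4 ACL fields of an SP rule. */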
+#define IPV4_DST_FROM_SP(acr) \
+ (rte_cpu_to_be_32((acr).field[DST_FIELD_IPV4].value.u32))
+
+#define IPV4_SRC_FROM_SP(acr) \
+ (rte_cpu_to_be_32((acr).field[SRC_FIELD_IPV4].value.u32))
+
+#define IPV4_DST_MASK_FROM_SP(acr) \
+ ((acr).field[DST_FIELD_IPV4].mask_range.u32)
+
+#define IPV4_SRC_MASK_FROM_SP(acr) \
+ ((acr).field[SRC_FIELD_IPV4].mask_range.u32)
+
/*
* Rule and trace formats definitions.
*/
* Search though SP rules for given SPI.
*/
int
-sp4_spi_present(uint32_t spi, int inbound)
+sp4_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2],
+ uint32_t mask[2])
{
uint32_t i, num;
const struct acl4_rules *acr;
}
for (i = 0; i != num; i++) {
- if (acr[i].data.userdata == spi)
+ if (acr[i].data.userdata == spi) {
+ if (NULL != ip_addr && NULL != mask) {
+ ip_addr[0].ip.ip4 = IPV4_SRC_FROM_SP(acr[i]);
+ ip_addr[1].ip.ip4 = IPV4_DST_FROM_SP(acr[i]);
+ mask[0] = IPV4_SRC_MASK_FROM_SP(acr[i]);
+ mask[1] = IPV4_DST_MASK_FROM_SP(acr[i]);
+ }
return i;
+ }
}
return -ENOENT;
#define MAX_ACL_RULE_NUM 1024
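+/* Reassemble IPv6 addresses and prefix lengths from the 32-bit ACL
+ * fields of an SP rule. */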
+#define IPV6_FROM_SP(acr, fidx_low, fidx_high) \
+ (((uint64_t)(acr).field[(fidx_high)].value.u32 << 32) | \
+ (acr).field[(fidx_low)].value.u32)
+
+#define IPV6_DST_FROM_SP(addr, acr) do {\
+ (addr).ip.ip6.ip6[0] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \
+ IP6_DST1, IP6_DST0));\
+ (addr).ip.ip6.ip6[1] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \
+ IP6_DST3, IP6_DST2));\
+ } while (0)
+
+#define IPV6_SRC_FROM_SP(addr, acr) do {\
+ (addr).ip.ip6.ip6[0] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \
+ IP6_SRC1, IP6_SRC0));\
+ (addr).ip.ip6.ip6[1] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \
+ IP6_SRC3, IP6_SRC2));\
+ } while (0)
+
+#define IPV6_DST_MASK_FROM_SP(mask, acr) \
+ ((mask) = (acr).field[IP6_DST0].mask_range.u32 + \
+ (acr).field[IP6_DST1].mask_range.u32 + \
+ (acr).field[IP6_DST2].mask_range.u32 + \
+ (acr).field[IP6_DST3].mask_range.u32)
+
+#define IPV6_SRC_MASK_FROM_SP(mask, acr) \
+ ((mask) = (acr).field[IP6_SRC0].mask_range.u32 + \
+ (acr).field[IP6_SRC1].mask_range.u32 + \
+ (acr).field[IP6_SRC2].mask_range.u32 + \
+ (acr).field[IP6_SRC3].mask_range.u32)
+
enum {
IP6_PROTO,
IP6_SRC0,
* Search though SP rules for given SPI.
*/
int
-sp6_spi_present(uint32_t spi, int inbound)
+sp6_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2],
+ uint32_t mask[2])
{
uint32_t i, num;
const struct acl6_rules *acr;
}
for (i = 0; i != num; i++) {
- if (acr[i].data.userdata == spi)
+ if (acr[i].data.userdata == spi) {
+ if (NULL != ip_addr && NULL != mask) {
+ IPV6_SRC_FROM_SP(ip_addr[0], acr[i]);
+ IPV6_DST_FROM_SP(ip_addr[1], acr[i]);
+ IPV6_SRC_MASK_FROM_SP(mask[0], acr[i]);
+ IPV6_DST_MASK_FROM_SP(mask[1], acr[i]);
+ }
return i;
+ }
}
return -ENOENT;