* Copyright(C) 2021 Marvell.
*/
+#ifndef RTE_EXEC_ENV_WINDOWS
+
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_esp.h>
(RTE_DIM(cipher_list) *
RTE_DIM(auth_list))];
+/*
+ * Sanity-check an IPv4 header from a test vector / received packet.
+ *
+ * Validates only the fields needed by the tests: version, minimum IHL
+ * and a total length large enough for the base header. Returns true
+ * when the header looks like a well-formed IPv4 header.
+ */
+static bool
+is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
+{
+ /* The IP version number must be 4 */
+ if (((pkt->version_ihl) >> 4) != 4)
+ return false;
+ /*
+ * The IP header length field must be large enough to hold the
+ * minimum length legal IP datagram (20 bytes = 5 words).
+ */
+ if ((pkt->version_ihl & 0xf) < 5)
+ return false;
+
+ /*
+ * The IP total length field must be large enough to hold the IP
+ * datagram header, whose length is specified in the IP header length
+ * field. total_length is in network byte order, so convert
+ * big-endian to CPU order before the comparison (the previous
+ * rte_cpu_to_be_16() worked only because the 16-bit swap is
+ * symmetric).
+ */
+ if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
+ return false;
+
+ return true;
+}
+
+/*
+ * Minimal sanity check on an IPv6 header: only the version field
+ * (top 4 bits of the big-endian vtc_flow word) is validated.
+ */
+static bool
+is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
+{
+ /* The IP version number must be 6 */
+ if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
+ return false;
+
+ return true;
+}
+
void
test_ipsec_alg_list_populate(void)
{
if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
/* Copy template for packet & key fields */
- memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
+ if (flags->ipv6)
+ memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
+ else
+ memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
td->aead = true;
td->xform.aead.aead.algo = param1->alg.aead;
td->xform.aead.aead.key.length = param1->key_length;
} else {
/* Copy template for packet & key fields */
- memcpy(td, &pkt_aes_128_cbc_hmac_sha256, sizeof(*td));
+ if (flags->ipv6)
+ memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
+ sizeof(*td));
+ else
+ memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
+ sizeof(*td));
td->aead = false;
td->xform.chain.cipher.cipher.algo = param1->alg.cipher;
td->xform.chain.cipher.cipher.key.length =
param1->key_length;
+ td->xform.chain.cipher.cipher.iv.length =
+ param1->iv_length;
td->xform.chain.auth.auth.algo = param2->alg.auth;
td->xform.chain.auth.auth.key.length =
param2->key_length;
test_ipsec_csum_init(&td->input_text.data, false, true);
}
+ if (flags->transport) {
+ td->ipsec_xform.mode =
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
+ } else {
+ td->ipsec_xform.mode =
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
+
+ if (flags->tunnel_ipv6)
+ td->ipsec_xform.tunnel.type =
+ RTE_SECURITY_IPSEC_TUNNEL_IPV6;
+ else
+ td->ipsec_xform.tunnel.type =
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4;
+ }
+
+ if (flags->stats_success)
+ td->ipsec_xform.options.stats = 1;
+
+ if (flags->fragment) {
+ struct rte_ipv4_hdr *ip;
+ ip = (struct rte_ipv4_hdr *)&td->input_text.data;
+ ip->fragment_offset = 4;
+ ip->hdr_checksum = rte_ipv4_cksum(ip);
+ }
+
+ if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
+ flags->df == TEST_IPSEC_COPY_DF_INNER_1)
+ td->ipsec_xform.options.copy_df = 1;
}
}
{
uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
uint32_t skip, len = rte_pktmbuf_pkt_len(m);
+ uint8_t td_output_text[4096];
int ret;
/* For tests with status as error for test success, skip verification */
return TEST_FAILED;
}
+ if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
+ flags->fragment) {
+ const struct rte_ipv4_hdr *iph4;
+ iph4 = (const struct rte_ipv4_hdr *)output_text;
+ if (iph4->fragment_offset) {
+ printf("Output packet is fragmented");
+ return TEST_FAILED;
+ }
+ }
+
skip = test_ipsec_tunnel_hdr_len_get(td);
len -= skip;
return ret;
}
+ memcpy(td_output_text, td->output_text.data + skip, len);
+
+ if (test_ipsec_pkt_update(td_output_text, flags)) {
+ printf("Could not update expected vector");
+ return TEST_FAILED;
+ }
- if (memcmp(output_text, td->output_text.data + skip, len)) {
+ if (memcmp(output_text, td_output_text, len)) {
if (silent)
return TEST_FAILED;
printf("TestCase %s line %d: %s\n", __func__, __LINE__,
"output text not as expected\n");
- rte_hexdump(stdout, "expected", td->output_text.data + skip,
- len);
+ rte_hexdump(stdout, "expected", td_output_text, len);
rte_hexdump(stdout, "actual", output_text, len);
return TEST_FAILED;
}
struct ipsec_test_data *res_d, bool silent,
const struct ipsec_test_flags *flags)
{
+ uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
int ret;
- if (flags->iv_gen &&
- td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
- ret = test_ipsec_iv_verify_push(m, td);
- if (ret != TEST_SUCCESS)
- return ret;
+ if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+
+ if (flags->iv_gen) {
+ ret = test_ipsec_iv_verify_push(m, td);
+ if (ret != TEST_SUCCESS)
+ return ret;
+ }
+
+ iph4 = (const struct rte_ipv4_hdr *)output_text;
+
+ if (td->ipsec_xform.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+ if (flags->ipv6) {
+ iph6 = (const struct rte_ipv6_hdr *)output_text;
+ if (is_valid_ipv6_pkt(iph6) == false) {
+ printf("Transport packet is not IPv6\n");
+ return TEST_FAILED;
+ }
+ } else {
+ if (is_valid_ipv4_pkt(iph4) == false) {
+ printf("Transport packet is not IPv4\n");
+ return TEST_FAILED;
+ }
+ }
+ } else {
+ if (td->ipsec_xform.tunnel.type ==
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+ uint16_t f_off;
+
+ if (is_valid_ipv4_pkt(iph4) == false) {
+ printf("Tunnel outer header is not IPv4\n");
+ return TEST_FAILED;
+ }
+
+ f_off = rte_be_to_cpu_16(iph4->fragment_offset);
+
+ if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
+ flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
+ if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
+ printf("DF bit is not set\n");
+ return TEST_FAILED;
+ }
+ } else {
+ if ((f_off & RTE_IPV4_HDR_DF_FLAG)) {
+ printf("DF bit is set\n");
+ return TEST_FAILED;
+ }
+ }
+ } else {
+ iph6 = (const struct rte_ipv6_hdr *)output_text;
+ if (is_valid_ipv6_pkt(iph6) == false) {
+ printf("Tunnel outer header is not IPv6\n");
+ return TEST_FAILED;
+ }
+ }
+ }
}
/*
return ret;
}
+
+/*
+ * Verify per-SA statistics after a test case has processed one packet.
+ *
+ * Only active when the stats_success test flag is set: fetches the
+ * security session stats and checks that exactly one packet was
+ * counted, with zero errors, in the direction under test (egress
+ * checks opackets/oerrors, ingress checks ipackets/ierrors).
+ *
+ * Returns TEST_SUCCESS on match (or when stats checking is disabled),
+ * TEST_FAILED on a stats-get error or counter mismatch.
+ */
+int
+test_ipsec_stats_verify(struct rte_security_ctx *ctx,
+ struct rte_security_session *sess,
+ const struct ipsec_test_flags *flags,
+ enum rte_security_ipsec_sa_direction dir)
+{
+ struct rte_security_stats stats = {0};
+ int ret = TEST_SUCCESS;
+
+ if (flags->stats_success) {
+ if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
+ return TEST_FAILED;
+
+ if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ /* Exactly one packet expected on the outbound path */
+ if (stats.ipsec.opackets != 1 ||
+ stats.ipsec.oerrors != 0)
+ ret = TEST_FAILED;
+ } else {
+ /* Exactly one packet expected on the inbound path */
+ if (stats.ipsec.ipackets != 1 ||
+ stats.ipsec.ierrors != 0)
+ ret = TEST_FAILED;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Patch an expected-output test vector according to DF-bit test flags.
+ *
+ * For the COPY_DF_*/SET_DF_* cases the expected IPv4 header must carry
+ * the DF bit value the test configured on the SA: set DF for
+ * COPY_DF_INNER_1 / SET_DF_0_INNER_1, clear it otherwise, then
+ * recompute the IPv4 header checksum so the later memcmp against the
+ * actual output still matches.
+ *
+ * Returns 0 on success, -1 when a DF flag is requested but the vector
+ * is not an IPv4 packet.
+ */
+int
+test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
+{
+ struct rte_ipv4_hdr *iph4;
+ bool cksum_dirty = false;
+ uint16_t frag_off;
+
+ iph4 = (struct rte_ipv4_hdr *)pkt;
+
+ if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
+ flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
+ flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
+ flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
+
+ /* DF manipulation only defined for IPv4 vectors */
+ if (!is_ipv4(iph4)) {
+ printf("Invalid packet type");
+ return -1;
+ }
+
+ /* fragment_offset is big-endian on the wire */
+ frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
+
+ if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
+ flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
+ frag_off |= RTE_IPV4_HDR_DF_FLAG;
+ else
+ frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
+
+ iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
+ cksum_dirty = true;
+ }
+
+ /* Header changed: zero then recompute the IPv4 checksum */
+ if (cksum_dirty && is_ipv4(iph4)) {
+ iph4->hdr_checksum = 0;
+ iph4->hdr_checksum = rte_ipv4_cksum(iph4);
+ }
+
+ return 0;
+}
+
+#endif /* !RTE_EXEC_ENV_WINDOWS */