1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #ifndef RTE_EXEC_ENV_WINDOWS
7 #include <rte_common.h>
8 #include <rte_cryptodev.h>
11 #include <rte_security.h>
16 #include "test_cryptodev_security_ipsec.h"
/* Algorithm-combination tables that parameterize the IPsec test matrix.
 * alg_list: every AEAD entry followed by every cipher x auth pairing.
 * ah_alg_list: auth-only (and NULL-cipher + auth) combinations for AH.
 * Sizes are derived with RTE_DIM() from the per-class parameter tables.
 * NOTE(review): the alg_list size expression continues on elided lines. */
20 struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
21 (RTE_DIM(cipher_list) *
24 struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];
/* Minimal sanity check of an IPv4 header: version == 4, IHL >= 5 words,
 * and total_length at least the fixed 20-byte header. Used to validate
 * outer/transport headers produced by IPsec processing. */
27 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
29 /* The IP version number must be 4 */
30 if (((pkt->version_ihl) >> 4) != 4)
33 * The IP header length field must be large enough to hold the
34 * minimum length legal IP datagram (20 bytes = 5 words).
36 if ((pkt->version_ihl & 0xf) < 5)
40 * The IP total length field must be large enough to hold the IP
41 * datagram header, whose length is specified in the IP header length
/* NOTE(review): total_length is big-endian on the wire, so
 * rte_be_to_cpu_16() is the semantically correct converter here;
 * rte_cpu_to_be_16() performs the same byte swap, so behavior is
 * unchanged, but the intent reads backwards — consider renaming. */
44 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
/* Minimal sanity check of an IPv6 header: the top 4 bits of vtc_flow
 * (the version field) must equal 6. */
51 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
53 /* The IP version number must be 6 */
54 if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
/* Fill alg_list: first one entry per AEAD algorithm (param2 == NULL),
 * then one entry for every cipher x auth combination. 'index' advances
 * across both phases (increment lines are elided in this listing). */
61 test_ipsec_alg_list_populate(void)
63 unsigned long i, j, index = 0;
65 for (i = 0; i < RTE_DIM(aead_list); i++) {
66 alg_list[index].param1 = &aead_list[i];
67 alg_list[index].param2 = NULL;
71 for (i = 0; i < RTE_DIM(cipher_list); i++) {
72 for (j = 0; j < RTE_DIM(auth_list); j++) {
73 alg_list[index].param1 = &cipher_list[i];
74 alg_list[index].param2 = &auth_list[j];
/* Fill ah_alg_list for AH test cases: first auth-only entries, then
 * NULL-cipher (cipher_list[0]) + auth entries. Loops start at i = 1 to
 * skip the first auth_list entry — presumably the NULL auth algorithm;
 * confirm against the auth_list definition (not visible here). */
81 test_ipsec_ah_alg_list_populate(void)
83 unsigned long i, index = 0;
85 for (i = 1; i < RTE_DIM(auth_list); i++) {
86 ah_alg_list[index].param1 = &auth_list[i];
87 ah_alg_list[index].param2 = NULL;
91 for (i = 1; i < RTE_DIM(auth_list); i++) {
93 ah_alg_list[index].param1 = &cipher_list[0];
95 ah_alg_list[index].param2 = &auth_list[i];
/* Check each IPsec option requested in ipsec_xform against the security
 * capabilities reported by the device (sec_cap). Each unsupported
 * feature logs an INFO message; the elided lines presumably return an
 * unsupported/failure status — confirm against full source. */
101 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
102 const struct rte_security_capability *sec_cap,
105 /* Verify security capabilities */
107 if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
109 RTE_LOG(INFO, USER1, "ESN is not supported\n");
113 if (ipsec_xform->options.udp_encap == 1 &&
114 sec_cap->ipsec.options.udp_encap == 0) {
116 RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
120 if (ipsec_xform->options.udp_ports_verify == 1 &&
121 sec_cap->ipsec.options.udp_ports_verify == 0) {
123 RTE_LOG(INFO, USER1, "UDP encapsulation ports "
124 "verification is not supported\n");
128 if (ipsec_xform->options.copy_dscp == 1 &&
129 sec_cap->ipsec.options.copy_dscp == 0) {
131 RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
135 if (ipsec_xform->options.copy_flabel == 1 &&
136 sec_cap->ipsec.options.copy_flabel == 0) {
138 RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
142 if (ipsec_xform->options.copy_df == 1 &&
143 sec_cap->ipsec.options.copy_df == 0) {
145 RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
149 if (ipsec_xform->options.dec_ttl == 1 &&
150 sec_cap->ipsec.options.dec_ttl == 0) {
152 RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
156 if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
158 RTE_LOG(INFO, USER1, "ECN is not supported\n");
162 if (ipsec_xform->options.stats == 1 &&
163 sec_cap->ipsec.options.stats == 0) {
165 RTE_LOG(INFO, USER1, "Stats is not supported\n");
/* IV generation can only be disabled (application-provided IV) on the
 * egress/outbound path, hence the direction check. */
169 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
170 (ipsec_xform->options.iv_gen_disable == 1) &&
171 (sec_cap->ipsec.options.iv_gen_disable != 1)) {
174 "Application provided IV is not supported\n");
/* tunnel_hdr_verify is a level (not a boolean): the device must support
 * at least the requested verification depth. */
178 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
179 (ipsec_xform->options.tunnel_hdr_verify >
180 sec_cap->ipsec.options.tunnel_hdr_verify)) {
183 "Tunnel header verify is not supported\n");
187 if (ipsec_xform->options.ip_csum_enable == 1 &&
188 sec_cap->ipsec.options.ip_csum_enable == 0) {
191 "Inner IP checksum is not supported\n");
195 if (ipsec_xform->options.l4_csum_enable == 1 &&
196 sec_cap->ipsec.options.l4_csum_enable == 0) {
199 "Inner L4 checksum is not supported\n");
203 if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
206 "Replay window size is not supported\n");
/* Walk the security capability's crypto capability array (terminated by
 * RTE_CRYPTO_OP_TYPE_UNDEFINED) looking for a symmetric AEAD entry that
 * matches the requested algorithm, then validate key/digest/AAD/IV
 * lengths via rte_cryptodev_sym_capability_check_aead(). */
214 test_ipsec_crypto_caps_aead_verify(
215 const struct rte_security_capability *sec_cap,
216 struct rte_crypto_sym_xform *aead)
218 const struct rte_cryptodev_symmetric_capability *sym_cap;
219 const struct rte_cryptodev_capabilities *crypto_cap;
/* 'j' is declared on an elided line; the post-increment scans the
 * array until the UNDEFINED sentinel entry. */
222 while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
223 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
224 if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
225 crypto_cap->sym.xform_type == aead->type &&
226 crypto_cap->sym.aead.algo == aead->aead.algo) {
227 sym_cap = &crypto_cap->sym;
/* check_aead() returns 0 when all the requested sizes are supported. */
228 if (rte_cryptodev_sym_capability_check_aead(sym_cap,
229 aead->aead.key.length,
230 aead->aead.digest_length,
231 aead->aead.aad_length,
232 aead->aead.iv.length) == 0)
/* Same pattern as the AEAD variant: scan the sentinel-terminated crypto
 * capability array for a matching symmetric cipher entry, then validate
 * key and IV lengths via rte_cryptodev_sym_capability_check_cipher(). */
241 test_ipsec_crypto_caps_cipher_verify(
242 const struct rte_security_capability *sec_cap,
243 struct rte_crypto_sym_xform *cipher)
245 const struct rte_cryptodev_symmetric_capability *sym_cap;
246 const struct rte_cryptodev_capabilities *cap;
249 while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
250 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
251 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
252 cap->sym.xform_type == cipher->type &&
253 cap->sym.cipher.algo == cipher->cipher.algo) {
/* sym_cap assignment is on an elided line (presumably &cap->sym). */
255 if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
256 cipher->cipher.key.length,
257 cipher->cipher.iv.length) == 0)
/* Auth counterpart of the capability scans above: find a matching
 * symmetric auth entry and validate key/digest/IV lengths via
 * rte_cryptodev_sym_capability_check_auth(). */
266 test_ipsec_crypto_caps_auth_verify(
267 const struct rte_security_capability *sec_cap,
268 struct rte_crypto_sym_xform *auth)
270 const struct rte_cryptodev_symmetric_capability *sym_cap;
271 const struct rte_cryptodev_capabilities *cap;
274 while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
275 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
276 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
277 cap->sym.xform_type == auth->type &&
278 cap->sym.auth.algo == auth->auth.algo) {
/* sym_cap assignment is on an elided line (presumably &cap->sym). */
280 if (rte_cryptodev_sym_capability_check_auth(sym_cap,
281 auth->auth.key.length,
282 auth->auth.digest_length,
283 auth->auth.iv.length) == 0)
/* Derive an inbound test descriptor from an outbound one: copy the whole
 * descriptor, swap input/output texts, flip the SA direction to ingress
 * and switch the crypto ops to decrypt/verify. The AEAD-vs-chain branch
 * condition is on an elided line. */
292 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
293 struct ipsec_test_data *td_in)
295 memcpy(td_in, td_out, sizeof(*td_in));
297 /* Populate output text of td_in with input text of td_out */
298 memcpy(td_in->output_text.data, td_out->input_text.data,
299 td_out->input_text.len);
300 td_in->output_text.len = td_out->input_text.len;
302 /* Populate input text of td_in with output text of td_out */
303 memcpy(td_in->input_text.data, td_out->output_text.data,
304 td_out->output_text.len);
305 td_in->input_text.len = td_out->output_text.len;
307 td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
310 td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
312 td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
313 td_in->xform.chain.cipher.cipher.op =
314 RTE_CRYPTO_CIPHER_OP_DECRYPT;
/* Fragment of a helper (header line elided; later callers name it
 * is_ipv4): reads the version nibble of the IP header and compares it
 * to IPVERSION (4).
 * NOTE(review): the shift reuses RTE_IPV4_IHL_MULTIPLIER (value 4),
 * which happens to equal the version-field shift but semantically
 * denotes the IHL word size — confirm intent / consider a literal 4. */
321 struct rte_ipv4_hdr *ipv4 = ip;
324 ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
325 if (ip_ver == IPVERSION)
/* Prepare a plaintext packet for checksum-offload tests: when l3 is set,
 * zero the IPv4 header checksum; when l4 is set, zero the TCP/UDP
 * checksum found after the L3 header. The IPv4/IPv6 branch condition
 * and the checksum-recompute lines are elided in this listing. */
332 test_ipsec_csum_init(void *ip, bool l3, bool l4)
334 struct rte_ipv4_hdr *ipv4;
335 struct rte_tcp_hdr *tcp;
336 struct rte_udp_hdr *udp;
342 size = sizeof(struct rte_ipv4_hdr);
343 next_proto = ipv4->next_proto_id;
346 ipv4->hdr_checksum = 0;
348 size = sizeof(struct rte_ipv6_hdr);
349 next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
353 switch (next_proto) {
355 tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
359 udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
360 udp->dgram_cksum = 0;
/* Build an array of 'nb_td' test descriptors from the algorithm params
 * (param1 = AEAD or cipher/auth, param2 = auth for chained cases) and
 * the test flags: picks an AES-GCM or AES-CBC+SHA256 template (v4/v6),
 * overrides algorithm/key/IV/digest fields, then applies flag-driven
 * SA options (AH proto, lifetimes, csum offload, tunnel/transport mode,
 * stats, fragmentation, copy-DF/DSCP/FLABEL, TTL decrement). */
369 test_ipsec_td_prepare(const struct crypto_param *param1,
370 const struct crypto_param *param2,
371 const struct ipsec_test_flags *flags,
372 struct ipsec_test_data *td_array,
376 struct ipsec_test_data *td;
379 memset(td_array, 0, nb_td * sizeof(*td));
381 for (i = 0; i < nb_td; i++) {
384 /* Prepare fields based on param */
386 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
387 /* Copy template for packet & key fields */
389 memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
391 memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
394 td->xform.aead.aead.algo = param1->alg.aead;
395 td->xform.aead.aead.key.length = param1->key_length;
397 /* Copy template for packet & key fields */
399 memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
402 memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
/* Auth-only (AH-style) case: param1 carries the auth algorithm. */
407 if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
408 td->xform.chain.auth.auth.algo =
410 td->xform.chain.auth.auth.key.length =
412 td->xform.chain.auth.auth.digest_length =
413 param1->digest_length;
414 td->auth_only = true;
/* AES-GMAC is the one auth algorithm that consumes an IV. */
416 if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
417 td->xform.chain.auth.auth.iv.length =
/* Chained cipher (param1) + auth (param2) case. */
422 td->xform.chain.cipher.cipher.algo =
424 td->xform.chain.cipher.cipher.key.length =
426 td->xform.chain.cipher.cipher.iv.length =
428 td->xform.chain.auth.auth.algo =
430 td->xform.chain.auth.auth.key.length =
432 td->xform.chain.auth.auth.digest_length =
433 param2->digest_length;
435 if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
436 td->xform.chain.auth.auth.iv.length =
444 td->ipsec_xform.proto =
445 RTE_SECURITY_IPSEC_SA_PROTO_AH;
449 td->ipsec_xform.options.iv_gen_disable = 0;
451 if (flags->sa_expiry_pkts_soft)
452 td->ipsec_xform.life.packets_soft_limit =
453 IPSEC_TEST_PACKETS_MAX - 1;
455 if (flags->ip_csum) {
456 td->ipsec_xform.options.ip_csum_enable = 1;
457 test_ipsec_csum_init(&td->input_text.data, true, false);
460 if (flags->l4_csum) {
461 td->ipsec_xform.options.l4_csum_enable = 1;
462 test_ipsec_csum_init(&td->input_text.data, false, true);
465 if (flags->transport) {
466 td->ipsec_xform.mode =
467 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
469 td->ipsec_xform.mode =
470 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
472 if (flags->tunnel_ipv6)
473 td->ipsec_xform.tunnel.type =
474 RTE_SECURITY_IPSEC_TUNNEL_IPV6;
476 td->ipsec_xform.tunnel.type =
477 RTE_SECURITY_IPSEC_TUNNEL_IPV4;
480 if (flags->stats_success)
481 td->ipsec_xform.options.stats = 1;
/* Mark the inner packet as fragmented and refresh its checksum so the
 * egress fragment-handling path can be exercised. */
483 if (flags->fragment) {
484 struct rte_ipv4_hdr *ip;
485 ip = (struct rte_ipv4_hdr *)&td->input_text.data;
486 ip->fragment_offset = 4;
487 ip->hdr_checksum = rte_ipv4_cksum(ip);
490 if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
491 flags->df == TEST_IPSEC_COPY_DF_INNER_1)
492 td->ipsec_xform.options.copy_df = 1;
494 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
495 flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
496 td->ipsec_xform.options.copy_dscp = 1;
498 if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
499 flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
500 td->ipsec_xform.options.copy_flabel = 1;
502 if (flags->dec_ttl_or_hop_limit)
503 td->ipsec_xform.options.dec_ttl = 1;
/* Refresh the inbound descriptors from the corresponding outbound ones
 * (expected inbound output = original outbound plaintext) and apply
 * inbound-specific test flags: ICV corruption, hard packet-lifetime
 * expiry, UDP encapsulation / port verification, tunnel header
 * verification, inner L3/L4 checksum offload. Also clears options that
 * only make sense on the outbound SA (iv_gen_disable). */
508 test_ipsec_td_update(struct ipsec_test_data td_inb[],
509 const struct ipsec_test_data td_outb[],
511 const struct ipsec_test_flags *flags)
515 for (i = 0; i < nb_td; i++) {
516 memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
517 td_outb[i].input_text.len);
/* Fix: use the per-entry length (td_outb[i]), matching the memcpy
 * above; the previous td_outb->input_text.len always read entry 0,
 * giving every inbound descriptor packet 0's expected length. */
518 td_inb[i].output_text.len = td_outb[i].input_text.len;
/* Flip a byte near the end of the IPsec packet to corrupt the ICV. */
520 if (flags->icv_corrupt) {
521 int icv_pos = td_inb[i].input_text.len - 4;
522 td_inb[i].input_text.data[icv_pos] += 1;
525 if (flags->sa_expiry_pkts_hard)
526 td_inb[i].ipsec_xform.life.packets_hard_limit =
527 IPSEC_TEST_PACKETS_MAX - 1;
529 if (flags->udp_encap)
530 td_inb[i].ipsec_xform.options.udp_encap = 1;
532 if (flags->udp_ports_verify)
533 td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
535 td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
536 flags->tunnel_hdr_verify;
539 td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
542 td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
544 /* Clear outbound specific flags */
545 td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
/* Print a human-readable description of the algorithm combination under
 * test: AEAD name + key bits, or auth-only name + ICV size, or
 * cipher name + key bits plus auth name + ICV size for chained cases.
 * The printf format lines are elided in this listing. */
550 test_ipsec_display_alg(const struct crypto_param *param1,
551 const struct crypto_param *param2)
553 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
555 rte_crypto_aead_algorithm_strings[param1->alg.aead],
556 param1->key_length * 8);
557 } else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
559 rte_crypto_auth_algorithm_strings[param1->alg.auth]);
560 if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
561 printf(" [%dB ICV]", param1->digest_length);
564 rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
565 if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
566 printf(" [%d]", param1->key_length * 8);
568 rte_crypto_auth_algorithm_strings[param2->alg.auth]);
569 if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
570 printf(" [%dB ICV]", param2->digest_length);
/* Return the number of outer-header bytes prepended by egress tunnel
 * processing: an IPv4 or IPv6 header length for tunnel-mode SAs,
 * otherwise 0 ('len' is initialized on an elided line). */
576 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
580 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
581 if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
582 if (td->ipsec_xform.tunnel.type ==
583 RTE_SECURITY_IPSEC_TUNNEL_IPV4)
584 len += sizeof(struct rte_ipv4_hdr);
586 len += sizeof(struct rte_ipv6_hdr);
/* Verify that the device-generated IV of this IPsec packet does not
 * repeat any IV seen so far, then record it. IVs are kept in a static
 * queue of IPSEC_TEST_PACKETS_MAX slots; 'index' appears to be static
 * state (declared on an elided line) tracking how many are stored —
 * confirm against full source. Salt bytes are excluded for AEAD. */
594 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
596 static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
597 uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
598 int i, iv_pos, iv_len;
602 iv_len = td->xform.aead.aead.iv.length - td->salt.len;
604 iv_len = td->xform.chain.cipher.cipher.iv.length;
/* The IV sits right after the (optional) tunnel header and ESP header. */
606 iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
607 output_text += iv_pos;
609 TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
611 /* Compare against previous values */
612 for (i = 0; i < index; i++) {
613 iv_tmp = &iv_queue[i * IV_LEN_MAX];
615 if (memcmp(output_text, iv_tmp, iv_len) == 0) {
616 printf("IV repeated");
621 /* Save IV for future comparisons */
623 iv_tmp = &iv_queue[index * IV_LEN_MAX];
624 memcpy(iv_tmp, output_text, iv_len);
/* Presumably resets the queue once a full burst has been checked. */
627 if (index == IPSEC_TEST_PACKETS_MAX)
/* Recompute the IPv4 header checksum of a decrypted inner packet and
 * compare it to the received value. Non-IPv4 packets are presumably
 * skipped/accepted (branch body elided). */
634 test_ipsec_l3_csum_verify(struct rte_mbuf *m)
636 uint16_t actual_cksum, expected_cksum;
637 struct rte_ipv4_hdr *ip;
639 ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
641 if (!is_ipv4((void *)ip))
644 actual_cksum = ip->hdr_checksum;
/* Zero the field before recomputing, as rte_ipv4_cksum() covers it. */
646 ip->hdr_checksum = 0;
648 expected_cksum = rte_ipv4_cksum(ip);
650 if (actual_cksum != expected_cksum)
/* Recompute the inner TCP/UDP checksum (IPv4 or IPv6 pseudo-header)
 * and compare to the received value. The cksum field is zeroed before
 * recomputation in the UDP paths; the corresponding zeroing lines for
 * TCP are elided in this listing — confirm against full source. */
657 test_ipsec_l4_csum_verify(struct rte_mbuf *m)
659 uint16_t actual_cksum = 0, expected_cksum = 0;
660 struct rte_ipv4_hdr *ipv4;
661 struct rte_ipv6_hdr *ipv6;
662 struct rte_tcp_hdr *tcp;
663 struct rte_udp_hdr *udp;
666 ip = rte_pktmbuf_mtod(m, void *);
670 l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
672 switch (ipv4->next_proto_id) {
674 tcp = (struct rte_tcp_hdr *)l4;
675 actual_cksum = tcp->cksum;
677 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
680 udp = (struct rte_udp_hdr *)l4;
681 actual_cksum = udp->dgram_cksum;
682 udp->dgram_cksum = 0;
683 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
/* IPv6 branch: same logic with the IPv6 pseudo-header helper. */
690 l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
692 switch (ipv6->proto) {
694 tcp = (struct rte_tcp_hdr *)l4;
695 actual_cksum = tcp->cksum;
697 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
700 udp = (struct rte_udp_hdr *)l4;
701 actual_cksum = udp->dgram_cksum;
702 udp->dgram_cksum = 0;
703 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
710 if (actual_cksum != expected_cksum)
/* Check that the received inner packet's TTL (IPv4) or hop limit (IPv6)
 * is exactly one less than the expected packet's: the expected header is
 * decremented in place, then the two fields are compared. Mixed IP
 * versions between received and expected are treated as an error. */
717 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
719 struct rte_ipv4_hdr *iph4_ex, *iph4_re;
720 struct rte_ipv6_hdr *iph6_ex, *iph6_re;
722 if (is_ipv4(received) && is_ipv4(expected)) {
725 iph4_ex->time_to_live -= 1;
726 if (iph4_re->time_to_live != iph4_ex->time_to_live)
728 } else if (!is_ipv4(received) && !is_ipv4(expected)) {
731 iph6_ex->hop_limits -= 1;
732 if (iph6_re->hop_limits != iph6_ex->hop_limits)
735 printf("IP header version miss match\n");
/* Compare a processed mbuf against the expected output in 'td'.
 * Skips verification for negative-test flags (where an op error is the
 * pass condition), strips the UDP-encap header on egress when enabled,
 * optionally verifies inner L3/L4 checksums and TTL/hop decrement, then
 * byte-compares the payload against the (flag-adjusted) expected vector.
 * 'silent' presumably suppresses the mismatch hexdump (the guard lines
 * are elided) — confirm against full source. */
743 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
744 bool silent, const struct ipsec_test_flags *flags)
746 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
747 uint32_t skip, len = rte_pktmbuf_pkt_len(m);
748 uint8_t td_output_text[4096];
751 /* For tests with status as error for test success, skip verification */
752 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
753 (flags->icv_corrupt ||
754 flags->sa_expiry_pkts_hard ||
755 flags->tunnel_hdr_verify ||
/* Egress + UDP encap: confirm the outer header carries UDP, then skip
 * the UDP header before comparing against the expected ESP packet. */
759 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
761 const struct rte_ipv4_hdr *iph4;
762 const struct rte_ipv6_hdr *iph6;
764 if (td->ipsec_xform.tunnel.type ==
765 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
766 iph4 = (const struct rte_ipv4_hdr *)output_text;
767 if (iph4->next_proto_id != IPPROTO_UDP) {
768 printf("UDP header is not found\n");
772 iph6 = (const struct rte_ipv6_hdr *)output_text;
773 if (iph6->proto != IPPROTO_UDP) {
774 printf("UDP header is not found\n");
779 len -= sizeof(struct rte_udp_hdr);
780 output_text += sizeof(struct rte_udp_hdr);
783 if (len != td->output_text.len) {
784 printf("Output length (%d) not matching with expected (%d)\n",
785 len, td->output_text.len);
789 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
791 const struct rte_ipv4_hdr *iph4;
792 iph4 = (const struct rte_ipv4_hdr *)output_text;
793 if (iph4->fragment_offset) {
794 printf("Output packet is fragmented");
/* 'skip' excludes the tunnel header from the byte comparison (outer
 * headers are validated separately in post-process). */
799 skip = test_ipsec_tunnel_hdr_len_get(td);
/* Checksum offload results: trust the RX offload flag when set,
 * otherwise fall back to software verification. */
804 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
806 if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
807 ret = test_ipsec_l3_csum_verify(m);
811 if (ret == TEST_FAILED)
812 printf("Inner IP checksum test failed\n");
817 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
819 if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
820 ret = test_ipsec_l4_csum_verify(m);
824 if (ret == TEST_FAILED)
825 printf("Inner L4 checksum test failed\n");
/* Work on a local copy so flag-driven tweaks don't mutate 'td'. */
830 memcpy(td_output_text, td->output_text.data + skip, len);
832 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
833 flags->dec_ttl_or_hop_limit) {
834 if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
835 printf("Inner TTL/hop limit decrement test failed\n");
840 if (test_ipsec_pkt_update(td_output_text, flags)) {
841 printf("Could not update expected vector");
845 if (memcmp(output_text, td_output_text, len)) {
849 printf("TestCase %s line %d: %s\n", __func__, __LINE__,
850 "output text not as expected\n");
852 rte_hexdump(stdout, "expected", td_output_text, len);
853 rte_hexdump(stdout, "actual", output_text, len);
/* Build an inbound descriptor 'res_d' from an outbound result: copy the
 * original descriptor, use the processed mbuf contents as the new input
 * text, flip the direction to ingress and switch crypto ops to
 * decrypt/verify. Used by combined-mode tests to feed outbound output
 * back through inbound processing. */
861 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
862 struct ipsec_test_data *res_d)
864 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
865 uint32_t len = rte_pktmbuf_pkt_len(m);
867 memcpy(res_d, td, sizeof(*res_d));
868 memcpy(res_d->input_text.data, output_text, len);
869 res_d->input_text.len = len;
871 res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
873 res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
875 res_d->xform.chain.cipher.cipher.op =
876 RTE_CRYPTO_CIPHER_OP_DECRYPT;
877 res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
/* Validate the outer IPv4 header of an egress tunnel packet against the
 * test flags: basic header sanity, AH protocol when requested, DF-bit
 * propagation (copy/set variants) and DSCP value propagation. */
884 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
885 const struct ipsec_test_flags *flags)
890 if (!is_valid_ipv4_pkt(iph4)) {
891 printf("Tunnel outer header is not IPv4\n");
895 if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
896 printf("Tunnel outer header proto is not AH\n");
900 f_off = rte_be_to_cpu_16(iph4->fragment_offset);
/* Inner-1/Set-1 variants must yield DF set; the elided else branch
 * presumably covers the Inner-0/Set-0 variants expecting DF clear. */
901 if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
902 flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
903 if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
904 printf("DF bit is not set\n");
908 if (f_off & RTE_IPV4_HDR_DF_FLAG) {
909 printf("DF bit is set\n");
/* DSCP occupies bits 7..2 of type_of_service, hence the mask + shift. */
914 tos = iph4->type_of_service;
915 dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;
917 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
918 flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
919 if (dscp != TEST_IPSEC_DSCP_VAL) {
920 printf("DSCP value is not matching [exp: %x, actual: %x]\n",
921 TEST_IPSEC_DSCP_VAL, dscp);
926 printf("DSCP value is set [exp: 0, actual: %x]\n",
/* Validate the outer IPv6 header of an egress tunnel packet against the
 * test flags: basic header sanity, DSCP propagation (from the traffic
 * class bits of vtc_flow) and flow-label propagation. */
936 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
937 const struct ipsec_test_flags *flags)
943 if (!is_valid_ipv6_pkt(iph6)) {
944 printf("Tunnel outer header is not IPv6\n");
948 vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
/* DSCP is the upper 6 bits of the 8-bit traffic class field. */
949 dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
950 (RTE_IPV6_HDR_TC_SHIFT + 2);
952 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
953 flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
954 if (dscp != TEST_IPSEC_DSCP_VAL) {
955 printf("DSCP value is not matching [exp: %x, actual: %x]\n",
956 TEST_IPSEC_DSCP_VAL, dscp);
961 printf("DSCP value is set [exp: 0, actual: %x]\n",
967 flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;
969 if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
970 flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
971 if (flabel != TEST_IPSEC_FLABEL_VAL) {
972 printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
973 TEST_IPSEC_FLABEL_VAL, flabel);
978 printf("FLABEL value is set [exp: 0, actual: %x]\n",
/* Post-process a completed IPsec operation. For egress: optionally
 * verify IV uniqueness, then validate the outer (tunnel) or L3
 * (transport) header against the flags. Then either verify the result
 * against the expected vector (res_d == NULL; known-vector and inbound
 * tests) or prepare 'res_d' so combined-mode tests can run the output
 * through inbound processing. */
988 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
989 struct ipsec_test_data *res_d, bool silent,
990 const struct ipsec_test_flags *flags)
992 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
995 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
996 const struct rte_ipv4_hdr *iph4;
997 const struct rte_ipv6_hdr *iph6;
/* IV uniqueness check is gated by a flag on an elided line. */
1000 ret = test_ipsec_iv_verify_push(m, td);
1001 if (ret != TEST_SUCCESS)
1005 iph4 = (const struct rte_ipv4_hdr *)output_text;
1007 if (td->ipsec_xform.mode ==
1008 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
1010 iph6 = (const struct rte_ipv6_hdr *)output_text;
1011 if (is_valid_ipv6_pkt(iph6) == false) {
1012 printf("Transport packet is not IPv6\n");
1016 if (is_valid_ipv4_pkt(iph4) == false) {
1017 printf("Transport packet is not IPv4\n");
1021 if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
1022 printf("Transport IPv4 header proto is not AH\n");
1027 if (td->ipsec_xform.tunnel.type ==
1028 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
1029 if (test_ipsec_iph4_hdr_validate(iph4, flags))
1032 iph6 = (const struct rte_ipv6_hdr *)output_text;
1033 if (test_ipsec_iph6_hdr_validate(iph6, flags))
1040 * In case of known vector tests & all inbound tests, res_d provided
1041 * would be NULL and output data need to be validated against expected.
1042 * For inbound, output_text would be plain packet and for outbound
1043 * output_text would IPsec packet. Validate by comparing against
1046 * In case of combined mode tests, the output_text from outbound
1047 * operation (ie, IPsec packet) would need to be inbound processed to
1048 * obtain the plain text. Copy output_text to result data, 'res_d', so
1049 * that inbound processing can be done.
1053 return test_ipsec_td_verify(m, td, silent, flags);
1055 return test_ipsec_res_d_prepare(m, td, res_d);
/* Interpret the crypto op status for one packet. Negative tests
 * (anti-replay, hard SA expiry on the last packet, tunnel header
 * verification, ICV corruption — all ingress) require
 * RTE_CRYPTO_OP_STATUS_ERROR and return early. Otherwise the op must
 * have succeeded, and soft SA expiry on the last packet must be
 * reported via the soft-expiry aux flag. 'pkt_num' is 1-based
 * (compared against IPSEC_TEST_PACKETS_MAX) — confirm with callers. */
1059 test_ipsec_status_check(const struct ipsec_test_data *td,
1060 struct rte_crypto_op *op,
1061 const struct ipsec_test_flags *flags,
1062 enum rte_security_ipsec_sa_direction dir,
1065 int ret = TEST_SUCCESS;
1067 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1069 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1070 printf("Anti replay test case failed\n");
1073 return TEST_SUCCESS;
1077 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
1078 flags->sa_expiry_pkts_hard &&
1079 pkt_num == IPSEC_TEST_PACKETS_MAX) {
1080 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1081 printf("SA hard expiry (pkts) test failed\n");
1084 return TEST_SUCCESS;
1088 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
1089 flags->tunnel_hdr_verify) {
1090 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1091 printf("Tunnel header verify test case failed\n");
1094 return TEST_SUCCESS;
1098 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
1099 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
1100 printf("ICV corruption test case failed\n");
1104 if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
1105 printf("Security op processing failed [pkt_num: %d]\n",
1111 if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
1112 if (!(op->aux_flags &
1113 RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
1114 printf("SA soft expiry (pkts) test failed\n");
/* When the stats_success flag is set, fetch the session statistics and
 * require exactly one packet and zero errors in the direction under
 * test (opackets/oerrors for egress, ipackets/ierrors for ingress).
 * The failure assignments to 'ret' are on elided lines. */
1123 test_ipsec_stats_verify(struct rte_security_ctx *ctx,
1124 struct rte_security_session *sess,
1125 const struct ipsec_test_flags *flags,
1126 enum rte_security_ipsec_sa_direction dir)
1128 struct rte_security_stats stats = {0};
1129 int ret = TEST_SUCCESS;
1131 if (flags->stats_success) {
1132 if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
1135 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1136 if (stats.ipsec.opackets != 1 ||
1137 stats.ipsec.oerrors != 0)
1140 if (stats.ipsec.ipackets != 1 ||
1141 stats.ipsec.ierrors != 0)
/* Adjust an expected plaintext vector in place according to the test
 * flags: set/clear the IPv4 DF bit, and set/clear DSCP (IPv4 tos or
 * IPv6 traffic class) and the IPv6 flow label. If the IPv4 header was
 * modified, its checksum is recomputed at the end ('cksum_dirty' is
 * presumably set on elided lines inside the modifying branches). */
1150 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
1152 struct rte_ipv4_hdr *iph4;
1153 struct rte_ipv6_hdr *iph6;
1154 bool cksum_dirty = false;
1156 iph4 = (struct rte_ipv4_hdr *)pkt;
/* DF handling only applies to IPv4 vectors; any df flag variant
 * requires an IPv4 packet. */
1158 if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1159 flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
1160 flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
1161 flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
1164 if (!is_ipv4(iph4)) {
1165 printf("Invalid packet type\n");
1169 frag_off = rte_be_to_cpu_16(iph4->fragment_offset);
1171 if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
1172 flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
1173 frag_off |= RTE_IPV4_HDR_DF_FLAG;
1175 frag_off &= ~RTE_IPV4_HDR_DF_FLAG;
1177 iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
1181 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1182 flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
1183 flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
1184 flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
1185 flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1186 flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
1187 flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
1188 flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
1190 if (is_ipv4(iph4)) {
/* DSCP is bits 7..2 of tos, hence the << 2 when setting. */
1193 tos = iph4->type_of_service;
1194 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1195 flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1196 tos |= (RTE_IPV4_HDR_DSCP_MASK &
1197 (TEST_IPSEC_DSCP_VAL << 2));
1199 tos &= ~RTE_IPV4_HDR_DSCP_MASK;
1201 iph4->type_of_service = tos;
1206 iph6 = (struct rte_ipv6_hdr *)pkt;
1208 vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
1209 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
1210 flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
1211 vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
1212 (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
1214 vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;
1216 if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
1217 flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
1218 vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
1219 (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
1221 vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;
1223 iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
/* Recompute the IPv4 checksum only after all field edits are done. */
1227 if (cksum_dirty && is_ipv4(iph4)) {
1228 iph4->hdr_checksum = 0;
1229 iph4->hdr_checksum = rte_ipv4_cksum(iph4);
1235 #endif /* !RTE_EXEC_ENV_WINDOWS */