1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
9 #include <rte_security.h>
14 #include "test_cryptodev_security_ipsec.h"
/* Flattened list of algorithm combinations under test: all AEAD entries
 * followed by every cipher x auth pairing. Filled at runtime by
 * test_ipsec_alg_list_populate().
 * NOTE(review): the size expression continues on a line not visible in
 * this chunk (presumably "* RTE_DIM(auth_list))]") -- confirm.
 */
18 struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
19 (RTE_DIM(cipher_list) *
/* Basic IPv4 header sanity check: version nibble, minimum IHL and a
 * total_length large enough for the fixed header. Return value lines
 * are elided from this view -- presumably boolean-style pass/fail.
 */
23 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
25 /* The IP version number must be 4 */
26 if (((pkt->version_ihl) >> 4) != 4)
29 * The IP header length field must be large enough to hold the
30 * minimum length legal IP datagram (20 bytes = 5 words).
32 if ((pkt->version_ihl & 0xf) < 5)
36 * The IP total length field must be large enough to hold the IP
37 * datagram header, whose length is specified in the IP header length
/* NOTE(review): rte_cpu_to_be_16() is used to read a big-endian wire
 * field; it is bit-identical to rte_be_to_cpu_16() for 16-bit swaps,
 * but the be_to_cpu spelling would state the intent more clearly.
 */
40 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
/* Basic IPv6 sanity check: the top 4 bits of vtc_flow (converted from
 * big-endian) must carry version 6. Return lines elided from this view.
 */
47 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
49 /* The IP version number must be 6 */
50 if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
/* Populate alg_list[]: one entry per AEAD algorithm (param2 == NULL
 * marks the AEAD case), then one entry for every cipher/auth pairing.
 * NOTE(review): the "index++" increments are on lines elided from this
 * view -- confirm index advances once per entry.
 */
57 test_ipsec_alg_list_populate(void)
59 unsigned long i, j, index = 0;
61 for (i = 0; i < RTE_DIM(aead_list); i++) {
62 alg_list[index].param1 = &aead_list[i];
/* param2 == NULL distinguishes AEAD entries from cipher+auth pairs */
63 alg_list[index].param2 = NULL;
67 for (i = 0; i < RTE_DIM(cipher_list); i++) {
68 for (j = 0; j < RTE_DIM(auth_list); j++) {
69 alg_list[index].param1 = &cipher_list[i];
70 alg_list[index].param2 = &auth_list[j];
/* Check that every IPsec option requested in ipsec_xform is advertised
 * by the device's security capability (sec_cap). Each unsupported
 * option logs an INFO message; the return statements are elided from
 * this view (presumably a non-success code so the test is skipped).
 */
77 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
78 const struct rte_security_capability *sec_cap,
81 /* Verify security capabilities */
83 if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
85 RTE_LOG(INFO, USER1, "ESN is not supported\n");
89 if (ipsec_xform->options.udp_encap == 1 &&
90 sec_cap->ipsec.options.udp_encap == 0) {
92 RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
96 if (ipsec_xform->options.udp_ports_verify == 1 &&
97 sec_cap->ipsec.options.udp_ports_verify == 0) {
99 RTE_LOG(INFO, USER1, "UDP encapsulation ports "
100 "verification is not supported\n");
104 if (ipsec_xform->options.copy_dscp == 1 &&
105 sec_cap->ipsec.options.copy_dscp == 0) {
107 RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
111 if (ipsec_xform->options.copy_flabel == 1 &&
112 sec_cap->ipsec.options.copy_flabel == 0) {
114 RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
118 if (ipsec_xform->options.copy_df == 1 &&
119 sec_cap->ipsec.options.copy_df == 0) {
121 RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
125 if (ipsec_xform->options.dec_ttl == 1 &&
126 sec_cap->ipsec.options.dec_ttl == 0) {
128 RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
132 if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
134 RTE_LOG(INFO, USER1, "ECN is not supported\n");
138 if (ipsec_xform->options.stats == 1 &&
139 sec_cap->ipsec.options.stats == 0) {
141 RTE_LOG(INFO, USER1, "Stats is not supported\n");
/* Application-provided IV only matters on egress (SA generates IV
 * otherwise), hence the direction qualifier.
 */
145 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
146 (ipsec_xform->options.iv_gen_disable == 1) &&
147 (sec_cap->ipsec.options.iv_gen_disable != 1)) {
150 "Application provided IV is not supported\n");
/* tunnel_hdr_verify is a level (bitmask-like ordering): the device
 * capability must be at least the requested level.
 */
154 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
155 (ipsec_xform->options.tunnel_hdr_verify >
156 sec_cap->ipsec.options.tunnel_hdr_verify)) {
159 "Tunnel header verify is not supported\n");
163 if (ipsec_xform->options.ip_csum_enable == 1 &&
164 sec_cap->ipsec.options.ip_csum_enable == 0) {
167 "Inner IP checksum is not supported\n");
171 if (ipsec_xform->options.l4_csum_enable == 1 &&
172 sec_cap->ipsec.options.l4_csum_enable == 0) {
175 "Inner L4 checksum is not supported\n");
/* Walk sec_cap->crypto_capabilities (terminated by an entry whose op is
 * RTE_CRYPTO_OP_TYPE_UNDEFINED) looking for a symmetric AEAD capability
 * matching the xform's algorithm, then validate key/digest/AAD/IV sizes
 * via rte_cryptodev_sym_capability_check_aead() (0 == supported).
 * Return statements are elided from this view.
 */
183 test_ipsec_crypto_caps_aead_verify(
184 const struct rte_security_capability *sec_cap,
185 struct rte_crypto_sym_xform *aead)
187 const struct rte_cryptodev_symmetric_capability *sym_cap;
188 const struct rte_cryptodev_capabilities *crypto_cap;
191 while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
192 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
193 if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
194 crypto_cap->sym.xform_type == aead->type &&
195 crypto_cap->sym.aead.algo == aead->aead.algo) {
196 sym_cap = &crypto_cap->sym;
197 if (rte_cryptodev_sym_capability_check_aead(sym_cap,
198 aead->aead.key.length,
199 aead->aead.digest_length,
200 aead->aead.aad_length,
201 aead->aead.iv.length) == 0)
/* Same capability scan as the AEAD variant, for plain ciphers: find a
 * matching symmetric cipher capability and validate key and IV lengths.
 * NOTE(review): the "sym_cap = &cap->sym;" assignment (original line
 * ~223) is elided from this view but must precede the check call.
 */
210 test_ipsec_crypto_caps_cipher_verify(
211 const struct rte_security_capability *sec_cap,
212 struct rte_crypto_sym_xform *cipher)
214 const struct rte_cryptodev_symmetric_capability *sym_cap;
215 const struct rte_cryptodev_capabilities *cap;
218 while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
219 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
220 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
221 cap->sym.xform_type == cipher->type &&
222 cap->sym.cipher.algo == cipher->cipher.algo) {
224 if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
225 cipher->cipher.key.length,
226 cipher->cipher.iv.length) == 0)
/* Capability scan for auth algorithms: find a matching symmetric auth
 * capability and validate key, digest and IV lengths. Mirrors the
 * cipher/AEAD variants above; sym_cap assignment line is elided here.
 */
235 test_ipsec_crypto_caps_auth_verify(
236 const struct rte_security_capability *sec_cap,
237 struct rte_crypto_sym_xform *auth)
239 const struct rte_cryptodev_symmetric_capability *sym_cap;
240 const struct rte_cryptodev_capabilities *cap;
243 while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
244 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
245 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
246 cap->sym.xform_type == auth->type &&
247 cap->sym.auth.algo == auth->auth.algo) {
249 if (rte_cryptodev_sym_capability_check_auth(sym_cap,
250 auth->auth.key.length,
251 auth->auth.digest_length,
252 auth->auth.iv.length) == 0)
/* Derive an inbound test-data vector from an outbound one: copy the
 * whole descriptor, swap input/output text, flip the SA direction to
 * ingress and switch crypto ops to decrypt/verify. The AEAD-vs-chain
 * branch condition is elided from this view.
 */
261 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
262 struct ipsec_test_data *td_in)
264 memcpy(td_in, td_out, sizeof(*td_in));
266 /* Populate output text of td_in with input text of td_out */
267 memcpy(td_in->output_text.data, td_out->input_text.data,
268 td_out->input_text.len);
269 td_in->output_text.len = td_out->input_text.len;
271 /* Populate input text of td_in with output text of td_out */
272 memcpy(td_in->input_text.data, td_out->output_text.data,
273 td_out->output_text.len);
274 td_in->input_text.len = td_out->output_text.len;
276 td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
279 td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
281 td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
282 td_in->xform.chain.cipher.cipher.op =
283 RTE_CRYPTO_CIPHER_OP_DECRYPT;
/* NOTE(review): interior of an IP-version helper (function signature is
 * elided from this chunk). Reads the version nibble from version_ihl
 * and compares it against IPVERSION (4).
 */
290 struct rte_ipv4_hdr *ipv4 = ip;
/* NOTE(review): shifting by RTE_IPV4_IHL_MULTIPLIER (== 4) happens to
 * match the version-field shift, but the macro name refers to IHL; a
 * literal 4 would state the intent -- confirm.
 */
293 ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
294 if (ip_ver == IPVERSION)
/* Reset L3 and/or L4 checksum fields of the packet at 'ip' to zero so
 * that hardware checksum offload results can be verified later. Walks
 * past the IPv4/IPv6 header to the TCP/UDP header using next_proto.
 * Several branch/case lines are elided from this view.
 */
301 test_ipsec_csum_init(void *ip, bool l3, bool l4)
303 struct rte_ipv4_hdr *ipv4;
304 struct rte_tcp_hdr *tcp;
305 struct rte_udp_hdr *udp;
311 size = sizeof(struct rte_ipv4_hdr);
312 next_proto = ipv4->next_proto_id;
315 ipv4->hdr_checksum = 0;
317 size = sizeof(struct rte_ipv6_hdr);
318 next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
322 switch (next_proto) {
324 tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
328 udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
329 udp->dgram_cksum = 0;
/* Build nb_td test-data descriptors from an algorithm selection
 * (param1 = AEAD or cipher, param2 = auth for chained cases) and a set
 * of test flags. Starts from a known-good packet template and then
 * overrides algorithm, key sizes, IPsec mode/options per flags.
 */
338 test_ipsec_td_prepare(const struct crypto_param *param1,
339 const struct crypto_param *param2,
340 const struct ipsec_test_flags *flags,
341 struct ipsec_test_data *td_array,
345 struct ipsec_test_data *td;
348 memset(td_array, 0, nb_td * sizeof(*td));
350 for (i = 0; i < nb_td; i++) {
353 /* Prepare fields based on param */
355 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
356 /* Copy template for packet & key fields */
358 memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
360 memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
363 td->xform.aead.aead.algo = param1->alg.aead;
364 td->xform.aead.aead.key.length = param1->key_length;
366 /* Copy template for packet & key fields */
368 memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
371 memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
375 td->xform.chain.cipher.cipher.algo = param1->alg.cipher;
376 td->xform.chain.cipher.cipher.key.length =
378 td->xform.chain.cipher.cipher.iv.length =
380 td->xform.chain.auth.auth.algo = param2->alg.auth;
381 td->xform.chain.auth.auth.key.length =
383 td->xform.chain.auth.auth.digest_length =
384 param2->digest_length;
/* Let the SA generate IVs by default; tests that need a fixed IV
 * flip this elsewhere.
 */
389 td->ipsec_xform.options.iv_gen_disable = 0;
391 if (flags->sa_expiry_pkts_soft)
392 td->ipsec_xform.life.packets_soft_limit =
393 IPSEC_TEST_PACKETS_MAX - 1;
395 if (flags->ip_csum) {
396 td->ipsec_xform.options.ip_csum_enable = 1;
397 test_ipsec_csum_init(&td->input_text.data, true, false);
400 if (flags->l4_csum) {
401 td->ipsec_xform.options.l4_csum_enable = 1;
402 test_ipsec_csum_init(&td->input_text.data, false, true);
405 if (flags->transport) {
406 td->ipsec_xform.mode =
407 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
409 td->ipsec_xform.mode =
410 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
412 if (flags->tunnel_ipv6)
413 td->ipsec_xform.tunnel.type =
414 RTE_SECURITY_IPSEC_TUNNEL_IPV6;
416 td->ipsec_xform.tunnel.type =
417 RTE_SECURITY_IPSEC_TUNNEL_IPV4;
420 if (flags->stats_success)
421 td->ipsec_xform.options.stats = 1;
/* Fragmented-input case: mark the inner IPv4 header as a fragment
 * and recompute its checksum so the packet stays self-consistent.
 */
423 if (flags->fragment) {
424 struct rte_ipv4_hdr *ip;
425 ip = (struct rte_ipv4_hdr *)&td->input_text.data;
426 ip->fragment_offset = 4;
427 ip->hdr_checksum = rte_ipv4_cksum(ip);
/* Derive inbound test vectors from completed outbound vectors: the
 * outbound input (plain text) becomes the expected inbound output, and
 * per-flag inbound options (ICV corruption, hard expiry, UDP encap,
 * tunnel header verify, checksum offloads) are applied to each entry.
 */
434 test_ipsec_td_update(struct ipsec_test_data td_inb[],
435 const struct ipsec_test_data td_outb[],
437 const struct ipsec_test_flags *flags)
441 for (i = 0; i < nb_td; i++) {
442 memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
443 td_outb[i].input_text.len);
/* BUGFIX: use td_outb[i], not td_outb (element 0) -- the length must
 * track the same entry that was copied just above, otherwise entries
 * with differing packet lengths get the wrong expected length.
 */
444 td_inb[i].output_text.len = td_outb[i].input_text.len;
/* Flip one byte near the end of the IPsec packet to force an ICV
 * (integrity check) failure on inbound processing.
 */
446 if (flags->icv_corrupt) {
447 int icv_pos = td_inb[i].input_text.len - 4;
448 td_inb[i].input_text.data[icv_pos] += 1;
451 if (flags->sa_expiry_pkts_hard)
452 td_inb[i].ipsec_xform.life.packets_hard_limit =
453 IPSEC_TEST_PACKETS_MAX - 1;
455 if (flags->udp_encap)
456 td_inb[i].ipsec_xform.options.udp_encap = 1;
458 if (flags->udp_ports_verify)
459 td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
461 td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
462 flags->tunnel_hdr_verify;
465 td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
468 td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
470 /* Clear outbound specific flags */
471 td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
/* Print the algorithm combination under test: AEAD name with key bits,
 * or cipher name (+ key bits unless NULL cipher) and auth name
 * (+ ICV bytes unless NULL auth). printf prefix lines are elided from
 * this view.
 */
476 test_ipsec_display_alg(const struct crypto_param *param1,
477 const struct crypto_param *param2)
479 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
481 rte_crypto_aead_algorithm_strings[param1->alg.aead],
482 param1->key_length * 8);
485 rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
486 if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
487 printf(" [%d]", param1->key_length * 8);
489 rte_crypto_auth_algorithm_strings[param2->alg.auth]);
490 if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
491 printf(" [%dB ICV]", param2->digest_length);
/* Return the number of bytes of outer header preceding the ESP header
 * for this test vector: the outer IPv4/IPv6 header size for egress
 * tunnel mode, otherwise 0 (len initialization line elided from view).
 */
497 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
501 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
502 if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
503 if (td->ipsec_xform.tunnel.type ==
504 RTE_SECURITY_IPSEC_TUNNEL_IPV4)
505 len += sizeof(struct rte_ipv4_hdr);
507 len += sizeof(struct rte_ipv6_hdr);
/* Check that the IV generated for this packet has not been produced for
 * any earlier packet of the run (IVs must be unique per SA), then save
 * it for comparison against subsequent packets. State is kept in a
 * static queue indexed by a static 'index' (declaration elided here).
 */
515 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
517 static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
518 uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
519 int i, iv_pos, iv_len;
/* AEAD IV includes the salt, which is not carried in the packet */
523 iv_len = td->xform.aead.aead.iv.length - td->salt.len;
525 iv_len = td->xform.chain.cipher.cipher.iv.length;
/* IV sits immediately after the (optional) outer header + ESP header */
527 iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
528 output_text += iv_pos;
530 TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
532 /* Compare against previous values */
533 for (i = 0; i < index; i++) {
534 iv_tmp = &iv_queue[i * IV_LEN_MAX];
536 if (memcmp(output_text, iv_tmp, iv_len) == 0) {
537 printf("IV repeated");
542 /* Save IV for future comparisons */
544 iv_tmp = &iv_queue[index * IV_LEN_MAX];
545 memcpy(iv_tmp, output_text, iv_len);
/* Wrap/reset once the expected number of packets has been seen */
548 if (index == IPSEC_TEST_PACKETS_MAX)
/* Verify the inner IPv4 header checksum of a decrypted packet: zero the
 * field, recompute with rte_ipv4_cksum() and compare with the received
 * value. Non-IPv4 packets are skipped (return lines elided from view).
 */
555 test_ipsec_l3_csum_verify(struct rte_mbuf *m)
557 uint16_t actual_cksum, expected_cksum;
558 struct rte_ipv4_hdr *ip;
560 ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
562 if (!is_ipv4((void *)ip))
565 actual_cksum = ip->hdr_checksum;
/* Zero the stored checksum before recomputing -- the checksum field
 * itself is part of the checksummed region.
 */
567 ip->hdr_checksum = 0;
569 expected_cksum = rte_ipv4_cksum(ip);
571 if (actual_cksum != expected_cksum)
/* Verify the inner TCP/UDP checksum of a decrypted packet. Dispatches
 * on IP version (branch lines elided), locates the L4 header right
 * after the fixed IP header, zeroes the stored checksum where needed
 * and recomputes with the rte_ipv4/ipv6_udptcp_cksum() helpers.
 * NOTE(review): IPv6 extension headers are not handled -- L4 is assumed
 * to follow the fixed 40-byte header.
 */
578 test_ipsec_l4_csum_verify(struct rte_mbuf *m)
580 uint16_t actual_cksum = 0, expected_cksum = 0;
581 struct rte_ipv4_hdr *ipv4;
582 struct rte_ipv6_hdr *ipv6;
583 struct rte_tcp_hdr *tcp;
584 struct rte_udp_hdr *udp;
587 ip = rte_pktmbuf_mtod(m, void *);
591 l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
593 switch (ipv4->next_proto_id) {
595 tcp = (struct rte_tcp_hdr *)l4;
596 actual_cksum = tcp->cksum;
598 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
601 udp = (struct rte_udp_hdr *)l4;
602 actual_cksum = udp->dgram_cksum;
603 udp->dgram_cksum = 0;
604 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
611 l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
613 switch (ipv6->proto) {
615 tcp = (struct rte_tcp_hdr *)l4;
616 actual_cksum = tcp->cksum;
618 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
621 udp = (struct rte_udp_hdr *)l4;
622 actual_cksum = udp->dgram_cksum;
623 udp->dgram_cksum = 0;
624 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
631 if (actual_cksum != expected_cksum)
/* Compare the processed mbuf against the expected output in the test
 * vector: strip UDP encapsulation when applicable, check length,
 * fragmentation, inner L3/L4 checksum offload flags, and finally do a
 * byte compare against td->output_text (skipping the tunnel header when
 * relevant). Error-status test cases skip verification entirely.
 */
638 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
639 bool silent, const struct ipsec_test_flags *flags)
641 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
642 uint32_t skip, len = rte_pktmbuf_pkt_len(m);
645 /* For tests with status as error for test success, skip verification */
646 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
647 (flags->icv_corrupt ||
648 flags->sa_expiry_pkts_hard ||
649 flags->tunnel_hdr_verify))
/* Egress + UDP encap: confirm a UDP header follows the outer IP header,
 * then strip it before comparing against the expected ESP packet.
 */
652 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
654 const struct rte_ipv4_hdr *iph4;
655 const struct rte_ipv6_hdr *iph6;
657 if (td->ipsec_xform.tunnel.type ==
658 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
659 iph4 = (const struct rte_ipv4_hdr *)output_text;
660 if (iph4->next_proto_id != IPPROTO_UDP) {
661 printf("UDP header is not found\n");
665 iph6 = (const struct rte_ipv6_hdr *)output_text;
666 if (iph6->proto != IPPROTO_UDP) {
667 printf("UDP header is not found\n");
672 len -= sizeof(struct rte_udp_hdr);
673 output_text += sizeof(struct rte_udp_hdr);
676 if (len != td->output_text.len) {
677 printf("Output length (%d) not matching with expected (%d)\n",
678 len, td->output_text.len);
/* Fragment-flag tests expect the output NOT to be fragmented */
682 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
684 const struct rte_ipv4_hdr *iph4;
685 iph4 = (const struct rte_ipv4_hdr *)output_text;
686 if (iph4->fragment_offset) {
687 printf("Output packet is fragmented");
692 skip = test_ipsec_tunnel_hdr_len_get(td);
/* Inner IP checksum offload: trust the mbuf RX flag, fall back to
 * software verification when the flag indicates good checksum.
 */
697 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
699 if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
700 ret = test_ipsec_l3_csum_verify(m);
704 if (ret == TEST_FAILED)
705 printf("Inner IP checksum test failed\n");
710 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
712 if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
713 ret = test_ipsec_l4_csum_verify(m);
717 if (ret == TEST_FAILED)
718 printf("Inner L4 checksum test failed\n");
/* Final byte-for-byte comparison against the expected output */
724 if (memcmp(output_text, td->output_text.data + skip, len)) {
728 printf("TestCase %s line %d: %s\n", __func__, __LINE__,
729 "output text not as expected\n");
731 rte_hexdump(stdout, "expected", td->output_text.data + skip,
733 rte_hexdump(stdout, "actual", output_text, len);
/* Build a result descriptor for combined-mode tests: copy td, replace
 * its input with the outbound-processed packet from 'm', and flip the
 * direction and crypto ops so res_d can drive inbound processing.
 */
741 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
742 struct ipsec_test_data *res_d)
744 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
745 uint32_t len = rte_pktmbuf_pkt_len(m);
747 memcpy(res_d, td, sizeof(*res_d));
748 memcpy(res_d->input_text.data, output_text, len);
749 res_d->input_text.len = len;
751 res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
753 res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
755 res_d->xform.chain.cipher.cipher.op =
756 RTE_CRYPTO_CIPHER_OP_DECRYPT;
757 res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
/* Post-process one mbuf after security processing. For egress: check IV
 * uniqueness (when applicable) and validate the outer header matches
 * the configured mode (transport keeps the inner IP version; tunnel
 * must produce the configured outer IPv4/IPv6). Then either verify
 * against the expected vector or prepare res_d for inbound processing.
 */
764 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
765 struct ipsec_test_data *res_d, bool silent,
766 const struct ipsec_test_flags *flags)
768 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
771 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
772 const struct rte_ipv4_hdr *iph4;
773 const struct rte_ipv6_hdr *iph6;
776 ret = test_ipsec_iv_verify_push(m, td);
777 if (ret != TEST_SUCCESS)
781 iph4 = (const struct rte_ipv4_hdr *)output_text;
783 if (td->ipsec_xform.mode ==
784 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
786 iph6 = (const struct rte_ipv6_hdr *)output_text;
787 if (is_valid_ipv6_pkt(iph6) == false) {
788 printf("Transport packet is not IPv6\n");
792 if (is_valid_ipv4_pkt(iph4) == false) {
793 printf("Transport packet is not IPv4\n");
798 if (td->ipsec_xform.tunnel.type ==
799 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
800 if (is_valid_ipv4_pkt(iph4) == false) {
801 printf("Tunnel outer header is not IPv4\n");
805 iph6 = (const struct rte_ipv6_hdr *)output_text;
806 if (is_valid_ipv6_pkt(iph6) == false) {
807 printf("Tunnel outer header is not IPv6\n");
815 * In case of known vector tests & all inbound tests, res_d provided
816 * would be NULL and output data need to be validated against expected.
817 * For inbound, output_text would be plain packet and for outbound
818 * output_text would IPsec packet. Validate by comparing against
821 * In case of combined mode tests, the output_text from outbound
822 * operation (ie, IPsec packet) would need to be inbound processed to
823 * obtain the plain text. Copy output_text to result data, 'res_d', so
824 * that inbound processing can be done.
828 return test_ipsec_td_verify(m, td, silent, flags);
830 return test_ipsec_res_d_prepare(m, td, res_d);
/* Map the crypto op status onto the expected outcome for this test:
 * negative tests (hard expiry on the last packet, tunnel header verify,
 * ICV corruption) require an ERROR status; positive tests require
 * SUCCESS, and soft-expiry tests additionally require the SOFT_EXPIRY
 * aux flag on the final packet.
 */
834 test_ipsec_status_check(struct rte_crypto_op *op,
835 const struct ipsec_test_flags *flags,
836 enum rte_security_ipsec_sa_direction dir,
839 int ret = TEST_SUCCESS;
841 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
842 flags->sa_expiry_pkts_hard &&
843 pkt_num == IPSEC_TEST_PACKETS_MAX) {
844 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
845 printf("SA hard expiry (pkts) test failed\n");
852 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
853 flags->tunnel_hdr_verify) {
854 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
855 printf("Tunnel header verify test case failed\n");
862 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
863 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
864 printf("ICV corruption test case failed\n");
868 if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
869 printf("Security op processing failed [pkt_num: %d]\n",
875 if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
876 if (!(op->aux_flags &
877 RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
878 printf("SA soft expiry (pkts) test failed\n");
887 test_ipsec_stats_verify(struct rte_security_ctx *ctx,
888 struct rte_security_session *sess,
889 const struct ipsec_test_flags *flags,
890 enum rte_security_ipsec_sa_direction dir)
892 struct rte_security_stats stats = {0};
893 int ret = TEST_SUCCESS;
895 if (flags->stats_success) {
896 if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
899 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
900 if (stats.ipsec.opackets != 1 ||
901 stats.ipsec.oerrors != 0)
904 if (stats.ipsec.ipackets != 1 ||
905 stats.ipsec.ierrors != 0)