1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
9 #include <rte_security.h>
14 #include "test_cryptodev_security_ipsec.h"
/*
 * Flat list of crypto parameter combinations: one entry per AEAD algorithm
 * followed by one entry per (cipher, auth) pairing.
 * NOTE(review): this excerpt is an elided listing — the closing
 * "RTE_DIM(auth_list))]" term of the array bound is not visible here.
 */
18 struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
19 (RTE_DIM(cipher_list) *
/*
 * Populate alg_list: first every AEAD algorithm (param2 == NULL marks an
 * AEAD-only entry), then the full cross product of cipher x auth algorithms.
 * NOTE(review): the index increments and closing braces are elided in this
 * listing.
 */
23 test_ipsec_alg_list_populate(void)
25 unsigned long i, j, index = 0;
/* AEAD entries: single-transform, so no second parameter. */
27 for (i = 0; i < RTE_DIM(aead_list); i++) {
28 alg_list[index].param1 = &aead_list[i];
29 alg_list[index].param2 = NULL;
/* Cipher+auth chained entries: every cipher paired with every auth. */
33 for (i = 0; i < RTE_DIM(cipher_list); i++) {
34 for (j = 0; j < RTE_DIM(auth_list); j++) {
35 alg_list[index].param1 = &cipher_list[i];
36 alg_list[index].param2 = &auth_list[j];
/*
 * Check every IPsec option requested in @ipsec_xform against the options
 * advertised in the device's security capability @sec_cap.  Each mismatch is
 * logged; the failure returns themselves are elided in this listing
 * (presumably each block returns a "not supported" code — TODO confirm
 * against the full file).
 */
43 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
44 const struct rte_security_capability *sec_cap,
47 /* Verify security capabilities */
49 if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
51 RTE_LOG(INFO, USER1, "ESN is not supported\n");
55 if (ipsec_xform->options.udp_encap == 1 &&
56 sec_cap->ipsec.options.udp_encap == 0) {
58 RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
62 if (ipsec_xform->options.udp_ports_verify == 1 &&
63 sec_cap->ipsec.options.udp_ports_verify == 0) {
65 RTE_LOG(INFO, USER1, "UDP encapsulation ports "
66 "verification is not supported\n");
70 if (ipsec_xform->options.copy_dscp == 1 &&
71 sec_cap->ipsec.options.copy_dscp == 0) {
73 RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
77 if (ipsec_xform->options.copy_flabel == 1 &&
78 sec_cap->ipsec.options.copy_flabel == 0) {
80 RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
/* NOTE(review): option checked is copy_df (Don't Fragment bit); the log
 * message says "DP bit" — looks like a typo for "DF bit". */
84 if (ipsec_xform->options.copy_df == 1 &&
85 sec_cap->ipsec.options.copy_df == 0) {
87 RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
91 if (ipsec_xform->options.dec_ttl == 1 &&
92 sec_cap->ipsec.options.dec_ttl == 0) {
94 RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
98 if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
100 RTE_LOG(INFO, USER1, "ECN is not supported\n");
104 if (ipsec_xform->options.stats == 1 &&
105 sec_cap->ipsec.options.stats == 0) {
107 RTE_LOG(INFO, USER1, "Stats is not supported\n");
/* Application-provided IV only matters for egress (encrypt) SAs. */
111 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
112 (ipsec_xform->options.iv_gen_disable == 1) &&
113 (sec_cap->ipsec.options.iv_gen_disable != 1)) {
116 "Application provided IV is not supported\n");
/* Tunnel header verification only applies on the ingress side; the
 * option value is a level, so '>' means "stricter than supported". */
120 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
121 (ipsec_xform->options.tunnel_hdr_verify >
122 sec_cap->ipsec.options.tunnel_hdr_verify)) {
125 "Tunnel header verify is not supported\n");
129 if (ipsec_xform->options.ip_csum_enable == 1 &&
130 sec_cap->ipsec.options.ip_csum_enable == 0) {
133 "Inner IP checksum is not supported\n");
137 if (ipsec_xform->options.l4_csum_enable == 1 &&
138 sec_cap->ipsec.options.l4_csum_enable == 0) {
141 "Inner L4 checksum is not supported\n");
/*
 * Walk the security capability's crypto capability array (terminated by an
 * entry with op == RTE_CRYPTO_OP_TYPE_UNDEFINED) looking for a symmetric
 * AEAD entry matching @aead's algorithm, then validate the key, digest, AAD
 * and IV lengths against it.  (Success/failure returns are elided in this
 * listing.)
 */
149 test_ipsec_crypto_caps_aead_verify(
150 const struct rte_security_capability *sec_cap,
151 struct rte_crypto_sym_xform *aead)
153 const struct rte_cryptodev_symmetric_capability *sym_cap;
154 const struct rte_cryptodev_capabilities *crypto_cap;
/* NOTE(review): declaration/initialization of index 'j' is elided here. */
157 while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
158 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
159 if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
160 crypto_cap->sym.xform_type == aead->type &&
161 crypto_cap->sym.aead.algo == aead->aead.algo) {
162 sym_cap = &crypto_cap->sym;
/* rte_cryptodev_sym_capability_check_aead() returns 0 when all
 * the requested lengths fit the capability. */
163 if (rte_cryptodev_sym_capability_check_aead(sym_cap,
164 aead->aead.key.length,
165 aead->aead.digest_length,
166 aead->aead.aad_length,
167 aead->aead.iv.length) == 0)
/*
 * Same pattern as the AEAD check above, for a plain cipher transform:
 * scan the UNDEFINED-terminated capability array for a matching symmetric
 * cipher entry and validate key and IV lengths.
 * NOTE(review): the "sym_cap = &cap->sym;" assignment is elided from this
 * listing but sym_cap is used below — confirm against the full file.
 */
176 test_ipsec_crypto_caps_cipher_verify(
177 const struct rte_security_capability *sec_cap,
178 struct rte_crypto_sym_xform *cipher)
180 const struct rte_cryptodev_symmetric_capability *sym_cap;
181 const struct rte_cryptodev_capabilities *cap;
184 while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
185 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
186 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
187 cap->sym.xform_type == cipher->type &&
188 cap->sym.cipher.algo == cipher->cipher.algo) {
190 if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
191 cipher->cipher.key.length,
192 cipher->cipher.iv.length) == 0)
/*
 * Same pattern for an auth transform: find the matching symmetric auth
 * capability and validate key, digest and IV lengths.
 * NOTE(review): as with the cipher variant, the "sym_cap = &cap->sym;"
 * assignment line is elided from this listing.
 */
201 test_ipsec_crypto_caps_auth_verify(
202 const struct rte_security_capability *sec_cap,
203 struct rte_crypto_sym_xform *auth)
205 const struct rte_cryptodev_symmetric_capability *sym_cap;
206 const struct rte_cryptodev_capabilities *cap;
209 while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
210 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
211 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
212 cap->sym.xform_type == auth->type &&
213 cap->sym.auth.algo == auth->auth.algo) {
215 if (rte_cryptodev_sym_capability_check_auth(sym_cap,
216 auth->auth.key.length,
217 auth->auth.digest_length,
218 auth->auth.iv.length) == 0)
/*
 * Derive an inbound (decrypt) test descriptor from an outbound (encrypt)
 * one: copy the whole descriptor, swap input/output text, flip the SA
 * direction to ingress and switch the crypto ops to decrypt/verify.
 */
227 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
228 struct ipsec_test_data *td_in)
230 memcpy(td_in, td_out, sizeof(*td_in));
232 /* Populate output text of td_in with input text of td_out */
233 memcpy(td_in->output_text.data, td_out->input_text.data,
234 td_out->input_text.len);
235 td_in->output_text.len = td_out->input_text.len;
237 /* Populate input text of td_in with output text of td_out */
238 memcpy(td_in->input_text.data, td_out->output_text.data,
239 td_out->output_text.len);
240 td_in->input_text.len = td_out->output_text.len;
242 td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
/* AEAD branch vs cipher+auth chain branch (the selecting 'if' is elided). */
245 td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
247 td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
248 td_in->xform.chain.cipher.cipher.op =
249 RTE_CRYPTO_CIPHER_OP_DECRYPT;
/*
 * Fragment of an IP-version probe (the function header is elided in this
 * listing; presumably is_ipv4(void *ip) — TODO confirm).
 * NOTE(review): the version nibble is extracted by shifting with
 * RTE_IPV4_IHL_MULTIPLIER.  This only works because that constant happens
 * to equal 4; a dedicated "version shift" constant would be clearer.
 */
256 struct rte_ipv4_hdr *ipv4 = ip;
259 ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
260 if (ip_ver == IPVERSION)
/*
 * Zero out the L3 and/or L4 checksum fields of the packet at @ip so that
 * checksum offload results can later be verified from a known state.
 * @l3 selects clearing the IPv4 header checksum, @l4 the TCP/UDP checksum.
 * NOTE(review): the IPv4/IPv6 dispatch and the l3/l4 guard lines are elided
 * in this listing.
 */
267 test_ipsec_csum_init(void *ip, bool l3, bool l4)
269 struct rte_ipv4_hdr *ipv4;
270 struct rte_tcp_hdr *tcp;
271 struct rte_udp_hdr *udp;
/* IPv4 branch: remember header size and next protocol for the L4 step. */
277 size = sizeof(struct rte_ipv4_hdr);
278 next_proto = ipv4->next_proto_id;
281 ipv4->hdr_checksum = 0;
/* IPv6 branch: fixed header only; extension headers are not walked here. */
283 size = sizeof(struct rte_ipv6_hdr);
284 next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
/* Clear the transport checksum just past the L3 header. */
288 switch (next_proto) {
290 tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
294 udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
295 udp->dgram_cksum = 0;
/*
 * Fill @td_array with test descriptors built from a known-good template:
 * pkt_aes_256_gcm for AEAD params, pkt_aes_128_cbc_hmac_sha256 for
 * cipher+auth chains.  The requested algorithms/key lengths from @param1
 * (and @param2 for chains) are then patched in, followed by flag-driven
 * options (soft expiry limit, inner IP/L4 checksum offload).
 */
304 test_ipsec_td_prepare(const struct crypto_param *param1,
305 const struct crypto_param *param2,
306 const struct ipsec_test_flags *flags,
307 struct ipsec_test_data *td_array,
311 struct ipsec_test_data *td;
314 memset(td_array, 0, nb_td * sizeof(*td));
316 for (i = 0; i < nb_td; i++) {
319 /* Prepare fields based on param */
321 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
322 /* Copy template for packet & key fields */
323 memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
326 td->xform.aead.aead.algo = param1->alg.aead;
327 td->xform.aead.aead.key.length = param1->key_length;
329 /* Copy template for packet & key fields */
330 memcpy(td, &pkt_aes_128_cbc_hmac_sha256, sizeof(*td));
333 td->xform.chain.cipher.cipher.algo = param1->alg.cipher;
334 td->xform.chain.cipher.cipher.key.length =
336 td->xform.chain.auth.auth.algo = param2->alg.auth;
337 td->xform.chain.auth.auth.key.length =
339 td->xform.chain.auth.auth.digest_length =
340 param2->digest_length;
/* Let the PMD generate IVs by default (templates may disable it). */
345 td->ipsec_xform.options.iv_gen_disable = 0;
347 if (flags->sa_expiry_pkts_soft)
348 td->ipsec_xform.life.packets_soft_limit =
349 IPSEC_TEST_PACKETS_MAX - 1;
/* Checksum-offload tests start from zeroed checksum fields. */
351 if (flags->ip_csum) {
352 td->ipsec_xform.options.ip_csum_enable = 1;
353 test_ipsec_csum_init(&td->input_text.data, true, false);
356 if (flags->l4_csum) {
357 td->ipsec_xform.options.l4_csum_enable = 1;
358 test_ipsec_csum_init(&td->input_text.data, false, true);
365 test_ipsec_td_update(struct ipsec_test_data td_inb[],
366 const struct ipsec_test_data td_outb[],
368 const struct ipsec_test_flags *flags)
372 for (i = 0; i < nb_td; i++) {
373 memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
374 td_outb[i].input_text.len);
375 td_inb[i].output_text.len = td_outb->input_text.len;
377 if (flags->icv_corrupt) {
378 int icv_pos = td_inb[i].input_text.len - 4;
379 td_inb[i].input_text.data[icv_pos] += 1;
382 if (flags->sa_expiry_pkts_hard)
383 td_inb[i].ipsec_xform.life.packets_hard_limit =
384 IPSEC_TEST_PACKETS_MAX - 1;
386 if (flags->udp_encap)
387 td_inb[i].ipsec_xform.options.udp_encap = 1;
389 if (flags->udp_ports_verify)
390 td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
392 td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
393 flags->tunnel_hdr_verify;
396 td_inb[i].ipsec_xform.options.ip_csum_enable = 1;
399 td_inb[i].ipsec_xform.options.l4_csum_enable = 1;
401 /* Clear outbound specific flags */
402 td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
/*
 * Print a human-readable description of the algorithm combination under
 * test: AEAD name with key size in bits, or cipher (+ key size, unless
 * NULL cipher) plus auth (+ ICV size in bytes, unless NULL auth).
 * NOTE(review): the printf() calls carrying the format strings for the
 * algorithm names are elided in this listing.
 */
407 test_ipsec_display_alg(const struct crypto_param *param1,
408 const struct crypto_param *param2)
410 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
412 rte_crypto_aead_algorithm_strings[param1->alg.aead],
413 param1->key_length * 8);
416 rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
417 if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
418 printf(" [%d]", param1->key_length * 8);
420 rte_crypto_auth_algorithm_strings[param2->alg.auth]);
421 if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
422 printf(" [%dB ICV]", param2->digest_length);
/*
 * Return the number of outer-header bytes prepended on the egress side:
 * the tunnel IP header size for egress tunnel-mode SAs (IPv4 or IPv6),
 * otherwise the initial value of 'len' (its declaration is elided here).
 */
428 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
432 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
433 if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
434 if (td->ipsec_xform.tunnel.type ==
435 RTE_SECURITY_IPSEC_TUNNEL_IPV4)
436 len += sizeof(struct rte_ipv4_hdr);
438 len += sizeof(struct rte_ipv6_hdr);
/*
 * Verify that the IV of the ESP packet in @m has not been seen before,
 * then record it for future comparisons.  The IV sits right after the
 * (optional) tunnel header and the ESP header.
 * NOTE(review): 'index' is used but declared in elided lines — it appears
 * to be persistent (likely static) state, which makes this function
 * stateful and not thread-safe; confirm against the full file.
 */
446 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
448 static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
449 uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
450 int i, iv_pos, iv_len;
/* For AEAD the salt is carried in the key material, not on the wire. */
454 iv_len = td->xform.aead.aead.iv.length - td->salt.len;
456 iv_len = td->xform.chain.cipher.cipher.iv.length;
458 iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
459 output_text += iv_pos;
461 TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
463 /* Compare against previous values */
464 for (i = 0; i < index; i++) {
465 iv_tmp = &iv_queue[i * IV_LEN_MAX];
467 if (memcmp(output_text, iv_tmp, iv_len) == 0) {
468 printf("IV repeated");
473 /* Save IV for future comparisons */
475 iv_tmp = &iv_queue[index * IV_LEN_MAX];
476 memcpy(iv_tmp, output_text, iv_len);
/* Queue full: presumably wraps/resets in the elided lines — confirm. */
479 if (index == IPSEC_TEST_PACKETS_MAX)
/*
 * Recompute the IPv4 header checksum of the packet in @m and compare it
 * with the value found in the header.  Non-IPv4 packets are skipped via
 * the is_ipv4() early-out.
 */
486 test_ipsec_l3_csum_verify(struct rte_mbuf *m)
488 uint16_t actual_cksum, expected_cksum;
489 struct rte_ipv4_hdr *ip;
491 ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
493 if (!is_ipv4((void *)ip))
496 actual_cksum = ip->hdr_checksum;
/* Checksum field must be zero while recomputing (per RFC 791). */
498 ip->hdr_checksum = 0;
500 expected_cksum = rte_ipv4_cksum(ip);
502 if (actual_cksum != expected_cksum)
/*
 * Recompute the TCP/UDP checksum of the packet in @m (IPv4 or IPv6 outer
 * header) and compare with the value carried in the packet.
 * NOTE(review): the "tcp->cksum = 0;" zeroing lines before the TCP
 * recomputations are elided in this listing — confirm they exist, since
 * the UDP branches do zero dgram_cksum first.
 */
509 test_ipsec_l4_csum_verify(struct rte_mbuf *m)
511 uint16_t actual_cksum = 0, expected_cksum = 0;
512 struct rte_ipv4_hdr *ipv4;
513 struct rte_ipv6_hdr *ipv6;
514 struct rte_tcp_hdr *tcp;
515 struct rte_udp_hdr *udp;
518 ip = rte_pktmbuf_mtod(m, void *);
/* IPv4 branch: L4 header follows the fixed IPv4 header. */
522 l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));
524 switch (ipv4->next_proto_id) {
526 tcp = (struct rte_tcp_hdr *)l4;
527 actual_cksum = tcp->cksum;
529 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
532 udp = (struct rte_udp_hdr *)l4;
533 actual_cksum = udp->dgram_cksum;
534 udp->dgram_cksum = 0;
535 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
/* IPv6 branch: fixed header only; extension headers are not walked. */
542 l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));
544 switch (ipv6->proto) {
546 tcp = (struct rte_tcp_hdr *)l4;
547 actual_cksum = tcp->cksum;
549 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
552 udp = (struct rte_udp_hdr *)l4;
553 actual_cksum = udp->dgram_cksum;
554 udp->dgram_cksum = 0;
555 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
562 if (actual_cksum != expected_cksum)
/*
 * Verify the processed packet in @m against the expected output of test
 * descriptor @td: optional UDP-encap header check and strip, length check,
 * inner IP/L4 checksum verification via mbuf offload flags, then a full
 * byte comparison (skipping the tunnel header on ingress, per the 'skip'
 * computed from test_ipsec_tunnel_hdr_len_get()).
 */
569 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
570 bool silent, const struct ipsec_test_flags *flags)
572 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
573 uint32_t skip, len = rte_pktmbuf_pkt_len(m);
576 /* For tests with status as error for test success, skip verification */
577 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
578 (flags->icv_corrupt ||
579 flags->sa_expiry_pkts_hard ||
580 flags->tunnel_hdr_verify))
/* UDP-encap egress: outer header must carry a UDP protocol field. */
583 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
585 const struct rte_ipv4_hdr *iph4;
586 const struct rte_ipv6_hdr *iph6;
588 if (td->ipsec_xform.tunnel.type ==
589 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
590 iph4 = (const struct rte_ipv4_hdr *)output_text;
591 if (iph4->next_proto_id != IPPROTO_UDP) {
592 printf("UDP header is not found\n");
596 iph6 = (const struct rte_ipv6_hdr *)output_text;
597 if (iph6->proto != IPPROTO_UDP) {
598 printf("UDP header is not found\n");
/* Strip the UDP encapsulation header before comparing payloads.
 * NOTE(review): this adjusts len/pointer but the comparison below still
 * uses the elided logic around it — confirm against the full file. */
603 len -= sizeof(struct rte_udp_hdr);
604 output_text += sizeof(struct rte_udp_hdr);
/* NOTE(review): len is uint32_t but printed with %d — should be %u or
 * PRIu32 per C format-specifier rules. */
607 if (len != td->output_text.len) {
608 printf("Output length (%d) not matching with expected (%d)\n",
609 len, td->output_text.len);
613 skip = test_ipsec_tunnel_hdr_len_get(td);
/* Inner IP checksum: only checked when the Rx offload flag says GOOD. */
618 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
620 if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
621 ret = test_ipsec_l3_csum_verify(m);
625 if (ret == TEST_FAILED)
626 printf("Inner IP checksum test failed\n");
631 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
633 if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
634 ret = test_ipsec_l4_csum_verify(m);
638 if (ret == TEST_FAILED)
639 printf("Inner L4 checksum test failed\n");
/* Final byte-for-byte comparison against the expected output vector. */
645 if (memcmp(output_text, td->output_text.data + skip, len)) {
649 printf("TestCase %s line %d: %s\n", __func__, __LINE__,
650 "output text not as expected\n");
652 rte_hexdump(stdout, "expected", td->output_text.data + skip,
654 rte_hexdump(stdout, "actual", output_text, len);
/*
 * Build a result descriptor @res_d for combined-mode testing: clone @td,
 * use the just-produced packet in @m as the new input text, and flip the
 * descriptor to ingress with decrypt/verify crypto ops so the output of an
 * outbound operation can be fed back through inbound processing.
 */
662 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
663 struct ipsec_test_data *res_d)
665 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
666 uint32_t len = rte_pktmbuf_pkt_len(m);
668 memcpy(res_d, td, sizeof(*res_d));
669 memcpy(res_d->input_text.data, output_text, len);
670 res_d->input_text.len = len;
672 res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
/* AEAD branch vs chained branch (the selecting 'if' is elided). */
674 res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
676 res_d->xform.chain.cipher.cipher.op =
677 RTE_CRYPTO_CIPHER_OP_DECRYPT;
678 res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
/*
 * Post-process a completed IPsec operation: on egress, first check IV
 * uniqueness (condition guarding it is partially elided — the visible line
 * tests the egress direction), then either verify the packet against the
 * expected vector (res_d == NULL path) or prepare res_d for a subsequent
 * inbound pass (combined-mode path).
 */
685 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
686 struct ipsec_test_data *res_d, bool silent,
687 const struct ipsec_test_flags *flags)
692 td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
693 ret = test_ipsec_iv_verify_push(m, td);
694 if (ret != TEST_SUCCESS)
699 * In case of known vector tests & all inbound tests, res_d provided
700 * would be NULL and output data need to be validated against expected.
701 * For inbound, output_text would be plain packet and for outbound
702 * output_text would IPsec packet. Validate by comparing against
705 * In case of combined mode tests, the output_text from outbound
706 * operation (ie, IPsec packet) would need to be inbound processed to
707 * obtain the plain text. Copy output_text to result data, 'res_d', so
708 * that inbound processing can be done.
712 return test_ipsec_td_verify(m, td, silent, flags);
714 return test_ipsec_res_d_prepare(m, td, res_d);
/*
 * Interpret the crypto op status according to the test flags: negative
 * tests (hard expiry on the last packet, tunnel header verify, ICV
 * corruption — all ingress-only) require op->status == ERROR; all other
 * cases require SUCCESS.  Soft expiry additionally checks the aux flag on
 * the final packet.  (Definition continues past the end of this excerpt.)
 */
718 test_ipsec_status_check(struct rte_crypto_op *op,
719 const struct ipsec_test_flags *flags,
720 enum rte_security_ipsec_sa_direction dir,
723 int ret = TEST_SUCCESS;
/* Hard packet-count expiry: the very last packet must fail. */
725 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
726 flags->sa_expiry_pkts_hard &&
727 pkt_num == IPSEC_TEST_PACKETS_MAX) {
728 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
729 printf("SA hard expiry (pkts) test failed\n");
736 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
737 flags->tunnel_hdr_verify) {
738 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
739 printf("Tunnel header verify test case failed\n");
746 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
747 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
748 printf("ICV corruption test case failed\n");
/* Positive path: any non-SUCCESS status is a failure. */
752 if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
753 printf("Security op processing failed [pkt_num: %d]\n",
/* Soft expiry is reported via aux_flags, not via an error status. */
759 if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
760 if (!(op->aux_flags &
761 RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
762 printf("SA soft expiry (pkts) test failed\n");