/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_hexdump.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "test.h"
#include "test_cryptodev_security_ipsec.h"

extern struct ipsec_test_data pkt_aes_256_gcm;
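
/*
 * Check each IPsec option requested in 'ipsec_xform' against the options
 * advertised in the device's security capability. Returns -ENOTSUP on the
 * first unsupported option; log messages are suppressed when 'silent' is set.
 */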
int
test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
			   const struct rte_security_capability *sec_cap,
			   bool silent)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
				"verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Stats is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	     sec_cap->ipsec.options.tunnel_hdr_verify)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	return 0;
}
int
test_ipsec_crypto_caps_aead_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *aead)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    crypto_cap->sym.xform_type == aead->type &&
		    crypto_cap->sym.aead.algo == aead->aead.algo) {
			sym_cap = &crypto_cap->sym;
			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
					aead->aead.key.length,
					aead->aead.digest_length,
					aead->aead.aad_length,
					aead->aead.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}
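
/*
 * Derive an inbound test vector from an outbound one: swap input and
 * output text and flip the SA direction and crypto operations.
 */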
void
test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
			  struct ipsec_test_data *td_in)
{
	memcpy(td_in, td_out, sizeof(*td_in));

	/* Populate output text of td_in with input text of td_out */
	memcpy(td_in->output_text.data, td_out->input_text.data,
	       td_out->input_text.len);
	td_in->output_text.len = td_out->input_text.len;

	/* Populate input text of td_in with output text of td_out */
	memcpy(td_in->input_text.data, td_out->output_text.data,
	       td_out->output_text.len);
	td_in->input_text.len = td_out->output_text.len;

	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	if (td_in->aead) {
		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		td_in->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
	}
}
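
/* Identify the IP version from the first nibble of the header. */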
static bool
is_ipv4(void *ip)
{
	struct rte_ipv4_hdr *ipv4 = ip;
	uint8_t ip_ver;

	/* The version is the upper nibble of the first header byte */
	ip_ver = (ipv4->version_ihl & 0xf0) >> 4;
	if (ip_ver == IPVERSION)
		return true;

	return false;
}
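
/*
 * Zero the selected L3/L4 checksum fields of a plain packet so that
 * checksum offload results can be verified after processing.
 */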
void
test_ipsec_csum_init(void *ip, bool l3, bool l4)
{
	struct rte_ipv4_hdr *ipv4;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	uint8_t next_proto;
	size_t size;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		size = sizeof(struct rte_ipv4_hdr);
		next_proto = ipv4->next_proto_id;

		if (l3)
			ipv4->hdr_checksum = 0;
	} else {
		size = sizeof(struct rte_ipv6_hdr);
		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
	}

	if (l4) {
		switch (next_proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
			tcp->cksum = 0;
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
			udp->dgram_cksum = 0;
			break;
		default:
			break;
		}
	}
}
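
/*
 * Build 'nb_td' test vectors from the AES-256-GCM template, overriding
 * the algorithm and key from 'param1' and applying flag-driven options.
 */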
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)
{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Copy template for packet & key fields */
		memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

		/* Override fields based on param */
		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD)
			td->aead = true;
		else
			td->aead = false;

		td->xform.aead.aead.algo = param1->alg.aead;
		td->xform.aead.aead.key.length = param1->key_length;

		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}
	}

	RTE_SET_USED(param2);
}
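
/*
 * Patch inbound vectors with the corresponding outbound results and
 * apply per-flag tweaks (ICV corruption, UDP encapsulation, hard expiry,
 * tunnel header verification, inner checksum flags).
 */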
void
test_ipsec_td_update(struct ipsec_test_data td_inb[],
		     const struct ipsec_test_data td_outb[],
		     int nb_td,
		     const struct ipsec_test_flags *flags)
{
	int i;

	for (i = 0; i < nb_td; i++) {
		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
		       td_outb[i].input_text.len);
		td_inb[i].output_text.len = td_outb[i].input_text.len;

		if (flags->icv_corrupt) {
			int icv_pos = td_inb[i].input_text.len - 4;

			td_inb[i].input_text.data[icv_pos] += 1;
		}

		if (flags->sa_expiry_pkts_hard)
			td_inb[i].ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->udp_encap)
			td_inb[i].ipsec_xform.options.udp_encap = 1;

		if (flags->udp_ports_verify)
			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;

		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
			flags->tunnel_hdr_verify;

		if (flags->ip_csum)
			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;

		if (flags->l4_csum)
			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;

		/* Clear outbound specific flags */
		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
	}
}
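
/* Print the algorithm (and key size) exercised by the test case. */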
void
test_ipsec_display_alg(const struct crypto_param *param1,
		       const struct crypto_param *param2)
{
	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		printf("\t%s [%d]\n",
		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
		       param1->key_length * 8);

	RTE_SET_USED(param2);
}
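
/* Length of the outer header prepended on egress in tunnel mode, else 0. */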
static int
test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
{
	int len = 0;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
				len += sizeof(struct rte_ipv4_hdr);
			else
				len += sizeof(struct rte_ipv6_hdr);
		}
	}

	return len;
}
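
/*
 * Verify that the IV generated for this packet does not repeat any of the
 * previously recorded ones, then record it. A static queue holds the last
 * IPSEC_TEST_PACKETS_MAX IVs, so repeats within that window are caught.
 */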
static int
test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int i, iv_pos, iv_len;
	static int index;

	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated\n");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */
	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	/* Wrap around once the queue is full */
	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}
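
/* Recompute the IPv4 header checksum and compare with the received one. */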
static int
test_ipsec_l3_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum, expected_cksum;
	struct rte_ipv4_hdr *ip;

	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if (!is_ipv4((void *)ip))
		return TEST_SKIPPED;

	actual_cksum = ip->hdr_checksum;

	ip->hdr_checksum = 0;

	expected_cksum = rte_ipv4_cksum(ip);

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}
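
/*
 * Recompute the inner TCP/UDP checksum (over IPv4 or IPv6) and compare
 * it with the checksum carried in the packet.
 */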
static int
test_ipsec_l4_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = rte_pktmbuf_mtod(m, void *);

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}
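
/*
 * Compare the processed packet against the expected output text, after
 * stripping UDP encapsulation and outer tunnel headers where applicable,
 * and run the inner checksum verifications requested by the flags.
 */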
static int
test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
		     bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
	int ret;

	/* For tests expecting an error status, skip data verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify))
		return TEST_SUCCESS;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	    flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;
			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;
			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		}

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
		       len, td->output_text.len);
		return TEST_FAILED;
	}

	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->ip_csum) {
		if (m->ol_flags & PKT_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->l4_csum) {
		if (m->ol_flags & PKT_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	if (memcmp(output_text, td->output_text.data + skip, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
		       "output text not as expected");

		rte_hexdump(stdout, "expected", td->output_text.data + skip,
			    len);
		rte_hexdump(stdout, "actual", output_text, len);

		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
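
/*
 * Capture the outbound result as the input of a fresh inbound test vector
 * so that combined mode tests can feed it back through inbound processing.
 */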
static int
test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
			 struct ipsec_test_data *res_d)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t len = rte_pktmbuf_pkt_len(m);

	memcpy(res_d, td, sizeof(*res_d));
	memcpy(res_d->input_text.data, output_text, len);
	res_d->input_text.len = len;

	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	if (res_d->aead) {
		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		printf("Only AEAD supported\n");
		return TEST_SKIPPED;
	}

	return TEST_SUCCESS;
}
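
/*
 * Entry point for validating a processed packet. A typical caller
 * (sketch, assuming a test case that has just dequeued the crypto op
 * owning mbuf 'm'):
 *
 *	ret = test_ipsec_post_process(m, td, res_d, silent, flags);
 *	if (ret != TEST_SUCCESS)
 *		return ret;
 */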
int
test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	int ret;

	if (flags->iv_gen &&
	    td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		ret = test_ipsec_iv_verify_push(m, td);
		if (ret != TEST_SUCCESS)
			return ret;
	}

	/*
	 * In case of known vector tests & all inbound tests, the res_d
	 * provided would be NULL and the output data needs to be validated
	 * against the expected values. For inbound, output_text would be the
	 * plain packet and for outbound, output_text would be the IPsec
	 * packet. Validate by comparing against known vectors.
	 *
	 * In case of combined mode tests, the output_text from the outbound
	 * operation (ie, the IPsec packet) would need to be inbound processed
	 * to obtain the plain text. Copy output_text to the result data,
	 * 'res_d', so that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(m, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(m, td, res_d);
}
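
/*
 * Check the crypto op status against the expectation of the test case:
 * negative tests (hard expiry, tunnel header verify, ICV corruption) must
 * report an error status, everything else must succeed, and soft expiry
 * must raise the aux flag on the last packet.
 */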
int
test_ipsec_status_check(struct rte_crypto_op *op,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir,
			int pkt_num)
{
	int ret = TEST_SUCCESS;

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    flags->sa_expiry_pkts_hard &&
	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("SA hard expiry (pkts) test failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->tunnel_hdr_verify) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Tunnel header verify test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("ICV corruption test case failed\n");
			ret = TEST_FAILED;
		}
	} else {
		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			printf("Security op processing failed [pkt_num: %d]\n",
			       pkt_num);
			ret = TEST_FAILED;
		}
	}

	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (!(op->aux_flags &
		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
			printf("SA soft expiry (pkts) test failed\n");
			ret = TEST_FAILED;
		}
	}

	return ret;
}