1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_common.h>
6 #include <rte_cryptodev.h>
9 #include <rte_security.h>
13 #include "test_cryptodev_security_ipsec.h"
17 extern struct ipsec_test_data pkt_aes_256_gcm;
/*
 * Verify that every IPsec option requested in @ipsec_xform is advertised by
 * the device's security capability @sec_cap. Each unsupported option is
 * reported via RTE_LOG(INFO, USER1, ...).
 *
 * NOTE(review): this excerpt elides the line(s) following each log call
 * (presumably an error return and the closing brace of each `if`), as well as
 * the function's return type and trailing success return — confirm against
 * the full file before editing.
 */
20 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
21 const struct rte_security_capability *sec_cap,
24 /* Verify security capabilities */
/* Extended Sequence Numbers requested but not supported by the device */
26 if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
28 RTE_LOG(INFO, USER1, "ESN is not supported\n");
/* UDP encapsulation (NAT-T style ESP-in-UDP) requested but unsupported */
32 if (ipsec_xform->options.udp_encap == 1 &&
33 sec_cap->ipsec.options.udp_encap == 0) {
35 RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
/* Copying DSCP from inner to outer header requested but unsupported */
39 if (ipsec_xform->options.copy_dscp == 1 &&
40 sec_cap->ipsec.options.copy_dscp == 0) {
42 RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
/* Copying the IPv6 flow label requested but unsupported */
46 if (ipsec_xform->options.copy_flabel == 1 &&
47 sec_cap->ipsec.options.copy_flabel == 0) {
49 RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
/* Copying the IPv4 Don't Fragment bit requested but unsupported */
53 if (ipsec_xform->options.copy_df == 1 &&
54 sec_cap->ipsec.options.copy_df == 0) {
56 RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n");
/* TTL/hop-limit decrement requested but unsupported */
60 if (ipsec_xform->options.dec_ttl == 1 &&
61 sec_cap->ipsec.options.dec_ttl == 0) {
63 RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
/* ECN propagation requested but unsupported */
67 if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
69 RTE_LOG(INFO, USER1, "ECN is not supported\n");
/* Per-SA statistics requested but unsupported */
73 if (ipsec_xform->options.stats == 1 &&
74 sec_cap->ipsec.options.stats == 0) {
76 RTE_LOG(INFO, USER1, "Stats is not supported\n");
/*
 * Egress with IV generation disabled means the application supplies the IV;
 * the device must explicitly advertise iv_gen_disable support for that.
 */
80 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
81 (ipsec_xform->options.iv_gen_disable == 1) &&
82 (sec_cap->ipsec.options.iv_gen_disable != 1)) {
85 "Application provided IV is not supported\n");
/*
 * Walk the security capability's crypto capability array (terminated by an
 * entry with op == RTE_CRYPTO_OP_TYPE_UNDEFINED) looking for a symmetric AEAD
 * capability matching the algorithm in @aead, then check key/digest/AAD/IV
 * lengths against it via rte_cryptodev_sym_capability_check_aead().
 *
 * NOTE(review): the declaration/initialization of the index `j` and the
 * function's return statements are elided from this excerpt — presumably
 * success is reported when the length check passes; confirm in the full file.
 */
93 test_ipsec_crypto_caps_aead_verify(
94 const struct rte_security_capability *sec_cap,
95 struct rte_crypto_sym_xform *aead)
97 const struct rte_cryptodev_symmetric_capability *sym_cap;
98 const struct rte_cryptodev_capabilities *crypto_cap;
/* Scan until the UNDEFINED sentinel entry that terminates the array */
101 while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
102 RTE_CRYPTO_OP_TYPE_UNDEFINED) {
103 if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
104 crypto_cap->sym.xform_type == aead->type &&
105 crypto_cap->sym.aead.algo == aead->aead.algo) {
106 sym_cap = &crypto_cap->sym;
/* check_aead() returns 0 when all the supplied lengths are supported */
107 if (rte_cryptodev_sym_capability_check_aead(sym_cap,
108 aead->aead.key.length,
109 aead->aead.digest_length,
110 aead->aead.aad_length,
111 aead->aead.iv.length) == 0)
/*
 * Derive an inbound (decrypt/verify) test vector @td_in from an outbound
 * vector @td_out: copy the whole descriptor, swap input/output texts, flip
 * the SA direction to ingress, and flip the crypto operations to their
 * decrypt/verify counterparts.
 *
 * NOTE(review): the branch structure selecting between the AEAD case (line
 * 138) and the auth+cipher chain case (lines 140-142) is elided here —
 * presumably an if/else on the xform type; confirm against the full file.
 */
120 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
121 struct ipsec_test_data *td_in)
/* Start from a full copy of the outbound descriptor */
123 memcpy(td_in, td_out, sizeof(*td_in));
125 /* Populate output text of td_in with input text of td_out */
126 memcpy(td_in->output_text.data, td_out->input_text.data,
127 td_out->input_text.len);
128 td_in->output_text.len = td_out->input_text.len;
130 /* Populate input text of td_in with output text of td_out */
131 memcpy(td_in->input_text.data, td_out->output_text.data,
132 td_out->output_text.len);
133 td_in->input_text.len = td_out->output_text.len;
/* Inbound vector processes in the ingress direction */
135 td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
/* AEAD case: single op flipped to decrypt */
138 td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
/* Chained case: auth becomes verify, cipher becomes decrypt */
140 td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
141 td_in->xform.chain.cipher.cipher.op =
142 RTE_CRYPTO_CIPHER_OP_DECRYPT;
/*
 * Fill @td_array (nb_td entries) with test vectors based on the
 * pkt_aes_256_gcm template, overriding the AEAD algorithm and key length
 * from @param1. @param2 is currently unused (RTE_SET_USED below).
 *
 * NOTE(review): the loop body is partially elided here (e.g. the assignment
 * of `td` to &td_array[i] and the conditional bracketing around the AEAD
 * overrides); confirm structure against the full file. The use of @flags is
 * also not visible in this excerpt.
 */
147 test_ipsec_td_prepare(const struct crypto_param *param1,
148 const struct crypto_param *param2,
149 const struct ipsec_test_flags *flags,
150 struct ipsec_test_data *td_array,
154 struct ipsec_test_data *td;
/* Zero the whole destination array before populating */
157 memset(td_array, 0, nb_td * sizeof(*td));
159 for (i = 0; i < nb_td; i++) {
161 /* Copy template for packet & key fields */
162 memcpy(td, &pkt_aes_256_gcm, sizeof(*td));
164 /* Override fields based on param */
166 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD)
171 td->xform.aead.aead.algo = param1->alg.aead;
172 td->xform.aead.aead.key.length = param1->key_length;
/* Let the PMD generate IVs by default for these vectors */
175 td->ipsec_xform.options.iv_gen_disable = 0;
/* Second crypto param not used yet (reserved for chained algos) */
178 RTE_SET_USED(param2);
/*
 * Update the inbound vectors @td_inb from the processed outbound vectors
 * @td_outb: the outbound *input* (plain text) becomes the expected inbound
 * *output*, and per-flag tweaks (ICV corruption, UDP encapsulation) are
 * applied. Outbound-only options are cleared for the inbound pass.
 */
182 test_ipsec_td_update(struct ipsec_test_data td_inb[],
183 const struct ipsec_test_data td_outb[],
185 const struct ipsec_test_flags *flags)
189 for (i = 0; i < nb_td; i++) {
190 memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
191 td_outb[i].input_text.len);
/*
 * NOTE(review): `td_outb->input_text.len` is td_outb[0]'s length, while the
 * memcpy above uses td_outb[i]'s — for nb_td > 1 with differing packet sizes
 * this records the wrong length. Presumably this should read
 * td_outb[i].input_text.len; confirm and fix upstream.
 */
192 td_inb[i].output_text.len = td_outb->input_text.len;
/* Flip one byte near the end of the ciphertext to corrupt the ICV */
194 if (flags->icv_corrupt) {
195 int icv_pos = td_inb[i].input_text.len - 4;
196 td_inb[i].input_text.data[icv_pos] += 1;
/* Inbound SA must also expect UDP-encapsulated ESP when requested */
199 if (flags->udp_encap)
200 td_inb[i].ipsec_xform.options.udp_encap = 1;
202 /* Clear outbound specific flags */
203 td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
/*
 * Print the algorithm (name and key length, per the elided printf arguments)
 * used by the test case. Only the AEAD case is handled; @param2 is unused.
 */
208 test_ipsec_display_alg(const struct crypto_param *param1,
209 const struct crypto_param *param2)
211 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD)
212 printf("\t%s [%d]\n",
213 rte_crypto_aead_algorithm_strings[param1->alg.aead],
/* Second param reserved for chained (auth+cipher) display */
216 RTE_SET_USED(param2);
/*
 * Return the number of bytes of outer tunnel header prepended on egress:
 * an IPv4 or IPv6 header for tunnel-mode egress SAs, otherwise the
 * (elided) initial value of `len` — presumably 0; confirm in the full file.
 */
220 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
224 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
225 if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
226 if (td->ipsec_xform.tunnel.type ==
227 RTE_SECURITY_IPSEC_TUNNEL_IPV4)
228 len += sizeof(struct rte_ipv4_hdr);
/* Non-IPv4 tunnel: account for an outer IPv6 header */
230 len += sizeof(struct rte_ipv6_hdr);
/*
 * Check that the IV of the ESP packet in @m has not been seen before, then
 * record it. IVs from successive calls accumulate in the static iv_queue
 * (IPSEC_TEST_PACKETS_MAX slots of IV_LEN_MAX bytes); a repeated IV is a
 * test failure since generated IVs must be unique per packet.
 *
 * NOTE(review): the static `index` counter's declaration, the if/else
 * selecting AEAD vs cipher IV length, and the return statements are elided
 * from this excerpt — confirm against the full file.
 */
238 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
240 static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
241 uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
242 int i, iv_pos, iv_len;
/* AEAD case: the per-packet IV excludes the fixed salt bytes */
246 iv_len = td->xform.aead.aead.iv.length - td->salt.len;
/* Chained case: plain cipher IV length */
248 iv_len = td->xform.chain.cipher.cipher.iv.length;
/* IV sits right after the (optional) tunnel header and the ESP header */
250 iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
251 output_text += iv_pos;
253 TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");
255 /* Compare against previous values */
256 for (i = 0; i < index; i++) {
257 iv_tmp = &iv_queue[i * IV_LEN_MAX];
259 if (memcmp(output_text, iv_tmp, iv_len) == 0) {
260 printf("IV repeated");
265 /* Save IV for future comparisons */
267 iv_tmp = &iv_queue[index * IV_LEN_MAX];
268 memcpy(iv_tmp, output_text, iv_len);
/* Queue full: presumably the index wraps/resets here — confirm */
271 if (index == IPSEC_TEST_PACKETS_MAX)
/*
 * Compare the processed packet in @m against the expected output in @td.
 * Negative ICV-corruption tests skip verification entirely. For
 * UDP-encapsulated egress packets the outer UDP header is validated
 * (next-proto must be UDP) and then skipped before the byte comparison.
 * On mismatch (unless @silent) the expected and actual buffers are
 * hex-dumped.
 *
 * NOTE(review): several return statements, the closing braces of the
 * conditionals, and the condition pairing with `flags` on line 291 are
 * elided from this excerpt — confirm against the full file.
 */
278 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
279 bool silent, const struct ipsec_test_flags *flags)
281 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
282 uint32_t skip, len = rte_pktmbuf_pkt_len(m);
284 /* For negative tests, no need to do verification */
285 if (flags->icv_corrupt &&
286 td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
/* Egress + (elided condition, presumably flags->udp_encap): check outer UDP */
289 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
291 const struct rte_ipv4_hdr *iph4;
292 const struct rte_ipv6_hdr *iph6;
294 if (td->ipsec_xform.tunnel.type ==
295 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
296 iph4 = (const struct rte_ipv4_hdr *)output_text;
297 if (iph4->next_proto_id != IPPROTO_UDP) {
298 printf("UDP header is not found\n");
302 iph6 = (const struct rte_ipv6_hdr *)output_text;
303 if (iph6->proto != IPPROTO_UDP) {
304 printf("UDP header is not found\n");
/* Strip the outer UDP header before comparing payload bytes */
309 len -= sizeof(struct rte_udp_hdr);
310 output_text += sizeof(struct rte_udp_hdr);
313 if (len != td->output_text.len) {
314 printf("Output length (%d) not matching with expected (%d)\n",
315 len, td->output_text.len);
/* Skip the (variable-size) tunnel header portion of the expected data */
319 skip = test_ipsec_tunnel_hdr_len_get(td);
324 if (memcmp(output_text, td->output_text.data + skip, len)) {
328 printf("TestCase %s line %d: %s\n", __func__, __LINE__,
329 "output text not as expected\n");
331 rte_hexdump(stdout, "expected", td->output_text.data + skip,
333 rte_hexdump(stdout, "actual", output_text, len);
/*
 * Build the result descriptor @res_d for combined-mode tests: copy @td, take
 * the outbound-processed packet in @m as the new input text, and flip the
 * direction/crypto op so the descriptor can drive an inbound pass. Only the
 * AEAD xform is handled; the (elided) else branch prints "Only AEAD
 * supported".
 */
341 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
342 struct ipsec_test_data *res_d)
344 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
345 uint32_t len = rte_pktmbuf_pkt_len(m);
347 memcpy(res_d, td, sizeof(*res_d));
/* The IPsec packet produced outbound becomes the inbound input */
348 memcpy(res_d->input_text.data, output_text, len);
349 res_d->input_text.len = len;
351 res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
353 res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
/* Non-AEAD xforms are not supported by combined-mode preparation */
355 printf("Only AEAD supported\n");
/*
 * Post-process a completed IPsec operation on mbuf @m. For egress (under an
 * elided additional condition, presumably an IV-uniqueness flag) the pushed
 * IV is checked for repeats first. Then either the output is verified against
 * the expected vector (@res_d == NULL path per the comment below) or @res_d
 * is prepared so a combined-mode test can run the inbound pass.
 */
363 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
364 struct ipsec_test_data *res_d, bool silent,
365 const struct ipsec_test_flags *flags)
/* Egress only: verify the generated IV is unique across packets */
370 td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
371 ret = test_ipsec_iv_verify_push(m, td);
372 if (ret != TEST_SUCCESS)
377 * In case of known vector tests & all inbound tests, res_d provided
378 * would be NULL and output data need to be validated against expected.
379 * For inbound, output_text would be plain packet and for outbound
380 * output_text would IPsec packet. Validate by comparing against
383 * In case of combined mode tests, the output_text from outbound
384 * operation (ie, IPsec packet) would need to be inbound processed to
385 * obtain the plain text. Copy output_text to result data, 'res_d', so
386 * that inbound processing can be done.
/* Known-vector / inbound path: direct verification */
390 return test_ipsec_td_verify(m, td, silent, flags);
/* Combined-mode path: stash output for the subsequent inbound pass */
392 return test_ipsec_res_d_prepare(m, td, res_d);
396 test_ipsec_status_check(struct rte_crypto_op *op,
397 const struct ipsec_test_flags *flags,
398 enum rte_security_ipsec_sa_direction dir)
400 int ret = TEST_SUCCESS;
402 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
403 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
404 printf("ICV corruption test case failed\n");
408 if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
409 printf("Security op processing failed\n");