1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
5 #include <rte_cryptodev.h>
10 #include "cperf_test_vectors.h"
/*
 * Populate a burst of asymmetric (modular exponentiation) crypto ops.
 * Each op's base/result buffers are pointed at the pre-loaded modex test
 * data from @options and the asym session is attached.
 * NOTE(review): some interior lines (loop close, return) are elided in
 * this excerpt.
 */
13 cperf_set_ops_asym(struct rte_crypto_op **ops,
14 uint32_t src_buf_offset __rte_unused,
15 uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
16 struct rte_cryptodev_sym_session *sess,
17 const struct cperf_options *options,
18 const struct cperf_test_vector *test_vector __rte_unused,
19 uint16_t iv_offset __rte_unused,
20 uint32_t *imix_idx __rte_unused,
21 uint64_t *tsc_start __rte_unused)
/* The sym session pointer is reused as an opaque asym session handle. */
24 void *asym_sess = (void *)sess;
26 for (i = 0; i < nb_ops; i++) {
27 struct rte_crypto_asym_op *asym_op = ops[i]->asym;
29 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
/* Base (operand) and result buffers come straight from the test vector. */
30 asym_op->modex.base.data = options->modex_data->base.data;
31 asym_op->modex.base.length = options->modex_data->base.len;
32 asym_op->modex.result.data = options->modex_data->result.data;
33 asym_op->modex.result.length = options->modex_data->result.len;
34 rte_crypto_op_attach_asym_session(ops[i], asym_sess);
39 #ifdef RTE_LIB_SECURITY
/*
 * Fill an mbuf with the IPsec test payload. For outbound (encrypt)
 * direction, copy the reference IPv4 header from the test vector and
 * patch its total-length field to match the mbuf's data length.
 */
41 test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
42 const struct cperf_test_vector *test_vector)
44 struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
46 if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ||
47 (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
/* Plaintext begins with a valid IPv4 header in the test vector. */
48 memcpy(ip, test_vector->plaintext.data,
49 sizeof(struct rte_ipv4_hdr));
/* total_length is big-endian on the wire. */
51 ip->total_length = rte_cpu_to_be_16(m->data_len);
/*
 * Populate a burst of lookaside-security (PDCP / DOCSIS) ops: attach the
 * security session, wire up src/dst mbufs located at fixed offsets after
 * each op, and set per-protocol buffer sizes and cipher/auth regions.
 * NOTE(review): several interior lines (else branches, loop close,
 * return) are elided in this excerpt.
 */
56 cperf_set_ops_security(struct rte_crypto_op **ops,
57 uint32_t src_buf_offset __rte_unused,
58 uint32_t dst_buf_offset __rte_unused,
59 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
60 const struct cperf_options *options,
61 const struct cperf_test_vector *test_vector,
62 uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
67 for (i = 0; i < nb_ops; i++) {
68 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
69 struct rte_security_session *sec_sess =
70 (struct rte_security_session *)sess;
/*
 * Per-packet HFN lives in the op's IV area; when the session already
 * carries the HFN (pdcp_ses_hfn_en) the per-packet value is zeroed,
 * otherwise the default HFN is supplied per packet.
 */
73 uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
74 uint32_t *, iv_offset);
75 *per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
77 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
78 rte_security_attach_session(ops[i], sec_sess);
/* src mbuf is laid out at a fixed offset after the op itself. */
79 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
82 if (options->op_type == CPERF_PDCP) {
83 sym_op->m_src->buf_len = options->segment_sz;
84 sym_op->m_src->data_len = options->test_buffer_size;
85 sym_op->m_src->pkt_len = sym_op->m_src->data_len;
88 if (options->op_type == CPERF_DOCSIS) {
/* IMIX: cycle through the configured buffer-size distribution. */
89 if (options->imix_distribution_count) {
90 buf_sz = options->imix_buffer_sizes[*imix_idx];
91 *imix_idx = (*imix_idx + 1) % options->pool_sz;
93 buf_sz = options->test_buffer_size;
95 sym_op->m_src->buf_len = options->segment_sz;
96 sym_op->m_src->data_len = buf_sz;
97 sym_op->m_src->pkt_len = buf_sz;
99 /* DOCSIS header is not CRC'ed */
100 sym_op->auth.data.offset = options->docsis_hdr_sz;
101 sym_op->auth.data.length = buf_sz -
102 sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
104 * DOCSIS header and SRC and DST MAC addresses are not
107 sym_op->cipher.data.offset = sym_op->auth.data.offset +
108 RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
109 sym_op->cipher.data.length = buf_sz -
110 sym_op->cipher.data.offset;
113 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
114 if (dst_buf_offset == 0)
115 sym_op->m_dst = NULL;
117 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
121 RTE_SET_USED(tsc_start);
122 RTE_SET_USED(test_vector);
/*
 * Populate a burst of lookaside IPsec ops. Attaches the security session,
 * resets each mbuf (the PMD consumes headroom, so data_off is restored),
 * and — when a test file is in use — repopulates payloads, charging the
 * population time to *tsc_start so it is excluded from the measurement.
 * NOTE(review): interior lines (braces, return) are elided in this excerpt.
 */
128 cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
129 uint32_t src_buf_offset __rte_unused,
130 uint32_t dst_buf_offset __rte_unused,
131 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
132 const struct cperf_options *options,
133 const struct cperf_test_vector *test_vector,
134 uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
137 struct rte_security_session *sec_sess =
138 (struct rte_security_session *)sess;
139 const uint32_t test_buffer_size = options->test_buffer_size;
140 const uint32_t headroom_sz = options->headroom_sz;
141 const uint32_t segment_sz = options->segment_sz;
142 uint64_t tsc_start_temp, tsc_end_temp;
145 RTE_SET_USED(imix_idx);
147 for (i = 0; i < nb_ops; i++) {
148 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
149 struct rte_mbuf *m = sym_op->m_src;
151 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
152 rte_security_attach_session(ops[i], sec_sess);
153 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
156 /* In case of IPsec, headroom is consumed by PMD,
157 * hence resetting it.
159 m->data_off = headroom_sz;
161 m->buf_len = segment_sz;
162 m->data_len = test_buffer_size;
163 m->pkt_len = test_buffer_size;
/* IPsec output is generated in-place; no separate dst mbuf. */
165 sym_op->m_dst = NULL;
168 if (options->test_file != NULL)
/* Time the payload re-population and add it to the caller's tsc
 * baseline so it does not count against the measured crypto time.
 */
171 tsc_start_temp = rte_rdtsc_precise();
173 for (i = 0; i < nb_ops; i++) {
174 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
175 struct rte_mbuf *m = sym_op->m_src;
177 test_ipsec_vec_populate(m, options, test_vector);
180 tsc_end_temp = rte_rdtsc_precise();
181 *tsc_start += tsc_end_temp - tsc_start_temp;
/*
 * Populate a burst of NULL-cipher ops: attach the sym session, wire the
 * src/dst mbufs located at fixed offsets after each op, and set the
 * cipher region length (IMIX-aware) at offset 0.
 * NOTE(review): else branches / closing braces elided in this excerpt.
 */
189 cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
190 uint32_t src_buf_offset, uint32_t dst_buf_offset,
191 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
192 const struct cperf_options *options,
193 const struct cperf_test_vector *test_vector __rte_unused,
194 uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
195 uint64_t *tsc_start __rte_unused)
199 for (i = 0; i < nb_ops; i++) {
200 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
202 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
203 rte_crypto_op_attach_sym_session(ops[i], sess);
205 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
208 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
209 if (dst_buf_offset == 0)
210 sym_op->m_dst = NULL;
212 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
215 /* cipher parameters */
216 if (options->imix_distribution_count) {
217 sym_op->cipher.data.length =
218 options->imix_buffer_sizes[*imix_idx];
219 *imix_idx = (*imix_idx + 1) % options->pool_sz;
221 sym_op->cipher.data.length = options->test_buffer_size;
222 sym_op->cipher.data.offset = 0;
/*
 * Populate a burst of NULL-auth ops. Mirror image of
 * cperf_set_ops_null_cipher(): same mbuf wiring, but the auth region is
 * set instead of the cipher region.
 * NOTE(review): else branches / closing braces elided in this excerpt.
 */
229 cperf_set_ops_null_auth(struct rte_crypto_op **ops,
230 uint32_t src_buf_offset, uint32_t dst_buf_offset,
231 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
232 const struct cperf_options *options,
233 const struct cperf_test_vector *test_vector __rte_unused,
234 uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
235 uint64_t *tsc_start __rte_unused)
239 for (i = 0; i < nb_ops; i++) {
240 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
242 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
243 rte_crypto_op_attach_sym_session(ops[i], sess);
245 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
248 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
249 if (dst_buf_offset == 0)
250 sym_op->m_dst = NULL;
252 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
255 /* auth parameters */
256 if (options->imix_distribution_count) {
257 sym_op->auth.data.length =
258 options->imix_buffer_sizes[*imix_idx];
259 *imix_idx = (*imix_idx + 1) % options->pool_sz;
261 sym_op->auth.data.length = options->test_buffer_size;
262 sym_op->auth.data.offset = 0;
/*
 * Populate a burst of cipher-only ops. Sets the cipher region length
 * (IMIX-aware, converted to bits for SNOW3G/KASUMI/ZUC) and, for verify
 * tests, copies the reference cipher IV into each op.
 * NOTE(review): else branches / closing braces elided in this excerpt.
 */
269 cperf_set_ops_cipher(struct rte_crypto_op **ops,
270 uint32_t src_buf_offset, uint32_t dst_buf_offset,
271 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
272 const struct cperf_options *options,
273 const struct cperf_test_vector *test_vector,
274 uint16_t iv_offset, uint32_t *imix_idx,
275 uint64_t *tsc_start __rte_unused)
279 for (i = 0; i < nb_ops; i++) {
280 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
282 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
283 rte_crypto_op_attach_sym_session(ops[i], sess);
285 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
288 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
289 if (dst_buf_offset == 0)
290 sym_op->m_dst = NULL;
292 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
295 /* cipher parameters */
296 if (options->imix_distribution_count) {
297 sym_op->cipher.data.length =
298 options->imix_buffer_sizes[*imix_idx];
299 *imix_idx = (*imix_idx + 1) % options->pool_sz;
301 sym_op->cipher.data.length = options->test_buffer_size;
/* These wireless algos express the cipher length in bits. */
303 if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
304 options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
305 options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
306 sym_op->cipher.data.length <<= 3;
308 sym_op->cipher.data.offset = 0;
/* Verify tests need the reference IV so output can be checked. */
311 if (options->test == CPERF_TEST_TYPE_VERIFY) {
312 for (i = 0; i < nb_ops; i++) {
313 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
314 uint8_t *, iv_offset);
316 memcpy(iv_ptr, test_vector->cipher_iv.data,
317 test_vector->cipher_iv.length);
/*
 * Populate a burst of auth-only ops. For VERIFY the digest comes from the
 * test vector; for GENERATE the digest pointer is placed just past the
 * payload inside the (possibly segmented) mbuf chain. Auth length is
 * IMIX-aware and converted to bits for SNOW3G/KASUMI/ZUC.
 * NOTE(review): several interior lines (else branches, segment-walk body,
 * closing braces) are elided in this excerpt.
 */
326 cperf_set_ops_auth(struct rte_crypto_op **ops,
327 uint32_t src_buf_offset, uint32_t dst_buf_offset,
328 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
329 const struct cperf_options *options,
330 const struct cperf_test_vector *test_vector,
331 uint16_t iv_offset, uint32_t *imix_idx,
332 uint64_t *tsc_start __rte_unused)
336 for (i = 0; i < nb_ops; i++) {
337 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
339 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
340 rte_crypto_op_attach_sym_session(ops[i], sess);
342 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
345 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
346 if (dst_buf_offset == 0)
347 sym_op->m_dst = NULL;
349 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
352 if (test_vector->auth_iv.length) {
353 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
356 memcpy(iv_ptr, test_vector->auth_iv.data,
357 test_vector->auth_iv.length);
360 /* authentication parameters */
361 if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
362 sym_op->auth.digest.data = test_vector->digest.data;
363 sym_op->auth.digest.phys_addr =
364 test_vector->digest.phys_addr;
/* GENERATE: locate the segment holding the end of the payload. */
367 uint32_t offset = options->test_buffer_size;
368 struct rte_mbuf *buf, *tbuf;
370 if (options->out_of_place) {
373 tbuf = sym_op->m_src;
374 while ((tbuf->next != NULL) &&
375 (offset >= tbuf->data_len)) {
376 offset -= tbuf->data_len;
380 * If there is not enough room in segment,
381 * place the digest in the next segment
383 if ((tbuf->data_len - offset) < options->digest_sz) {
390 sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
392 sym_op->auth.digest.phys_addr =
393 rte_pktmbuf_iova_offset(buf, offset);
397 if (options->imix_distribution_count) {
398 sym_op->auth.data.length =
399 options->imix_buffer_sizes[*imix_idx];
400 *imix_idx = (*imix_idx + 1) % options->pool_sz;
402 sym_op->auth.data.length = options->test_buffer_size;
/* These wireless algos express the auth length in bits. */
404 if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
405 options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
406 options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
407 sym_op->auth.data.length <<= 3;
409 sym_op->auth.data.offset = 0;
/* Verify tests need the reference auth IV so output can be checked. */
412 if (options->test == CPERF_TEST_TYPE_VERIFY) {
413 if (test_vector->auth_iv.length) {
414 for (i = 0; i < nb_ops; i++) {
415 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
416 uint8_t *, iv_offset);
418 memcpy(iv_ptr, test_vector->auth_iv.data,
419 test_vector->auth_iv.length);
/*
 * Populate a burst of chained cipher+auth ops. Combines the cipher setup
 * of cperf_set_ops_cipher() with the digest placement of
 * cperf_set_ops_auth(); for AUTH_THEN_CIPHER + GENERATE the cipher region
 * is extended to also cover the digest. Verify tests copy the cipher IV
 * (and auth IV immediately after it) into each op.
 * NOTE(review): several interior lines (else branches, segment-walk body,
 * closing braces) are elided in this excerpt.
 */
427 cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
428 uint32_t src_buf_offset, uint32_t dst_buf_offset,
429 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
430 const struct cperf_options *options,
431 const struct cperf_test_vector *test_vector,
432 uint16_t iv_offset, uint32_t *imix_idx,
433 uint64_t *tsc_start __rte_unused)
437 for (i = 0; i < nb_ops; i++) {
438 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
440 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
441 rte_crypto_op_attach_sym_session(ops[i], sess);
443 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
446 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
447 if (dst_buf_offset == 0)
448 sym_op->m_dst = NULL;
450 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
453 /* cipher parameters */
454 if (options->imix_distribution_count) {
455 sym_op->cipher.data.length =
456 options->imix_buffer_sizes[*imix_idx];
457 *imix_idx = (*imix_idx + 1) % options->pool_sz;
459 sym_op->cipher.data.length = options->test_buffer_size;
/* Auth-then-cipher + generate: digest is ciphered along with payload. */
461 if ((options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) &&
462 (options->op_type == CPERF_AUTH_THEN_CIPHER))
463 sym_op->cipher.data.length += options->digest_sz;
/* These wireless algos express the cipher length in bits. */
465 if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
466 options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
467 options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
468 sym_op->cipher.data.length <<= 3;
470 sym_op->cipher.data.offset = 0;
472 /* authentication parameters */
473 if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
474 sym_op->auth.digest.data = test_vector->digest.data;
475 sym_op->auth.digest.phys_addr =
476 test_vector->digest.phys_addr;
/* GENERATE: locate the segment holding the end of the payload. */
479 uint32_t offset = options->test_buffer_size;
480 struct rte_mbuf *buf, *tbuf;
482 if (options->out_of_place) {
485 tbuf = sym_op->m_src;
486 while ((tbuf->next != NULL) &&
487 (offset >= tbuf->data_len)) {
488 offset -= tbuf->data_len;
492 * If there is not enough room in segment,
493 * place the digest in the next segment
495 if ((tbuf->data_len - offset) < options->digest_sz) {
502 sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
504 sym_op->auth.digest.phys_addr =
505 rte_pktmbuf_iova_offset(buf, offset);
508 if (options->imix_distribution_count) {
509 sym_op->auth.data.length =
510 options->imix_buffer_sizes[*imix_idx];
511 *imix_idx = (*imix_idx + 1) % options->pool_sz;
513 sym_op->auth.data.length = options->test_buffer_size;
/* These wireless algos express the auth length in bits. */
515 if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
516 options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
517 options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
518 sym_op->auth.data.length <<= 3;
520 sym_op->auth.data.offset = 0;
523 if (options->test == CPERF_TEST_TYPE_VERIFY) {
524 for (i = 0; i < nb_ops; i++) {
525 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
526 uint8_t *, iv_offset);
528 memcpy(iv_ptr, test_vector->cipher_iv.data,
529 test_vector->cipher_iv.length);
530 if (test_vector->auth_iv.length) {
532 * Copy IV after the crypto operation and
535 iv_ptr += test_vector->cipher_iv.length;
536 memcpy(iv_ptr, test_vector->auth_iv.data,
537 test_vector->auth_iv.length);
/*
 * Populate a burst of AEAD ops. AAD is placed in the op private area
 * right after the (16-byte aligned) IV. For DECRYPT the digest comes from
 * the test vector; for ENCRYPT it is placed just past the payload in the
 * mbuf chain. Verify/latency tests also copy the reference IV and AAD —
 * with AES-CCM's one-byte nonce offset and 18-byte AAD offset quirks.
 * NOTE(review): several interior lines (else branches, segment-walk body,
 * closing braces) are elided in this excerpt.
 */
547 cperf_set_ops_aead(struct rte_crypto_op **ops,
548 uint32_t src_buf_offset, uint32_t dst_buf_offset,
549 uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
550 const struct cperf_options *options,
551 const struct cperf_test_vector *test_vector,
552 uint16_t iv_offset, uint32_t *imix_idx,
553 uint64_t *tsc_start __rte_unused)
556 /* AAD is placed after the IV */
557 uint16_t aad_offset = iv_offset +
558 RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);
560 for (i = 0; i < nb_ops; i++) {
561 struct rte_crypto_sym_op *sym_op = ops[i]->sym;
563 ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
564 rte_crypto_op_attach_sym_session(ops[i], sess);
566 sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
569 /* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
570 if (dst_buf_offset == 0)
571 sym_op->m_dst = NULL;
573 sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
576 /* AEAD parameters */
577 if (options->imix_distribution_count) {
578 sym_op->aead.data.length =
579 options->imix_buffer_sizes[*imix_idx];
580 *imix_idx = (*imix_idx + 1) % options->pool_sz;
582 sym_op->aead.data.length = options->test_buffer_size;
583 sym_op->aead.data.offset = 0;
/* AAD lives in the op private data, after the aligned IV. */
585 sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
586 uint8_t *, aad_offset);
587 sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
590 if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
591 sym_op->aead.digest.data = test_vector->digest.data;
592 sym_op->aead.digest.phys_addr =
593 test_vector->digest.phys_addr;
/* ENCRYPT: locate the segment holding the end of the payload. */
596 uint32_t offset = sym_op->aead.data.length +
597 sym_op->aead.data.offset;
598 struct rte_mbuf *buf, *tbuf;
600 if (options->out_of_place) {
603 tbuf = sym_op->m_src;
604 while ((tbuf->next != NULL) &&
605 (offset >= tbuf->data_len)) {
606 offset -= tbuf->data_len;
610 * If there is not enough room in segment,
611 * place the digest in the next segment
613 if ((tbuf->data_len - offset) < options->digest_sz) {
620 sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
622 sym_op->aead.digest.phys_addr =
623 rte_pktmbuf_iova_offset(buf, offset);
627 if ((options->test == CPERF_TEST_TYPE_VERIFY) ||
628 (options->test == CPERF_TEST_TYPE_LATENCY)) {
629 for (i = 0; i < nb_ops; i++) {
630 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
631 uint8_t *, iv_offset);
634 * If doing AES-CCM, nonce is copied one byte
635 * after the start of IV field, and AAD is copied
636 * 18 bytes after the start of the AAD field.
638 if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
639 memcpy(iv_ptr + 1, test_vector->aead_iv.data,
640 test_vector->aead_iv.length);
642 memcpy(ops[i]->sym->aead.aad.data + 18,
643 test_vector->aad.data,
644 test_vector->aad.length);
646 memcpy(iv_ptr, test_vector->aead_iv.data,
647 test_vector->aead_iv.length);
649 memcpy(ops[i]->sym->aead.aad.data,
650 test_vector->aad.data,
651 test_vector->aad.length);
/*
 * Build the crypto transform chain (AEAD, or cipher->auth) from the test
 * options/vector and create a lookaside-protocol IPsec security session
 * on the device's security context. Direction (egress/ingress) is derived
 * from the configured encrypt/generate ops; the SPI is the lcore id so
 * each core gets a distinct SA.
 * NOTE(review): some interior lines (braces, a few initializers) are
 * elided in this excerpt.
 */
659 static struct rte_cryptodev_sym_session *
660 create_ipsec_session(struct rte_mempool *sess_mp,
661 struct rte_mempool *priv_mp,
663 const struct cperf_options *options,
664 const struct cperf_test_vector *test_vector,
667 struct rte_crypto_sym_xform xform = {0};
668 struct rte_crypto_sym_xform auth_xform = {0};
670 if (options->aead_algo != 0) {
671 /* Setup AEAD Parameters */
672 xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
674 xform.aead.algo = options->aead_algo;
675 xform.aead.op = options->aead_op;
676 xform.aead.iv.offset = iv_offset;
677 xform.aead.key.data = test_vector->aead_key.data;
678 xform.aead.key.length = test_vector->aead_key.length;
679 xform.aead.iv.length = test_vector->aead_iv.length;
680 xform.aead.digest_length = options->digest_sz;
681 xform.aead.aad_length = options->aead_aad_sz;
682 } else if (options->cipher_algo != 0 && options->auth_algo != 0) {
683 /* Setup Cipher Parameters */
684 xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
686 xform.cipher.algo = options->cipher_algo;
687 xform.cipher.op = options->cipher_op;
688 xform.cipher.iv.offset = iv_offset;
689 xform.cipher.iv.length = test_vector->cipher_iv.length;
690 /* cipher different than null */
691 if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
692 xform.cipher.key.data = test_vector->cipher_key.data;
693 xform.cipher.key.length =
694 test_vector->cipher_key.length;
696 xform.cipher.key.data = NULL;
697 xform.cipher.key.length = 0;
700 /* Setup Auth Parameters */
701 auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
702 auth_xform.next = NULL;
703 auth_xform.auth.algo = options->auth_algo;
704 auth_xform.auth.op = options->auth_op;
/* Auth IV follows the cipher IV in the op private area. */
705 auth_xform.auth.iv.offset = iv_offset +
706 xform.cipher.iv.length;
707 /* auth different than null */
708 if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
709 auth_xform.auth.digest_length = options->digest_sz;
710 auth_xform.auth.key.length =
711 test_vector->auth_key.length;
712 auth_xform.auth.key.data = test_vector->auth_key.data;
713 auth_xform.auth.iv.length = test_vector->auth_iv.length;
715 auth_xform.auth.digest_length = 0;
716 auth_xform.auth.key.length = 0;
717 auth_xform.auth.key.data = NULL;
718 auth_xform.auth.iv.length = 0;
/* Chain: cipher first, then auth. */
721 xform.next = &auth_xform;
726 #define CPERF_IPSEC_SRC_IP 0x01010101
727 #define CPERF_IPSEC_DST_IP 0x02020202
728 #define CPERF_IPSEC_SALT 0x0
729 #define CPERF_IPSEC_DEFTTL 64
730 struct rte_security_ipsec_tunnel_param tunnel = {
731 .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
733 .src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
734 .dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
737 .ttl = CPERF_IPSEC_DEFTTL,
740 struct rte_security_session_conf sess_conf = {
741 .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
742 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
744 .spi = rte_lcore_id(),
745 /**< For testing sake, lcore_id is taken as SPI so that
746 * for every core a different session is created.
748 .salt = CPERF_IPSEC_SALT,
/* Egress when encrypting/generating, ingress otherwise. */
752 ((options->cipher_op ==
753 RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
755 RTE_CRYPTO_AUTH_OP_GENERATE)) ||
757 RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
758 RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
759 RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
760 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
761 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
765 .crypto_xform = &xform
768 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
769 rte_cryptodev_get_sec_ctx(dev_id);
771 /* Create security session */
772 return (void *)rte_security_session_create(ctx,
773 &sess_conf, sess_mp, priv_mp);
/*
 * Create the session appropriate for the configured op type:
 *  - CPERF_ASYM_MODEX: asym session from the modex test data;
 *  - CPERF_PDCP / CPERF_DOCSIS: lookaside-protocol security session;
 *  - CPERF_IPSEC: delegated to create_ipsec_session();
 *  - otherwise: plain symmetric session initialized with the
 *    cipher / auth / chained / AEAD transform built from the options
 *    and test vector.
 * Returns the session pointer (security/asym sessions are cast to the
 * common sym-session return type).
 * NOTE(review): numerous interior lines (braces, else keywords, error
 * paths, returns) are elided in this excerpt.
 */
776 static struct rte_cryptodev_sym_session *
777 cperf_create_session(struct rte_mempool *sess_mp,
778 struct rte_mempool *priv_mp,
780 const struct cperf_options *options,
781 const struct cperf_test_vector *test_vector,
784 struct rte_crypto_sym_xform cipher_xform;
785 struct rte_crypto_sym_xform auth_xform;
786 struct rte_crypto_sym_xform aead_xform;
787 struct rte_cryptodev_sym_session *sess = NULL;
788 void *asym_sess = NULL;
789 struct rte_crypto_asym_xform xform = {0};
/* Asymmetric mod-exp session: built straight from the modex vector. */
792 if (options->op_type == CPERF_ASYM_MODEX) {
794 xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
795 xform.modex.modulus.data = options->modex_data->modulus.data;
796 xform.modex.modulus.length = options->modex_data->modulus.len;
797 xform.modex.exponent.data = options->modex_data->exponent.data;
798 xform.modex.exponent.length = options->modex_data->exponent.len;
800 ret = rte_cryptodev_asym_session_create(dev_id, &xform,
801 sess_mp, &asym_sess);
803 RTE_LOG(ERR, USER1, "Asym session create failed\n");
808 #ifdef RTE_LIB_SECURITY
/* PDCP: cipher (optionally chained with auth) security session. */
812 if (options->op_type == CPERF_PDCP) {
813 /* Setup Cipher Parameters */
814 cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
815 cipher_xform.next = NULL;
816 cipher_xform.cipher.algo = options->cipher_algo;
817 cipher_xform.cipher.op = options->cipher_op;
818 cipher_xform.cipher.iv.offset = iv_offset;
819 cipher_xform.cipher.iv.length = 4;
821 /* cipher different than null */
822 if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
823 cipher_xform.cipher.key.data = test_vector->cipher_key.data;
824 cipher_xform.cipher.key.length = test_vector->cipher_key.length;
826 cipher_xform.cipher.key.data = NULL;
827 cipher_xform.cipher.key.length = 0;
830 /* Setup Auth Parameters */
831 if (options->auth_algo != 0) {
832 auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
833 auth_xform.next = NULL;
834 auth_xform.auth.algo = options->auth_algo;
835 auth_xform.auth.op = options->auth_op;
836 auth_xform.auth.iv.offset = iv_offset +
837 cipher_xform.cipher.iv.length;
839 /* auth different than null */
840 if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
841 auth_xform.auth.digest_length = options->digest_sz;
842 auth_xform.auth.key.length = test_vector->auth_key.length;
843 auth_xform.auth.key.data = test_vector->auth_key.data;
844 auth_xform.auth.iv.length = test_vector->auth_iv.length;
846 auth_xform.auth.digest_length = 0;
847 auth_xform.auth.key.length = 0;
848 auth_xform.auth.key.data = NULL;
849 auth_xform.auth.iv.length = 0;
852 cipher_xform.next = &auth_xform;
854 cipher_xform.next = NULL;
857 struct rte_security_session_conf sess_conf = {
858 .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
859 .protocol = RTE_SECURITY_PROTOCOL_PDCP,
862 .domain = options->pdcp_domain,
864 .sn_size = options->pdcp_sn_sz,
/* HFN is carried by the session or per-packet, depending on config. */
865 .hfn = options->pdcp_ses_hfn_en ?
866 PDCP_DEFAULT_HFN : 0,
867 .hfn_threshold = 0x70C0A,
868 .hfn_ovrd = !(options->pdcp_ses_hfn_en),
870 .crypto_xform = &cipher_xform
873 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
874 rte_cryptodev_get_sec_ctx(dev_id);
876 /* Create security session */
877 return (void *)rte_security_session_create(ctx,
878 &sess_conf, sess_mp, priv_mp);
881 if (options->op_type == CPERF_IPSEC) {
882 return create_ipsec_session(sess_mp, priv_mp, dev_id,
883 options, test_vector, iv_offset);
/* DOCSIS: cipher-only security session; downlink when encrypting. */
886 if (options->op_type == CPERF_DOCSIS) {
887 enum rte_security_docsis_direction direction;
889 cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
890 cipher_xform.next = NULL;
891 cipher_xform.cipher.algo = options->cipher_algo;
892 cipher_xform.cipher.op = options->cipher_op;
893 cipher_xform.cipher.iv.offset = iv_offset;
894 if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
895 cipher_xform.cipher.key.data =
896 test_vector->cipher_key.data;
897 cipher_xform.cipher.key.length =
898 test_vector->cipher_key.length;
899 cipher_xform.cipher.iv.length =
900 test_vector->cipher_iv.length;
902 cipher_xform.cipher.key.data = NULL;
903 cipher_xform.cipher.key.length = 0;
904 cipher_xform.cipher.iv.length = 0;
906 cipher_xform.next = NULL;
908 if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
909 direction = RTE_SECURITY_DOCSIS_DOWNLINK;
911 direction = RTE_SECURITY_DOCSIS_UPLINK;
913 struct rte_security_session_conf sess_conf = {
915 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
916 .protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
918 .direction = direction,
920 .crypto_xform = &cipher_xform
922 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
923 rte_cryptodev_get_sec_ctx(dev_id);
925 /* Create security session */
926 return (void *)rte_security_session_create(ctx,
927 &sess_conf, sess_mp, priv_mp);
/* Plain symmetric path: allocate the session, then init per op type. */
930 sess = rte_cryptodev_sym_session_create(sess_mp);
934 if (options->op_type == CPERF_CIPHER_ONLY) {
935 cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
936 cipher_xform.next = NULL;
937 cipher_xform.cipher.algo = options->cipher_algo;
938 cipher_xform.cipher.op = options->cipher_op;
939 cipher_xform.cipher.iv.offset = iv_offset;
941 /* cipher different than null */
942 if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
943 cipher_xform.cipher.key.data =
944 test_vector->cipher_key.data;
945 cipher_xform.cipher.key.length =
946 test_vector->cipher_key.length;
947 cipher_xform.cipher.iv.length =
948 test_vector->cipher_iv.length;
950 cipher_xform.cipher.key.data = NULL;
951 cipher_xform.cipher.key.length = 0;
952 cipher_xform.cipher.iv.length = 0;
954 /* create crypto session */
955 rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
960 } else if (options->op_type == CPERF_AUTH_ONLY) {
961 auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
962 auth_xform.next = NULL;
963 auth_xform.auth.algo = options->auth_algo;
964 auth_xform.auth.op = options->auth_op;
965 auth_xform.auth.iv.offset = iv_offset;
967 /* auth different than null */
968 if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
969 auth_xform.auth.digest_length =
971 auth_xform.auth.key.length =
972 test_vector->auth_key.length;
973 auth_xform.auth.key.data = test_vector->auth_key.data;
974 auth_xform.auth.iv.length =
975 test_vector->auth_iv.length;
977 auth_xform.auth.digest_length = 0;
978 auth_xform.auth.key.length = 0;
979 auth_xform.auth.key.data = NULL;
980 auth_xform.auth.iv.length = 0;
982 /* create crypto session */
983 rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
988 } else if (options->op_type == CPERF_CIPHER_THEN_AUTH
989 || options->op_type == CPERF_AUTH_THEN_CIPHER) {
993 cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
994 cipher_xform.next = NULL;
995 cipher_xform.cipher.algo = options->cipher_algo;
996 cipher_xform.cipher.op = options->cipher_op;
997 cipher_xform.cipher.iv.offset = iv_offset;
999 /* cipher different than null */
1000 if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
1001 cipher_xform.cipher.key.data =
1002 test_vector->cipher_key.data;
1003 cipher_xform.cipher.key.length =
1004 test_vector->cipher_key.length;
1005 cipher_xform.cipher.iv.length =
1006 test_vector->cipher_iv.length;
1008 cipher_xform.cipher.key.data = NULL;
1009 cipher_xform.cipher.key.length = 0;
1010 cipher_xform.cipher.iv.length = 0;
1016 auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1017 auth_xform.next = NULL;
1018 auth_xform.auth.algo = options->auth_algo;
1019 auth_xform.auth.op = options->auth_op;
/* Auth IV follows the cipher IV in the op private area. */
1020 auth_xform.auth.iv.offset = iv_offset +
1021 cipher_xform.cipher.iv.length;
1023 /* auth different than null */
1024 if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
1025 auth_xform.auth.digest_length = options->digest_sz;
1026 auth_xform.auth.iv.length = test_vector->auth_iv.length;
1027 auth_xform.auth.key.length =
1028 test_vector->auth_key.length;
1029 auth_xform.auth.key.data =
1030 test_vector->auth_key.data;
1032 auth_xform.auth.digest_length = 0;
1033 auth_xform.auth.key.length = 0;
1034 auth_xform.auth.key.data = NULL;
1035 auth_xform.auth.iv.length = 0;
1038 /* cipher then auth */
1039 if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
1040 cipher_xform.next = &auth_xform;
1041 /* create crypto session */
1042 rte_cryptodev_sym_session_init(dev_id,
1043 sess, &cipher_xform, priv_mp);
1044 } else { /* auth then cipher */
1045 auth_xform.next = &cipher_xform;
1046 /* create crypto session */
1047 rte_cryptodev_sym_session_init(dev_id,
1048 sess, &auth_xform, priv_mp);
1050 } else { /* options->op_type == CPERF_AEAD */
1051 aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1052 aead_xform.next = NULL;
1053 aead_xform.aead.algo = options->aead_algo;
1054 aead_xform.aead.op = options->aead_op;
1055 aead_xform.aead.iv.offset = iv_offset;
1057 aead_xform.aead.key.data =
1058 test_vector->aead_key.data;
1059 aead_xform.aead.key.length =
1060 test_vector->aead_key.length;
1061 aead_xform.aead.iv.length = test_vector->aead_iv.length;
1063 aead_xform.aead.digest_length = options->digest_sz;
1064 aead_xform.aead.aad_length =
1065 options->aead_aad_sz;
1067 /* Create crypto session */
1068 rte_cryptodev_sym_session_init(dev_id,
1069 sess, &aead_xform, priv_mp);
1076 cperf_get_op_functions(const struct cperf_options *options,
1077 struct cperf_op_fns *op_fns)
1079 memset(op_fns, 0, sizeof(struct cperf_op_fns));
1081 op_fns->sess_create = cperf_create_session;
1083 switch (options->op_type) {
1085 op_fns->populate_ops = cperf_set_ops_aead;
1088 case CPERF_AUTH_THEN_CIPHER:
1089 case CPERF_CIPHER_THEN_AUTH:
1090 op_fns->populate_ops = cperf_set_ops_cipher_auth;
1092 case CPERF_AUTH_ONLY:
1093 if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
1094 op_fns->populate_ops = cperf_set_ops_null_auth;
1096 op_fns->populate_ops = cperf_set_ops_auth;
1098 case CPERF_CIPHER_ONLY:
1099 if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
1100 op_fns->populate_ops = cperf_set_ops_null_cipher;
1102 op_fns->populate_ops = cperf_set_ops_cipher;
1104 case CPERF_ASYM_MODEX:
1105 op_fns->populate_ops = cperf_set_ops_asym;
1107 #ifdef RTE_LIB_SECURITY
1110 op_fns->populate_ops = cperf_set_ops_security;
1113 op_fns->populate_ops = cperf_set_ops_security_ipsec;