+/* Populate @nb_ops asymmetric (modexp) crypto ops and attach the session.
+ * All ops share the same base/modulus taken from the file-scope perf
+ * vectors; per-op results are written by the PMD but never inspected.
+ * Always returns 0.
+ */
+static int
+cperf_set_ops_asym(struct rte_crypto_op **ops,
+		   uint32_t src_buf_offset __rte_unused,
+		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
+		   struct rte_cryptodev_sym_session *sess,
+		   const struct cperf_options *options __rte_unused,
+		   const struct cperf_test_vector *test_vector __rte_unused,
+		   uint16_t iv_offset __rte_unused,
+		   uint32_t *imix_idx __rte_unused,
+		   uint64_t *tsc_start __rte_unused)
+{
+	uint16_t i;
+	/* Must be static: the ops prepared here are enqueued after this
+	 * function returns and the PMD then writes the modexp result into
+	 * this buffer.  A stack-local array would be a dangling pointer by
+	 * that time (use of dead stack memory).  The perf test never checks
+	 * the result contents, so one shared buffer is sufficient.
+	 */
+	static uint8_t result[sizeof(perf_mod_p)];
+	struct rte_cryptodev_asym_session *asym_sess = (void *)sess;
+
+	for (i = 0; i < nb_ops; i++) {
+		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
+
+		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		asym_op->modex.base.data = perf_base;
+		asym_op->modex.base.length = sizeof(perf_base);
+		asym_op->modex.result.data = result;
+		asym_op->modex.result.length = sizeof(result);
+		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
+	}
+	return 0;
+}
+
+#ifdef RTE_LIB_SECURITY
+/* Fill the mbuf with the IPsec plaintext test vector and stamp the IPv4
+ * total-length field with the mbuf data length.  Only done for the
+ * encrypt (outbound) direction; decrypt-direction mbufs are left alone.
+ */
+static void
+test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
+			const struct cperf_test_vector *test_vector)
+{
+	struct rte_ipv4_hdr *ip;
+
+	/* Nothing to populate unless we are encrypting. */
+	if (options->aead_op != RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+	    options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		return;
+
+	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
+	memcpy(ip, test_vector->plaintext.data, m->data_len);
+	ip->total_length = rte_cpu_to_be_16(m->data_len);
+}
+
+/* Prepare @nb_ops crypto ops for a security (PDCP/IPsec/DOCSIS) session:
+ * attach the session, wire up m_src/m_dst from the per-op buffer offsets,
+ * size the mbufs per test options and, for DOCSIS, set auth/cipher data
+ * offsets.  For IPsec throughput runs the vector-populate cost is measured
+ * separately and accumulated into @tsc_start so it can be excluded from
+ * the reported cycle counts.  Always returns 0.
+ *
+ * Note: src_buf_offset, dst_buf_offset and iv_offset are all used below,
+ * so they must not carry __rte_unused.
+ */
+static int
+cperf_set_ops_security(struct rte_crypto_op **ops,
+		uint32_t src_buf_offset,
+		uint32_t dst_buf_offset,
+		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+		const struct cperf_options *options,
+		const struct cperf_test_vector *test_vector,
+		uint16_t iv_offset, uint32_t *imix_idx,
+		uint64_t *tsc_start)
+{
+	uint64_t tsc_start_temp, tsc_end_temp;
+	uint16_t i;
+
+	for (i = 0; i < nb_ops; i++) {
+		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+		struct rte_security_session *sec_sess =
+			(struct rte_security_session *)sess;
+		uint32_t buf_sz;
+
+		/* The IV area of the op doubles as per-packet HFN storage. */
+		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
+					uint32_t *, iv_offset);
+		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
+
+		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		rte_security_attach_session(ops[i], sec_sess);
+		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+							src_buf_offset);
+
+		if (options->op_type == CPERF_PDCP ||
+				options->op_type == CPERF_IPSEC) {
+			/* In case of IPsec, headroom is consumed by PMD,
+			 * hence resetting it.
+			 */
+			sym_op->m_src->data_off = options->headroom_sz;
+
+			sym_op->m_src->buf_len = options->segment_sz;
+			sym_op->m_src->data_len = options->test_buffer_size;
+			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
+
+			if ((options->op_type == CPERF_IPSEC) &&
+					(options->test_file == NULL) &&
+					(options->test == CPERF_TEST_TYPE_THROUGHPUT)) {
+				/* Time the vector populate so its cost can be
+				 * credited back to the measurement start.
+				 */
+				tsc_start_temp = rte_rdtsc_precise();
+				test_ipsec_vec_populate(sym_op->m_src, options,
+							test_vector);
+				tsc_end_temp = rte_rdtsc_precise();
+
+				*tsc_start += (tsc_end_temp - tsc_start_temp);
+			}
+		}
+
+		if (options->op_type == CPERF_DOCSIS) {
+			if (options->imix_distribution_count) {
+				buf_sz = options->imix_buffer_sizes[*imix_idx];
+				*imix_idx = (*imix_idx + 1) % options->pool_sz;
+			} else
+				buf_sz = options->test_buffer_size;
+
+			sym_op->m_src->buf_len = options->segment_sz;
+			sym_op->m_src->data_len = buf_sz;
+			sym_op->m_src->pkt_len = buf_sz;
+
+			/* DOCSIS header is not CRC'ed */
+			sym_op->auth.data.offset = options->docsis_hdr_sz;
+			sym_op->auth.data.length = buf_sz -
+				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
+			/*
+			 * DOCSIS header and SRC and DST MAC addresses are not
+			 * ciphered
+			 */
+			sym_op->cipher.data.offset = sym_op->auth.data.offset +
+				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
+			sym_op->cipher.data.length = buf_sz -
+				sym_op->cipher.data.offset;
+		}
+
+		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
+		if (dst_buf_offset == 0)
+			sym_op->m_dst = NULL;
+		else
+			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+							dst_buf_offset);
+	}
+
+	return 0;
+}
+#endif
+