/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
/* RTA header files */
#include <desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
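
/*
 * Per-queue-pair driver context for the raw data-path API. It is laid out
 * in the drv_ctx_data[] area of struct rte_crypto_raw_dp_ctx (see
 * dpaa_sec_get_dp_ctx_size() below) and carries the session along with
 * enqueue/dequeue bookkeeping.
 */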
struct dpaa_sec_raw_dp_ctx {
	dpaa_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};
static __rte_always_inline int
dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}
static __rte_always_inline int
dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (retval || !ctx) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, i.e. four entries, hence
	 * it is invoked once per four SG entries. This path runs for every
	 * packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	/* Cache the virtual-to-IOVA offset of this ctx for later conversions */
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
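
/*
 * Auth-only FD builder: not implemented in this datapath yet. It consumes
 * all parameters and returns NULL, so the enqueue path treats such
 * operations as errors.
 */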
static struct dpaa_sec_job *
build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
			struct rte_crypto_sgl *sgl,
			struct rte_crypto_sgl *dest_sgl,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *auth_iv,
			union rte_crypto_sym_ofs ofs,
			void *userdata)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(sgl);
	RTE_SET_USED(dest_sgl);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);
	RTE_SET_USED(userdata);

	return NULL;
}
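
/*
 * Build a QMan compound frame descriptor for a cipher-only operation.
 * cf->sg[0] is the output frame-list entry and cf->sg[1] the input one;
 * both are extensions pointing into the SG tables built from cf->sg[2]
 * onwards. The input table carries the IV first, followed by the source
 * segments; the output table uses dest_sgl when provided (out-of-place),
 * else sgl (in-place).
 */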
static struct dpaa_sec_job *
build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
			struct rte_crypto_sgl *sgl,
			struct rte_crypto_sgl *dest_sgl,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *auth_iv,
			union rte_crypto_sym_ofs ofs,
			void *userdata)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	dpaa_sec_session *ses =
		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	unsigned int i;
	uint8_t *IV_ptr = iv->va;
	int data_len, total_len = 0, data_offset;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* Support lengths in bits only for SNOW3G and ZUC */
	if (sgl->num > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	/* 2 frame-list entries + output segs + IV + input segs */
	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->userdata = (void *)userdata;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	if (dest_sgl) {
		/* Out-of-place: 1st seg */
		sg = &cf->sg[2];
		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
		sg->length = dest_sgl->vec[0].len - data_offset;
		sg->offset = data_offset;

		/* Successive segs */
		for (i = 1; i < dest_sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
			sg->length = dest_sgl->vec[i].len;
		}
	} else {
		/* In-place: 1st seg */
		sg = &cf->sg[2];
		qm_sg_entry_set64(sg, sgl->vec[0].iova);
		sg->length = sgl->vec[0].len - data_offset;
		sg->offset = data_offset;

		/* Successive segs */
		for (i = 1; i < sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, sgl->vec[i].iova);
			sg->length = sgl->vec[i].len;
		}
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, sgl->vec[0].iova);
	sg->length = sgl->vec[0].len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	for (i = 1; i < sgl->num; i++) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, sgl->vec[i].iova);
		sg->length = sgl->vec[i].len;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
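
/*
 * Enqueue a vector of symmetric crypto operations: build one compound FD
 * per element via the session's build_raw_dp_fd() hook and push them to
 * the session's inbound FQ in bursts of up to DPAA_SEC_BURST frames.
 */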
static uint32_t
dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	/* Transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses =
			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
	uint32_t flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	RTE_SET_USED(status);

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	while (vec->num) {
		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : vec->num;
		for (loop = 0; loop < frames_to_send; loop++) {
			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
					frames_to_send = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != dpaa_qp)) {
				DPAA_SEC_DP_ERR("Old: sess->qp = %p New qp = %p",
					ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES], dpaa_qp);
				frames_to_send = loop;
				goto send_pkts;
			}

			/* Clear the unused FD fields before sending */
			fd = &fds[loop];
			memset(fd, 0, sizeof(struct qm_fd));
			cf = ses->build_raw_dp_fd(drv_ctx,
						&vec->src_sgl[loop],
						&vec->dest_sgl[loop],
						&vec->iv[loop],
						&vec->digest[loop],
						&vec->auth_iv[loop],
						ofs,
						user_data[loop]);
			if (!cf) {
				DPAA_SEC_ERR("error: Improper packet contents for crypto operation");
				goto skip_tx;
			}

			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		vec->num -= frames_to_send;
		num_tx += frames_to_send;
	}

skip_tx:
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += vec->num - num_tx;

	return num_tx;
}
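
/*
 * Drain completed frames from the queue pair's outbound FQ using a
 * volatile dequeue (VDQCR) and report each result through the caller's
 * post_dequeue callback.
 */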
static int
dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
		uint8_t is_user_data_array,
		rte_cryptodev_raw_post_dequeue_t post_dequeue,
		int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;
	uint8_t is_success = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, ask for the exact count
	 * (QM_VDQCR_EXACT). Without that flag the volatile dequeue can
	 * return up to two more buffers than requested, so request two
	 * less in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output,
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		if (is_user_data_array)
			out_user_data[pkts] = ctx->userdata;
		else
			out_user_data[0] = ctx->userdata;

		if (!ctx->fd_status) {
			is_success = true;
		} else {
			is_success = false;
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
		}
		post_dequeue(ctx->op, pkts, is_success);
		pkts++;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
static __rte_always_inline uint32_t
dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);
	uint16_t num_rx = 0;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
			is_user_data_array, post_dequeue, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
static __rte_always_inline int
dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}
static __rte_always_inline void *
dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}
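
/*
 * Wire up the raw data-path context for a queue pair: fill in the
 * enqueue/dequeue ops, resolve the session from either a security or a
 * symmetric crypto session, and pick the FD builder that matches the
 * session context type.
 */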
int
dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa_sec_session *sess;
	struct dpaa_sec_raw_dp_ctx *dp_ctx;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa_sec_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
	else
		return -ENOTSUP;

	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;

	if (sess->ctxt == DPAA_SEC_CIPHER)
		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
	else if (sess->ctxt == DPAA_SEC_AUTH)
		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
	else
		return -ENOTSUP;

	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}
int
dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa_sec_raw_dp_ctx);
}
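
/*
 * Typical application-side usage of this datapath (a sketch against the
 * generic cryptodev raw DP API; dev_id, qp_id, sess, vec, ofs and the
 * callbacks are application-provided, not part of this driver):
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *	union rte_cryptodev_session_ctx sctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sctx, 0);
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data, &st);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_ops, post_cb,
 *			out_user_data, 1, &n_success, &st);
 */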