crypto/dpaa2_sec: support raw datapath API
drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <cryptodev_pmd.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

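/*
 * Minimal usage sketch of the generic raw datapath API against this PMD.
 * This is illustration only, not driver code: dev_id, qp_id, sess, vec,
 * ofs and the callback/output names are placeholders supplied by the
 * application.
 *
 *	int ctx_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, ctx_size, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data,
 *			&enq_status);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_ops, post_dequeue,
 *			out_user_data, 1, &n_success, &deq_status);
 */

/*
 * Per-queue-pair driver context, carved out of
 * rte_crypto_raw_dp_ctx::drv_ctx_data by dpaa2_sec_configure_raw_dp_ctx().
 * Only the session pointer is consumed in this file; the ring indexes and
 * cached counters are bookkeeping placeholders.
 */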
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

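/*
 * The FD builders below are stubs: every parameter is consumed via
 * RTE_SET_USED() and no frame descriptor is produced yet. Only
 * build_raw_dp_cipher_fd() further down is functional in this file.
 */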
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(sgl);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);
	RTE_SET_USED(userdata);
	RTE_SET_USED(fd);

	return 0;
}

static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(sgl);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);
	RTE_SET_USED(userdata);
	RTE_SET_USED(fd);

	return 0;
}

static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(sgl);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);
	RTE_SET_USED(userdata);
	RTE_SET_USED(fd);

	return 0;
}

static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(sgl);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);
	RTE_SET_USED(userdata);
	RTE_SET_USED(fd);

	return 0;
}

static int
build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(sgl);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);
	RTE_SET_USED(userdata);
	RTE_SET_USED(fd);

	return 0;
}

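/*
 * Build a compound frame descriptor for a plain cipher operation.
 *
 * FLE memory layout (allocated per operation, freed on dequeue by
 * sec_fd_to_userdata()):
 *	fle[0]   stores the userdata pointer and the session ctxt
 *	fle[1]   output frame list entry (cipher output), SG-extended
 *	fle[2]   input frame list entry (IV + data), SG-extended
 *	fle[3..] scatter-gather entries backing the two lists
 */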
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* SNOW3G/ZUC take bit lengths; they must be byte aligned here */
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		/* convert bit counts to byte counts */
		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	/* memory for the FLE list: ctxt FLE, op/ip FLEs and the SG entries */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* o/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

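/*
 * Enqueue a burst of raw symmetric crypto vectors: one FD is built per
 * source SGL via the session's build_raw_dp_fd() hook, then the FDs are
 * pushed to the TX FQ in chunks of at most dpaa2_eqcr_size, retrying up
 * to DPAA2_MAX_TX_RETRY_COUNT times while the portal is busy.
 */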
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}

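/* Single-operation enqueue is a stub; only the burst path is wired up. */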
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}

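/*
 * Recover the application userdata from a returned FD: the FD points at
 * the output FLE (fle + 1 of the builder's allocation), so fle - 1 is the
 * ctxt FLE that stored the userdata pointer. The per-operation FLE memory
 * allocated by the FD builder is freed here.
 */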
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (void *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}

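/*
 * Dequeue completed operations from the RX FQ with a volatile pull
 * command, translating each returned FD back to its userdata and
 * reporting per-operation status through post_dequeue().
 */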
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* This function receives frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and
		 * set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		post_dequeue(user_data, num_rx, status);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

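/*
 * Populate a raw datapath context for one queue pair: install the
 * enqueue/dequeue hooks, resolve the session from either a security or
 * a symmetric crypto session, and select the FD builder matching the
 * session's ctxt_type.
 */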
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, cryptodev_driver_id);
	else
		return -ENOTSUP;
	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
	else
		return -ENOTSUP;
	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}