/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

struct qat_sym_dp_ctx {
        struct qat_sym_session *session;
        uint32_t tail;
        uint32_t head;
        uint16_t cached_enqueue;
        uint16_t cached_dequeue;
};

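/* Describe the input data in the request: a single segment is passed by
 * IOVA directly, while multiple segments are written into the
 * per-descriptor SGL cookie and the request is flagged as an SGL request.
 * Returns the total data length, or -1 on an empty or oversized vector.
 */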
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
                struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
        struct qat_queue *tx_queue;
        struct qat_sym_op_cookie *cookie;
        struct qat_sgl *list;
        uint32_t i;
        uint32_t total_len;

        if (likely(n_data_vecs == 1)) {
                req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
                        data[0].iova;
                req->comn_mid.src_length = req->comn_mid.dst_length =
                        data[0].len;
                return data[0].len;
        }

        if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
                return -1;

        total_len = 0;
        tx_queue = &qp->tx_q;

        ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
                        QAT_COMN_PTR_TYPE_SGL);
        cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
        list = (struct qat_sgl *)&cookie->qat_sgl_src;

        for (i = 0; i < n_data_vecs; i++) {
                list->buffers[i].len = data[i].len;
                list->buffers[i].resrvd = 0;
                list->buffers[i].addr = data[i].iova;
                /* both operands are uint32_t, so test for overflow
                 * without letting the sum wrap first
                 */
                if (data[i].len > UINT32_MAX - total_len) {
                        QAT_DP_LOG(ERR, "Message too long");
                        return -1;
                }
                total_len += data[i].len;
        }

        list->num_bufs = i;
        req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
                        cookie->qat_sgl_src_phys_addr;
        req->comn_mid.src_length = req->comn_mid.dst_length = 0;
        return total_len;
}

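/* Set the cipher IV: copied inline into the request when it fits the
 * firmware IV array, otherwise passed to the firmware by IOVA pointer.
 */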
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
                struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
                struct icp_qat_fw_la_bulk_req *qat_req)
{
        /* copy IV into request if it fits */
        if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
                rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
                                iv_len);
        else {
                ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
                                qat_req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_CIPH_IV_64BIT_PTR);
                cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
        }
}

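/* True when the crypto status of a firmware response indicates success. */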
#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
        (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
        ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

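/* Set n consecutive per-operation status entries to the given value. */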
static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
        uint32_t i;

        for (i = 0; i < n; i++)
                sta[i] = status;
}

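/* Room left on the ring: max in-flight minus what is already in flight
 * and what has been cached locally but not yet submitted, capped at n.
 */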
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
        RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

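/* Fill the cipher-specific request parameters: IV, offset and length. */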
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;

        cipher_param = (void *)&req->serv_specif_rqpars;

        /* cipher IV */
        set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
}

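/* Build one cipher-only request at the cached tail. The tail CSR is not
 * written here; submission happens later in qat_sym_dp_kick_tail().
 */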
static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest __rte_unused,
        struct rte_crypto_va_iova_ptr *aad __rte_unused,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

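/* Build a burst of cipher-only requests. Stops at the first malformed
 * data vector and marks the remaining status entries as failed; returns
 * the number of requests actually queued.
 */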
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
                        (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

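/* Fill the auth request parameters. SNOW 3G, KASUMI and ZUC pass the auth
 * IV by IOVA; GMAC (Galois) copies a 12-byte IV inline into the request.
 */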
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;

        cipher_param = (void *)&req->serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param +
                        ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

        auth_param->auth_off = ofs.ofs.auth.head;
        auth_param->auth_len = data_len - ofs.ofs.auth.head -
                        ofs.ofs.auth.tail;
        auth_param->auth_res_addr = digest->iova;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                auth_param->u1.aad_adr = auth_iv->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                        req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
                                ctx->auth_iv.length);
                break;
        default:
                break;
        }
}

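/* Build one auth-only request at the cached tail; submitted later by
 * qat_sym_dp_kick_tail().
 */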
static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv __rte_unused,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
                        (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

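/* Build a burst of auth-only requests; same early-stop and status
 * handling as the cipher burst path.
 */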
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_auth_job(ctx, req, &vec->digest[i],
                        &vec->auth_iv[i], ofs, (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

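/* Fill combined cipher + auth (chain) request parameters. Also locates
 * the IOVA of the end of the authenticated region so the digest-encrypted
 * case (digest appended to the ciphertext) can be detected; in that case
 * the buffer length is extended to cover the digest and the
 * digest-in-buffer flag is set. Returns -1 on inconsistent offsets or a
 * digest that cannot be located within the source vector.
 */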
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_vec *data,
        uint16_t n_data_vecs,
        struct rte_crypto_va_iova_ptr *cipher_iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        rte_iova_t auth_iova_end;
        int32_t cipher_len, auth_len;

        cipher_param = (void *)&req->serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param +
                        ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

        cipher_len = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
        auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

        if (unlikely(cipher_len < 0 || auth_len < 0))
                return -1;

        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = cipher_len;
        set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

        auth_param->auth_off = ofs.ofs.auth.head;
        auth_param->auth_len = auth_len;
        auth_param->auth_res_addr = digest->iova;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                auth_param->u1.aad_adr = auth_iv->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                break;
        default:
                break;
        }

        if (unlikely(n_data_vecs > 1)) {
                int auth_end_get = 0, i = n_data_vecs - 1;
                struct rte_crypto_vec *cvec = &data[0];
                uint32_t len;

                len = data_len - ofs.ofs.auth.tail;

                while (i >= 0 && len > 0) {
                        if (cvec->len >= len) {
                                auth_iova_end = cvec->iova + len;
                                len = 0;
                                auth_end_get = 1;
                                break;
                        }
                        len -= cvec->len;
                        i--;
                        cvec++;
                }

                if (unlikely(auth_end_get == 0))
                        return -1;
        } else
                auth_iova_end = data[0].iova + auth_param->auth_off +
                        auth_param->auth_len;

        /* Then check if digest-encrypted conditions are met */
        if ((auth_param->auth_off + auth_param->auth_len <
                cipher_param->cipher_offset +
                cipher_param->cipher_length) &&
                (digest->iova == auth_iova_end)) {
                /* Handle partial digest encryption */
                if (cipher_param->cipher_offset +
                                cipher_param->cipher_length <
                                auth_param->auth_off +
                                auth_param->auth_len +
                                ctx->digest_length)
                        req->comn_mid.dst_length =
                                req->comn_mid.src_length =
                                auth_param->auth_off +
                                auth_param->auth_len +
                                ctx->digest_length;
                struct icp_qat_fw_comn_req_hdr *header =
                        &req->comn_hdr;
                ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
                        header->serv_specif_flags,
                        ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        }

        return 0;
}

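/* Build one chained cipher + auth request at the cached tail. */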
static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *cipher_iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
                        cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
                return -1;

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

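/* Build a burst of chained cipher + auth requests; stops early if either
 * the data vector or the chain parameters are invalid.
 */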
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                if (unlikely(enqueue_one_chain_job(ctx, req,
                        vec->src_sgl[i].vec, vec->src_sgl[i].num,
                        &vec->iv[i], &vec->digest[i],
                        &vec->auth_iv[i], ofs, (uint32_t)data_len)))
                        break;

                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

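/* Fill AEAD request parameters. Single-pass capable devices handle AEAD
 * as a cipher job with SPC AAD/digest pointers. Otherwise GCM copies a
 * 12-byte IV inline, while CCM builds the B0 block and the AAD length
 * field inside the caller-supplied AAD buffer.
 */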
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param =
                (void *)&req->serv_specif_rqpars;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (void *)((uint8_t *)&req->serv_specif_rqpars +
                ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
        uint8_t *aad_data;
        uint8_t aad_ccm_real_len;
        uint8_t aad_len_field_sz;
        uint32_t msg_len_be;
        rte_iova_t aad_iova = 0;
        uint8_t q;

        /* CPM 1.7 uses single pass to treat AEAD as cipher operation */
        if (ctx->is_single_pass) {
                enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);

                if (ctx->is_ucs) {
                        /* QAT GEN4 devices with the unified crypto slice
                         * use the extended cipher request parameter layout
                         */
                        struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
                                (void *)&req->serv_specif_rqpars;
                        cipher_param_20->spc_aad_addr = aad->iova;
                        cipher_param_20->spc_auth_res_addr = digest->iova;
                } else {
                        cipher_param->spc_aad_addr = aad->iova;
                        cipher_param->spc_auth_res_addr = digest->iova;
                }

                return;
        }

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                        req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
                                ctx->cipher_iv.length);
                aad_iova = aad->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
                aad_data = aad->va;
                aad_iova = aad->iova;
                aad_ccm_real_len = 0;
                aad_len_field_sz = 0;
                msg_len_be = rte_bswap32((uint32_t)data_len -
                                ofs.ofs.cipher.head);

                if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
                        aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
                        aad_ccm_real_len = ctx->aad_len -
                                ICP_QAT_HW_CCM_AAD_B0_LEN -
                                ICP_QAT_HW_CCM_AAD_LEN_INFO;
                } else {
                        aad_data = iv->va;
                        aad_iova = iv->iova;
                }

                q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
                aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
                        aad_len_field_sz, ctx->digest_length, q);
                if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
                        memcpy(aad_data + ctx->cipher_iv.length +
                                ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
                                ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
                                (uint8_t *)&msg_len_be,
                                ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
                } else {
                        memcpy(aad_data + ctx->cipher_iv.length +
                                ICP_QAT_HW_CCM_NONCE_OFFSET,
                                (uint8_t *)&msg_len_be +
                                (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
                                - q), q);
                }

                if (aad_len_field_sz > 0) {
                        *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
                                rte_bswap16(aad_ccm_real_len);

                        if ((aad_ccm_real_len + aad_len_field_sz)
                                % ICP_QAT_HW_CCM_AAD_B0_LEN) {
                                uint8_t pad_len = 0;
                                uint8_t pad_idx = 0;

                                pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
                                        ((aad_ccm_real_len +
                                        aad_len_field_sz) %
                                        ICP_QAT_HW_CCM_AAD_B0_LEN);
                                pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
                                        aad_ccm_real_len +
                                        aad_len_field_sz;
                                memset(&aad_data[pad_idx], 0, pad_len);
                        }
                }

                rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
                        + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        (uint8_t *)iv->va +
                        ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
                *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
                        q - ICP_QAT_HW_CCM_NONCE_OFFSET;

                rte_memcpy((uint8_t *)aad->va +
                                ICP_QAT_HW_CCM_NONCE_OFFSET,
                        (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        ctx->cipher_iv.length);
                break;
        default:
                break;
        }

        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
        auth_param->auth_off = ofs.ofs.cipher.head;
        auth_param->auth_len = cipher_param->cipher_length;
        auth_param->auth_res_addr = digest->iova;
        auth_param->u1.aad_adr = aad_iova;
}

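/* Build one AEAD request at the cached tail. */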
static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
                (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

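/* Build a burst of AEAD requests; same early-stop and status handling as
 * the other burst paths.
 */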
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
                        &vec->aad[i], ofs, (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

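/* Dequeue up to n responses, where n comes from the get_dequeue_count
 * callback applied to the first response's opaque data, or from
 * max_nb_to_dequeue when no callback is given. Dequeuing stops at the
 * first ring slot still carrying the empty signature. The head CSR is
 * only advanced later, in qat_sym_dp_update_head().
 */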
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success_jobs, int *return_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct icp_qat_fw_comn_resp *resp;
        void *resp_opaque;
        uint32_t i, n, inflight;
        uint32_t head;
        uint8_t status;

        *n_success_jobs = 0;
        *return_status = 0;
        head = dp_ctx->head;

        inflight = qp->enqueued - qp->dequeued;
        if (unlikely(inflight == 0))
                return 0;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        head);
        /* no operation ready */
        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return 0;

        resp_opaque = (void *)(uintptr_t)resp->opaque_data;
        /* get the dequeue count */
        if (get_dequeue_count) {
                n = get_dequeue_count(resp_opaque);
                if (unlikely(n == 0))
                        return 0;
        } else {
                if (unlikely(max_nb_to_dequeue == 0))
                        return 0;
                n = max_nb_to_dequeue;
        }

        out_user_data[0] = resp_opaque;
        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
        post_dequeue(resp_opaque, 0, status);
        *n_success_jobs += status;

        head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

        /* dequeue is already complete when n == 1 */
        if (unlikely(n == 1)) {
                i = 1;
                goto end_deq;
        }

        if (is_user_data_array) {
                for (i = 1; i < n; i++) {
                        resp = (struct icp_qat_fw_comn_resp *)(
                                (uint8_t *)rx_queue->base_addr + head);
                        if (unlikely(*(uint32_t *)resp ==
                                        ADF_RING_EMPTY_SIG))
                                goto end_deq;
                        out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
                        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                        *n_success_jobs += status;
                        post_dequeue(out_user_data[i], i, status);
                        head = (head + rx_queue->msg_size) &
                                        rx_queue->modulo_mask;
                }

                goto end_deq;
        }

        /* opaque is not array */
        for (i = 1; i < n; i++) {
                resp = (struct icp_qat_fw_comn_resp *)(
                        (uint8_t *)rx_queue->base_addr + head);
                /* check for an empty slot before reading the status */
                if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                        goto end_deq;
                status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                head = (head + rx_queue->msg_size) &
                                rx_queue->modulo_mask;
                post_dequeue(resp_opaque, i, status);
                *n_success_jobs += status;
        }

end_deq:
        dp_ctx->head = head;
        dp_ctx->cached_dequeue += i;
        return i;
}

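/* Dequeue a single response and return its opaque user data, or NULL
 * when the next ring slot has not been written back yet.
 */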
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
                enum rte_crypto_op_status *op_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        register struct icp_qat_fw_comn_resp *resp;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        dp_ctx->head);

        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return NULL;

        dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
                        rx_queue->modulo_mask;
        dp_ctx->cached_dequeue++;

        *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
                        RTE_CRYPTO_OP_STATUS_SUCCESS :
                        RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        *dequeue_status = 0;
        return (void *)(uintptr_t)resp->opaque_data;
}

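/* enqueue_done callback: check that n matches what was cached since the
 * last kick, then publish the new tail to hardware through the ring CSR.
 */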
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_enqueue != n))
                return -1;

        qp->enqueued += n;
        qp->stats.enqueued_count += n;

        tx_queue->tail = dp_ctx->tail;

        WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
                        tx_queue->hw_bundle_number,
                        tx_queue->hw_queue_number, tx_queue->tail);
        tx_queue->csr_tail = tx_queue->tail;
        dp_ctx->cached_enqueue = 0;

        return 0;
}

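/* dequeue_done callback: account the dequeued responses, re-mark the
 * consumed descriptors with the empty signature and lazily advance the
 * head CSR once enough responses have been processed.
 */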
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_dequeue != n))
                return -1;

        rx_queue->head = dp_ctx->head;
        rx_queue->nb_processed_responses += n;
        qp->dequeued += n;
        qp->stats.dequeued_count += n;
        if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
                uint32_t old_head, new_head;
                uint32_t max_head;

                old_head = rx_queue->csr_head;
                new_head = rx_queue->head;
                max_head = qp->nb_descriptors * rx_queue->msg_size;

                /* write out free descriptors */
                void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

                if (new_head < old_head) {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
                                        max_head - old_head);
                        memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
                                        new_head);
                } else {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
                                        old_head);
                }
                rx_queue->nb_processed_responses = 0;
                rx_queue->csr_head = new_head;

                /* write current head to CSR */
                WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
                        rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
                        new_head);
        }

        dp_ctx->cached_dequeue = 0;
        return 0;
}

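/* Install the raw data-path handlers on a queue pair and bind the
 * session. The enqueue handlers are selected from the session's firmware
 * command and algorithms: AEAD, chain, auth-only or cipher-only. Only
 * session-based operation is supported.
 */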
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
        struct qat_qp *qp;
        struct qat_sym_session *ctx;
        struct qat_sym_dp_ctx *dp_ctx;

        qp = dev->data->queue_pairs[qp_id];
        dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

        if (!is_update) {
                memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
                                sizeof(struct qat_sym_dp_ctx));
                raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
                dp_ctx->tail = qp->tx_q.tail;
                dp_ctx->head = qp->rx_q.head;
                dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
        }

        if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
                return -EINVAL;

        ctx = (struct qat_sym_session *)get_sym_session_private_data(
                        session_ctx.crypto_sess, qat_sym_driver_id);

        dp_ctx->session = ctx;

        raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
        raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
        raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
        raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

        if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
                        !ctx->is_gmac) {
                /* AES-GCM or AES-CCM */
                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
                        (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
                        && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
                        && ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_aead_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
                } else {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_chain_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
                }
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
                raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
                raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
                        ctx->qat_cipher_alg ==
                                ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_aead_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
                } else {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_cipher_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
                }
        } else
                return -1;

        return 0;
}

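/* Size of the per-queue-pair driver context reported to the framework. */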
int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
        return sizeof(struct qat_sym_dp_ctx);
}
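
/*
 * Usage sketch: these handlers are not called directly; an application
 * reaches them through the cryptodev raw data-path API declared in
 * rte_cryptodev.h. A rough, minimal flow (error handling omitted, exact
 * prototypes may differ between DPDK releases) for a device, queue pair
 * and symmetric session that are already set up:
 *
 *	size_t sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *dp = malloc(sz);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp,
 *		RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *
 *	n = rte_cryptodev_raw_enqueue_burst(dp, &vec, ofs, udata, &st);
 *	rte_cryptodev_raw_enqueue_done(dp, n);    // -> qat_sym_dp_kick_tail
 *	m = rte_cryptodev_raw_dequeue_burst(dp, ...);
 *					// -> qat_sym_dp_dequeue_burst
 *	rte_cryptodev_raw_dequeue_done(dp, m);    // -> qat_sym_dp_update_head
 *
 * The cryptodev unit tests (app/test/test_cryptodev.c) exercise this path
 * end to end.
 */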