dpdk.git / drivers/crypto/qat/qat_sym_hw_dp.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <cryptodev_pmd.h>
6
7 #include "adf_transport_access_macros.h"
8 #include "icp_qat_fw.h"
9 #include "icp_qat_fw_la.h"
10
11 #include "qat_sym.h"
12 #include "qat_sym_pmd.h"
13 #include "qat_sym_session.h"
14 #include "qat_qp.h"
15
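/*
 * Raw data-path context private to a queue pair. The tail/head fields
 * shadow the TX/RX ring positions for requests built or responses read
 * through this API but not yet committed to hardware; cached_enqueue and
 * cached_dequeue count those pending operations until the application
 * invokes the enqueue_done/dequeue_done handlers.
 */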
16 struct qat_sym_dp_ctx {
17         struct qat_sym_session *session;
18         uint32_t tail;
19         uint32_t head;
20         uint16_t cached_enqueue;
21         uint16_t cached_dequeue;
22 };
23
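/*
 * Fill the request's source/destination buffer descriptors from the
 * caller-provided vector array. A single vector is programmed as a flat
 * buffer; multiple vectors are copied into the SGL held in the op cookie
 * associated with this request's ring slot. Returns the total data
 * length, or -1 on an invalid vector count or length overflow.
 */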
24 static __rte_always_inline int32_t
25 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
26                 struct rte_crypto_vec *data, uint16_t n_data_vecs)
27 {
28         struct qat_queue *tx_queue;
29         struct qat_sym_op_cookie *cookie;
30         struct qat_sgl *list;
31         uint32_t i;
32         uint32_t total_len;
33
34         if (likely(n_data_vecs == 1)) {
35                 req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
36                         data[0].iova;
37                 req->comn_mid.src_length = req->comn_mid.dst_length =
38                         data[0].len;
39                 return data[0].len;
40         }
41
42         if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
43                 return -1;
44
45         total_len = 0;
46         tx_queue = &qp->tx_q;
47
48         ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
49                         QAT_COMN_PTR_TYPE_SGL);
50         cookie = qp->op_cookies[((uintptr_t)req - (uintptr_t)tx_queue->base_addr) >> tx_queue->trailz];
51         list = (struct qat_sgl *)&cookie->qat_sgl_src;
52
53         for (i = 0; i < n_data_vecs; i++) {
54                 list->buffers[i].len = data[i].len;
55                 list->buffers[i].resrvd = 0;
56                 list->buffers[i].addr = data[i].iova;
57                 if (data[i].len > UINT32_MAX - total_len) {
58                         QAT_DP_LOG(ERR, "Message too long");
59                         return -1;
60                 }
61                 total_len += data[i].len;
62         }
63
64         list->num_bufs = i;
65         req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
66                         cookie->qat_sgl_src_phys_addr;
67         req->comn_mid.src_length = req->comn_mid.dst_length = 0;
68         return total_len;
69 }
70
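/*
 * Program the cipher IV: small IVs are embedded directly in the request's
 * IV array, larger ones are referenced by IOVA with the 64-bit IV pointer
 * flag set in the request header.
 */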
71 static __rte_always_inline void
72 set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
73                 struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
74                 struct icp_qat_fw_la_bulk_req *qat_req)
75 {
76         /* copy IV into request if it fits */
77         if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
78                 rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
79                                 iv_len);
80         else {
81                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
82                                 qat_req->comn_hdr.serv_specif_flags,
83                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
84                 cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
85         }
86 }
87
88 #define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
89         (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
90         ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
91
92 static __rte_always_inline void
93 qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
94 {
95         uint32_t i;
96
97         for (i = 0; i < n; i++)
98                 sta[i] = status;
99 }
100
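/*
 * Number of requests that still fit in the ring, minus those already
 * cached in this context but not yet submitted to hardware, capped at n.
 */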
101 #define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
102         RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
103
104 static __rte_always_inline void
105 enqueue_one_cipher_job(struct qat_sym_session *ctx,
106         struct icp_qat_fw_la_bulk_req *req,
107         struct rte_crypto_va_iova_ptr *iv,
108         union rte_crypto_sym_ofs ofs, uint32_t data_len)
109 {
110         struct icp_qat_fw_la_cipher_req_params *cipher_param;
111
112         cipher_param = (void *)&req->serv_specif_rqpars;
113
114         /* cipher IV */
115         set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
116         cipher_param->cipher_offset = ofs.ofs.cipher.head;
117         cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
118                         ofs.ofs.cipher.tail;
119 }
120
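/*
 * Single-operation enqueue for cipher-only sessions: copy the session's
 * firmware request template into the next TX slot, describe the data
 * buffers, attach the caller's opaque pointer and fill the cipher
 * parameters. The shadow tail only advances locally; hardware is not
 * notified until qat_sym_dp_kick_tail(). The auth, chain and AEAD
 * single-operation variants below follow the same pattern.
 */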
121 static __rte_always_inline int
122 qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
123         struct rte_crypto_vec *data, uint16_t n_data_vecs,
124         union rte_crypto_sym_ofs ofs,
125         struct rte_crypto_va_iova_ptr *iv,
126         struct rte_crypto_va_iova_ptr *digest __rte_unused,
127         struct rte_crypto_va_iova_ptr *aad __rte_unused,
128         void *user_data)
129 {
130         struct qat_qp *qp = qp_data;
131         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
132         struct qat_queue *tx_queue = &qp->tx_q;
133         struct qat_sym_session *ctx = dp_ctx->session;
134         struct icp_qat_fw_la_bulk_req *req;
135         int32_t data_len;
136         uint32_t tail = dp_ctx->tail;
137
138         req = (struct icp_qat_fw_la_bulk_req *)(
139                 (uint8_t *)tx_queue->base_addr + tail);
140         tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
141         rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
142         rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
143         data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
144         if (unlikely(data_len < 0))
145                 return -1;
146         req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
147
148         enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
149
150         dp_ctx->tail = tail;
151         dp_ctx->cached_enqueue++;
152
153         return 0;
154 }
155
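/*
 * Burst enqueue for cipher-only sessions: clamp the request count to the
 * free ring space, build one request per element of the vector and, on
 * the first failure, mark the remaining status entries as -1. Returns the
 * number of requests actually built. The auth, chain and AEAD burst
 * variants below follow the same structure.
 */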
156 static __rte_always_inline uint32_t
157 qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
158         struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
159         void *user_data[], int *status)
160 {
161         struct qat_qp *qp = qp_data;
162         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
163         struct qat_queue *tx_queue = &qp->tx_q;
164         struct qat_sym_session *ctx = dp_ctx->session;
165         uint32_t i, n;
166         uint32_t tail;
167         struct icp_qat_fw_la_bulk_req *req;
168         int32_t data_len;
169
170         n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
171         if (unlikely(n == 0)) {
172                 qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
173                 *status = 0;
174                 return 0;
175         }
176
177         tail = dp_ctx->tail;
178
179         for (i = 0; i < n; i++) {
180                 req  = (struct icp_qat_fw_la_bulk_req *)(
181                         (uint8_t *)tx_queue->base_addr + tail);
182                 rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
183
184                 data_len = qat_sym_dp_parse_data_vec(qp, req,
185                         vec->src_sgl[i].vec,
186                         vec->src_sgl[i].num);
187                 if (unlikely(data_len < 0))
188                         break;
189                 req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
190                 enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
191                         (uint32_t)data_len);
192                 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
193         }
194
195         if (unlikely(i < n))
196                 qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
197
198         dp_ctx->tail = tail;
199         dp_ctx->cached_enqueue += i;
200         *status = 0;
201         return i;
202 }
203
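/*
 * Fill the authentication request parameters. For SNOW 3G, KASUMI and
 * ZUC the auth IV is passed by IOVA through the AAD address field; for
 * GCM/GMAC the 12-byte IV is copied into the request's cipher IV array.
 */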
204 static __rte_always_inline void
205 enqueue_one_auth_job(struct qat_sym_session *ctx,
206         struct icp_qat_fw_la_bulk_req *req,
207         struct rte_crypto_va_iova_ptr *digest,
208         struct rte_crypto_va_iova_ptr *auth_iv,
209         union rte_crypto_sym_ofs ofs, uint32_t data_len)
210 {
211         struct icp_qat_fw_la_cipher_req_params *cipher_param;
212         struct icp_qat_fw_la_auth_req_params *auth_param;
213
214         cipher_param = (void *)&req->serv_specif_rqpars;
215         auth_param = (void *)((uint8_t *)cipher_param +
216                         ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
217
218         auth_param->auth_off = ofs.ofs.auth.head;
219         auth_param->auth_len = data_len - ofs.ofs.auth.head -
220                         ofs.ofs.auth.tail;
221         auth_param->auth_res_addr = digest->iova;
222
223         switch (ctx->qat_hash_alg) {
224         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
225         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
226         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
227                 auth_param->u1.aad_adr = auth_iv->iova;
228                 break;
229         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
230         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
231                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
232                         req->comn_hdr.serv_specif_flags,
233                                 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
234                 rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
235                                 ctx->auth_iv.length);
236                 break;
237         default:
238                 break;
239         }
240 }
241
242 static __rte_always_inline int
243 qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
244         struct rte_crypto_vec *data, uint16_t n_data_vecs,
245         union rte_crypto_sym_ofs ofs,
246         struct rte_crypto_va_iova_ptr *iv __rte_unused,
247         struct rte_crypto_va_iova_ptr *digest,
248         struct rte_crypto_va_iova_ptr *auth_iv,
249         void *user_data)
250 {
251         struct qat_qp *qp = qp_data;
252         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
253         struct qat_queue *tx_queue = &qp->tx_q;
254         struct qat_sym_session *ctx = dp_ctx->session;
255         struct icp_qat_fw_la_bulk_req *req;
256         int32_t data_len;
257         uint32_t tail = dp_ctx->tail;
258
259         req = (struct icp_qat_fw_la_bulk_req *)(
260                 (uint8_t *)tx_queue->base_addr + tail);
261         tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
262         rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
263         rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
264         data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
265         if (unlikely(data_len < 0))
266                 return -1;
267         req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
268
269         enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
270                         (uint32_t)data_len);
271
272         dp_ctx->tail = tail;
273         dp_ctx->cached_enqueue++;
274
275         return 0;
276 }
277
278 static __rte_always_inline uint32_t
279 qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
280         struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
281         void *user_data[], int *status)
282 {
283         struct qat_qp *qp = qp_data;
284         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
285         struct qat_queue *tx_queue = &qp->tx_q;
286         struct qat_sym_session *ctx = dp_ctx->session;
287         uint32_t i, n;
288         uint32_t tail;
289         struct icp_qat_fw_la_bulk_req *req;
290         int32_t data_len;
291
292         n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
293         if (unlikely(n == 0)) {
294                 qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
295                 *status = 0;
296                 return 0;
297         }
298
299         tail = dp_ctx->tail;
300
301         for (i = 0; i < n; i++) {
302                 req  = (struct icp_qat_fw_la_bulk_req *)(
303                         (uint8_t *)tx_queue->base_addr + tail);
304                 rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
305
306                 data_len = qat_sym_dp_parse_data_vec(qp, req,
307                         vec->src_sgl[i].vec,
308                         vec->src_sgl[i].num);
309                 if (unlikely(data_len < 0))
310                         break;
311                 req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
312                 enqueue_one_auth_job(ctx, req, &vec->digest[i],
313                         &vec->auth_iv[i], ofs, (uint32_t)data_len);
314                 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
315         }
316
317         if (unlikely(i < n))
318                 qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
319
320         dp_ctx->tail = tail;
321         dp_ctx->cached_enqueue += i;
322         *status = 0;
323         return i;
324 }
325
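/*
 * Fill both cipher and auth parameters for chained (cipher + hash)
 * operations. For SGL input the IOVA at the end of the authenticated
 * region is located by walking the vectors; if the digest sits right at
 * that boundary and inside the ciphered range, the digest-in-buffer flag
 * is set and, when needed, the source/destination length is extended to
 * cover the digest (digest-encrypted case).
 */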
326 static __rte_always_inline int
327 enqueue_one_chain_job(struct qat_sym_session *ctx,
328         struct icp_qat_fw_la_bulk_req *req,
329         struct rte_crypto_vec *data,
330         uint16_t n_data_vecs,
331         struct rte_crypto_va_iova_ptr *cipher_iv,
332         struct rte_crypto_va_iova_ptr *digest,
333         struct rte_crypto_va_iova_ptr *auth_iv,
334         union rte_crypto_sym_ofs ofs, uint32_t data_len)
335 {
336         struct icp_qat_fw_la_cipher_req_params *cipher_param;
337         struct icp_qat_fw_la_auth_req_params *auth_param;
338         rte_iova_t auth_iova_end;
339         int32_t cipher_len, auth_len;
340
341         cipher_param = (void *)&req->serv_specif_rqpars;
342         auth_param = (void *)((uint8_t *)cipher_param +
343                         ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
344
345         cipher_len = data_len - ofs.ofs.cipher.head -
346                         ofs.ofs.cipher.tail;
347         auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
348
349         if (unlikely(cipher_len < 0 || auth_len < 0))
350                 return -1;
351
352         cipher_param->cipher_offset = ofs.ofs.cipher.head;
353         cipher_param->cipher_length = cipher_len;
354         set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
355
356         auth_param->auth_off = ofs.ofs.auth.head;
357         auth_param->auth_len = auth_len;
358         auth_param->auth_res_addr = digest->iova;
359
360         switch (ctx->qat_hash_alg) {
361         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
362         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
363         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
364                 auth_param->u1.aad_adr = auth_iv->iova;
365                 break;
366         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
367         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
368                 break;
369         default:
370                 break;
371         }
372
373         if (unlikely(n_data_vecs > 1)) {
374                 int auth_end_get = 0, i = n_data_vecs - 1;
375                 struct rte_crypto_vec *cvec = &data[0];
376                 uint32_t len;
377
378                 len = data_len - ofs.ofs.auth.tail;
379
380                 while (i >= 0 && len > 0) {
381                         if (cvec->len >= len) {
382                                 auth_iova_end = cvec->iova + len;
383                                 len = 0;
384                                 auth_end_get = 1;
385                                 break;
386                         }
387                         len -= cvec->len;
388                         i--;
389                         cvec++;
390                 }
391
392                 if (unlikely(auth_end_get == 0))
393                         return -1;
394         } else
395                 auth_iova_end = data[0].iova + auth_param->auth_off +
396                         auth_param->auth_len;
397
398         /* Then check if digest-encrypted conditions are met */
399         if ((auth_param->auth_off + auth_param->auth_len <
400                 cipher_param->cipher_offset +
401                 cipher_param->cipher_length) &&
402                 (digest->iova == auth_iova_end)) {
403                 /* Handle partial digest encryption */
404                 if (cipher_param->cipher_offset +
405                                 cipher_param->cipher_length <
406                                 auth_param->auth_off +
407                                 auth_param->auth_len +
408                                 ctx->digest_length)
409                         req->comn_mid.dst_length =
410                                 req->comn_mid.src_length =
411                                 auth_param->auth_off +
412                                 auth_param->auth_len +
413                                 ctx->digest_length;
414                 struct icp_qat_fw_comn_req_hdr *header =
415                         &req->comn_hdr;
416                 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
417                         header->serv_specif_flags,
418                         ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
419         }
420
421         return 0;
422 }
423
424 static __rte_always_inline int
425 qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
426         struct rte_crypto_vec *data, uint16_t n_data_vecs,
427         union rte_crypto_sym_ofs ofs,
428         struct rte_crypto_va_iova_ptr *cipher_iv,
429         struct rte_crypto_va_iova_ptr *digest,
430         struct rte_crypto_va_iova_ptr *auth_iv,
431         void *user_data)
432 {
433         struct qat_qp *qp = qp_data;
434         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
435         struct qat_queue *tx_queue = &qp->tx_q;
436         struct qat_sym_session *ctx = dp_ctx->session;
437         struct icp_qat_fw_la_bulk_req *req;
438         int32_t data_len;
439         uint32_t tail = dp_ctx->tail;
440
441         req = (struct icp_qat_fw_la_bulk_req *)(
442                 (uint8_t *)tx_queue->base_addr + tail);
443         tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
444         rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
445         rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
446         data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
447         if (unlikely(data_len < 0))
448                 return -1;
449         req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
450
451         if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
452                         cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
453                 return -1;
454
455         dp_ctx->tail = tail;
456         dp_ctx->cached_enqueue++;
457
458         return 0;
459 }
460
461 static __rte_always_inline uint32_t
462 qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
463         struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
464         void *user_data[], int *status)
465 {
466         struct qat_qp *qp = qp_data;
467         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
468         struct qat_queue *tx_queue = &qp->tx_q;
469         struct qat_sym_session *ctx = dp_ctx->session;
470         uint32_t i, n;
471         uint32_t tail;
472         struct icp_qat_fw_la_bulk_req *req;
473         int32_t data_len;
474
475         n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
476         if (unlikely(n == 0)) {
477                 qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
478                 *status = 0;
479                 return 0;
480         }
481
482         tail = dp_ctx->tail;
483
484         for (i = 0; i < n; i++) {
485                 req  = (struct icp_qat_fw_la_bulk_req *)(
486                         (uint8_t *)tx_queue->base_addr + tail);
487                 rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
488
489                 data_len = qat_sym_dp_parse_data_vec(qp, req,
490                         vec->src_sgl[i].vec,
491                         vec->src_sgl[i].num);
492                 if (unlikely(data_len < 0))
493                         break;
494                 req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
495                 if (unlikely(enqueue_one_chain_job(ctx, req,
496                         vec->src_sgl[i].vec, vec->src_sgl[i].num,
497                         &vec->iv[i], &vec->digest[i],
498                         &vec->auth_iv[i], ofs, (uint32_t)data_len)))
499                         break;
500
501                 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
502         }
503
504         if (unlikely(i < n))
505                 qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
506
507         dp_ctx->tail = tail;
508         dp_ctx->cached_enqueue += i;
509         *status = 0;
510         return i;
511 }
512
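/*
 * Fill the AEAD request parameters. When the session uses the single-pass
 * path the operation is programmed as a cipher job with the AAD and
 * digest IOVAs set in the cipher parameters. Otherwise GCM sets the
 * 12-byte IV flag and copies the IV, while CCM builds the B0 block and
 * AAD length encoding in the caller's AAD buffer before pointing the
 * request at it.
 */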
513 static __rte_always_inline void
514 enqueue_one_aead_job(struct qat_sym_session *ctx,
515         struct icp_qat_fw_la_bulk_req *req,
516         struct rte_crypto_va_iova_ptr *iv,
517         struct rte_crypto_va_iova_ptr *digest,
518         struct rte_crypto_va_iova_ptr *aad,
519         union rte_crypto_sym_ofs ofs, uint32_t data_len)
520 {
521         struct icp_qat_fw_la_cipher_req_params *cipher_param =
522                 (void *)&req->serv_specif_rqpars;
523         struct icp_qat_fw_la_auth_req_params *auth_param =
524                 (void *)((uint8_t *)&req->serv_specif_rqpars +
525                 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
526         uint8_t *aad_data;
527         uint8_t aad_ccm_real_len;
528         uint8_t aad_len_field_sz;
529         uint32_t msg_len_be;
530         rte_iova_t aad_iova = 0;
531         uint8_t q;
532
533         /* CPM 1.7 uses single pass to treat AEAD as cipher operation */
534         if (ctx->is_single_pass) {
535                 enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
536                 cipher_param->spc_aad_addr = aad->iova;
537                 cipher_param->spc_auth_res_addr = digest->iova;
538                 return;
539         }
540
541         switch (ctx->qat_hash_alg) {
542         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
543         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
544                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
545                         req->comn_hdr.serv_specif_flags,
546                                 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
547                 rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
548                                 ctx->cipher_iv.length);
549                 aad_iova = aad->iova;
550                 break;
551         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
552                 aad_data = aad->va;
553                 aad_iova = aad->iova;
554                 aad_ccm_real_len = 0;
555                 aad_len_field_sz = 0;
556                 msg_len_be = rte_bswap32((uint32_t)data_len -
557                                 ofs.ofs.cipher.head);
558
559                 if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
560                         aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
561                         aad_ccm_real_len = ctx->aad_len -
562                                 ICP_QAT_HW_CCM_AAD_B0_LEN -
563                                 ICP_QAT_HW_CCM_AAD_LEN_INFO;
564                 } else {
565                         aad_data = iv->va;
566                         aad_iova = iv->iova;
567                 }
568
569                 q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
570                 aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
571                         aad_len_field_sz, ctx->digest_length, q);
572                 if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
573                         memcpy(aad_data + ctx->cipher_iv.length +
574                                 ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
575                                 ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
576                                 (uint8_t *)&msg_len_be,
577                                 ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
578                 } else {
579                         memcpy(aad_data + ctx->cipher_iv.length +
580                                 ICP_QAT_HW_CCM_NONCE_OFFSET,
581                                 (uint8_t *)&msg_len_be +
582                                 (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
583                                 - q), q);
584                 }
585
586                 if (aad_len_field_sz > 0) {
587                         *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
588                                 rte_bswap16(aad_ccm_real_len);
589
590                         if ((aad_ccm_real_len + aad_len_field_sz)
591                                 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
592                                 uint8_t pad_len = 0;
593                                 uint8_t pad_idx = 0;
594
595                                 pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
596                                         ((aad_ccm_real_len +
597                                         aad_len_field_sz) %
598                                         ICP_QAT_HW_CCM_AAD_B0_LEN);
599                                 pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
600                                         aad_ccm_real_len +
601                                         aad_len_field_sz;
602                                 memset(&aad_data[pad_idx], 0, pad_len);
603                         }
604                 }
605
606                 rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
607                         + ICP_QAT_HW_CCM_NONCE_OFFSET,
608                         (uint8_t *)iv->va +
609                         ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
610                 *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
611                         q - ICP_QAT_HW_CCM_NONCE_OFFSET;
612
613                 rte_memcpy((uint8_t *)aad->va +
614                                 ICP_QAT_HW_CCM_NONCE_OFFSET,
615                         (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
616                         ctx->cipher_iv.length);
617                 break;
618         default:
619                 break;
620         }
621
622         cipher_param->cipher_offset = ofs.ofs.cipher.head;
623         cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
624                         ofs.ofs.cipher.tail;
625         auth_param->auth_off = ofs.ofs.cipher.head;
626         auth_param->auth_len = cipher_param->cipher_length;
627         auth_param->auth_res_addr = digest->iova;
628         auth_param->u1.aad_adr = aad_iova;
629 }
630
631 static __rte_always_inline int
632 qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
633         struct rte_crypto_vec *data, uint16_t n_data_vecs,
634         union rte_crypto_sym_ofs ofs,
635         struct rte_crypto_va_iova_ptr *iv,
636         struct rte_crypto_va_iova_ptr *digest,
637         struct rte_crypto_va_iova_ptr *aad,
638         void *user_data)
639 {
640         struct qat_qp *qp = qp_data;
641         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
642         struct qat_queue *tx_queue = &qp->tx_q;
643         struct qat_sym_session *ctx = dp_ctx->session;
644         struct icp_qat_fw_la_bulk_req *req;
645         int32_t data_len;
646         uint32_t tail = dp_ctx->tail;
647
648         req = (struct icp_qat_fw_la_bulk_req *)(
649                 (uint8_t *)tx_queue->base_addr + tail);
650         tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
651         rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
652         rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
653         data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
654         if (unlikely(data_len < 0))
655                 return -1;
656         req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
657
658         enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
659                 (uint32_t)data_len);
660
661         dp_ctx->tail = tail;
662         dp_ctx->cached_enqueue++;
663
664         return 0;
665 }
666
667 static __rte_always_inline uint32_t
668 qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
669         struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
670         void *user_data[], int *status)
671 {
672         struct qat_qp *qp = qp_data;
673         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
674         struct qat_queue *tx_queue = &qp->tx_q;
675         struct qat_sym_session *ctx = dp_ctx->session;
676         uint32_t i, n;
677         uint32_t tail;
678         struct icp_qat_fw_la_bulk_req *req;
679         int32_t data_len;
680
681         n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
682         if (unlikely(n == 0)) {
683                 qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
684                 *status = 0;
685                 return 0;
686         }
687
688         tail = dp_ctx->tail;
689
690         for (i = 0; i < n; i++) {
691                 req  = (struct icp_qat_fw_la_bulk_req *)(
692                         (uint8_t *)tx_queue->base_addr + tail);
693                 rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
694
695                 data_len = qat_sym_dp_parse_data_vec(qp, req,
696                         vec->src_sgl[i].vec,
697                         vec->src_sgl[i].num);
698                 if (unlikely(data_len < 0))
699                         break;
700                 req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
701                 enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
702                         &vec->aad[i], ofs, (uint32_t)data_len);
703                 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
704         }
705
706         if (unlikely(i < n))
707                 qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
708
709         dp_ctx->tail = tail;
710         dp_ctx->cached_enqueue += i;
711         *status = 0;
712         return i;
713 }
714
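/*
 * Burst dequeue: read responses from the shadow RX head until the ring
 * reports empty or the requested count is reached. The count is taken
 * from the get_dequeue_count callback applied to the first response's
 * opaque data, or from max_nb_to_dequeue if no callback is given. Each
 * response is reported through post_dequeue and counted in
 * n_success_jobs when the firmware status is OK.
 */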
715 static __rte_always_inline uint32_t
716 qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
717         rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
718         uint32_t max_nb_to_dequeue,
719         rte_cryptodev_raw_post_dequeue_t post_dequeue,
720         void **out_user_data, uint8_t is_user_data_array,
721         uint32_t *n_success_jobs, int *return_status)
722 {
723         struct qat_qp *qp = qp_data;
724         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
725         struct qat_queue *rx_queue = &qp->rx_q;
726         struct icp_qat_fw_comn_resp *resp;
727         void *resp_opaque;
728         uint32_t i, n, inflight;
729         uint32_t head;
730         uint8_t status;
731
732         *n_success_jobs = 0;
733         *return_status = 0;
734         head = dp_ctx->head;
735
736         inflight = qp->enqueued - qp->dequeued;
737         if (unlikely(inflight == 0))
738                 return 0;
739
740         resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
741                         head);
742         /* no operation ready */
743         if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
744                 return 0;
745
746         resp_opaque = (void *)(uintptr_t)resp->opaque_data;
747         /* get the dequeue count */
748         if (get_dequeue_count) {
749                 n = get_dequeue_count(resp_opaque);
750                 if (unlikely(n == 0))
751                         return 0;
752         } else {
753                 if (unlikely(max_nb_to_dequeue == 0))
754                         return 0;
755                 n = max_nb_to_dequeue;
756         }
757
758         out_user_data[0] = resp_opaque;
759         status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
760         post_dequeue(resp_opaque, 0, status);
761         *n_success_jobs += status;
762
763         head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
764
765         /* we already finished dequeue when n == 1 */
766         if (unlikely(n == 1)) {
767                 i = 1;
768                 goto end_deq;
769         }
770
771         if (is_user_data_array) {
772                 for (i = 1; i < n; i++) {
773                         resp = (struct icp_qat_fw_comn_resp *)(
774                                 (uint8_t *)rx_queue->base_addr + head);
775                         if (unlikely(*(uint32_t *)resp ==
776                                         ADF_RING_EMPTY_SIG))
777                                 goto end_deq;
778                         out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
779                         status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
780                         *n_success_jobs += status;
781                         post_dequeue(out_user_data[i], i, status);
782                         head = (head + rx_queue->msg_size) &
783                                         rx_queue->modulo_mask;
784                 }
785
786                 goto end_deq;
787         }
788
789         /* user data is a single opaque pointer rather than an array */
790         for (i = 1; i < n; i++) {
791                 resp = (struct icp_qat_fw_comn_resp *)(
792                         (uint8_t *)rx_queue->base_addr + head);
793                 if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
794                         goto end_deq;
795                 status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
796                 head = (head + rx_queue->msg_size) &
797                                 rx_queue->modulo_mask;
798                 post_dequeue(resp_opaque, i, status);
799                 *n_success_jobs += status;
800         }
801
802 end_deq:
803         dp_ctx->head = head;
804         dp_ctx->cached_dequeue += i;
805         return i;
806 }
807
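/*
 * Dequeue a single response, if one is ready, and map its firmware status
 * to an rte_crypto_op status.
 */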
808 static __rte_always_inline void *
809 qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
810                 enum rte_crypto_op_status *op_status)
811 {
812         struct qat_qp *qp = qp_data;
813         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
814         struct qat_queue *rx_queue = &qp->rx_q;
815         register struct icp_qat_fw_comn_resp *resp;
816
817         resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
818                         dp_ctx->head);
819
820         if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
821                 return NULL;
822
823         dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
824                         rx_queue->modulo_mask;
825         dp_ctx->cached_dequeue++;
826
827         *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
828                         RTE_CRYPTO_OP_STATUS_SUCCESS :
829                         RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
830         *dequeue_status = 0;
831         return (void *)(uintptr_t)resp->opaque_data;
832 }
833
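/*
 * enqueue_done handler: the application must pass the exact number of
 * requests cached since the last kick; the shadow tail is then published
 * to the TX ring CSR so hardware starts processing them.
 */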
834 static __rte_always_inline int
835 qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
836 {
837         struct qat_qp *qp = qp_data;
838         struct qat_queue *tx_queue = &qp->tx_q;
839         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
840
841         if (unlikely(dp_ctx->cached_enqueue != n))
842                 return -1;
843
844         qp->enqueued += n;
845         qp->stats.enqueued_count += n;
846
847         tx_queue->tail = dp_ctx->tail;
848
849         WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
850                         tx_queue->hw_bundle_number,
851                         tx_queue->hw_queue_number, tx_queue->tail);
852         tx_queue->csr_tail = tx_queue->tail;
853         dp_ctx->cached_enqueue = 0;
854
855         return 0;
856 }
857
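/*
 * dequeue_done handler: acknowledge n processed responses, wipe the
 * consumed descriptors back to the empty signature and, once enough
 * responses have accumulated, write the new head to the RX ring CSR.
 */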
858 static __rte_always_inline int
859 qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
860 {
861         struct qat_qp *qp = qp_data;
862         struct qat_queue *rx_queue = &qp->rx_q;
863         struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
864
865         if (unlikely(dp_ctx->cached_dequeue != n))
866                 return -1;
867
868         rx_queue->head = dp_ctx->head;
869         rx_queue->nb_processed_responses += n;
870         qp->dequeued += n;
871         qp->stats.dequeued_count += n;
872         if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
873                 uint32_t old_head, new_head;
874                 uint32_t max_head;
875
876                 old_head = rx_queue->csr_head;
877                 new_head = rx_queue->head;
878                 max_head = qp->nb_descriptors * rx_queue->msg_size;
879
880                 /* mark processed response descriptors as free again */
881                 void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
882
883                 if (new_head < old_head) {
884                         memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
885                                         max_head - old_head);
886                         memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
887                                         new_head);
888                 } else {
889                         memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
890                                         old_head);
891                 }
892                 rx_queue->nb_processed_responses = 0;
893                 rx_queue->csr_head = new_head;
894
895                 /* write current head to CSR */
896                 WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
897                         rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
898                         new_head);
899         }
900
901         dp_ctx->cached_dequeue = 0;
902         return 0;
903 }
904
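/*
 * Populate the raw data-path context for a queue pair: record the session
 * and current ring positions, then select the enqueue/dequeue handlers
 * according to the session's service type (cipher-only, auth-only,
 * chained or AEAD). Only session-based operation is supported.
 */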
905 int
906 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
907         struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
908         enum rte_crypto_op_sess_type sess_type,
909         union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
910 {
911         struct qat_qp *qp;
912         struct qat_sym_session *ctx;
913         struct qat_sym_dp_ctx *dp_ctx;
914
915         qp = dev->data->queue_pairs[qp_id];
916         dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
917
918         if (!is_update) {
919                 memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
920                                 sizeof(struct qat_sym_dp_ctx));
921                 raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
922                 dp_ctx->tail = qp->tx_q.tail;
923                 dp_ctx->head = qp->rx_q.head;
924                 dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
925         }
926
927         if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
928                 return -EINVAL;
929
930         ctx = (struct qat_sym_session *)get_sym_session_private_data(
931                         session_ctx.crypto_sess, qat_sym_driver_id);
932
933         dp_ctx->session = ctx;
934
935         raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
936         raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
937         raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
938         raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
939
940         if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
941                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
942                         !ctx->is_gmac) {
943                 /* AES-GCM or AES-CCM */
944                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
945                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
946                         (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
947                         && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
948                         && ctx->qat_hash_alg ==
949                                         ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
950                         raw_dp_ctx->enqueue_burst =
951                                         qat_sym_dp_enqueue_aead_jobs;
952                         raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
953                 } else {
954                         raw_dp_ctx->enqueue_burst =
955                                         qat_sym_dp_enqueue_chain_jobs;
956                         raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
957                 }
958         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
959                 raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
960                 raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
961         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
962                 if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
963                         ctx->qat_cipher_alg ==
964                                 ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
965                         raw_dp_ctx->enqueue_burst =
966                                         qat_sym_dp_enqueue_aead_jobs;
967                         raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
968                 } else {
969                         raw_dp_ctx->enqueue_burst =
970                                         qat_sym_dp_enqueue_cipher_jobs;
971                         raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
972                 }
973         } else
974                 return -1;
975
976         return 0;
977 }
978
979 int
980 qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
981 {
982         return sizeof(struct qat_sym_dp_ctx);
983 }
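/*
 * Usage sketch (illustrative only, not part of this driver): the handlers
 * installed by qat_sym_configure_dp_ctx() are normally reached through the
 * rte_cryptodev raw data-path API. The wrapper names and signatures below
 * follow the librte_cryptodev release matching this driver and may differ
 * between versions; dev_id, qp_id, sess, vec, ofs, the user_data arrays
 * and the two dequeue callbacks are assumed to be set up by the
 * application.
 *
 *	struct rte_crypto_raw_dp_ctx *dp = calloc(1,
 *			sizeof(struct rte_crypto_raw_dp_ctx) +
 *			rte_cryptodev_get_raw_dp_ctx_size(dev_id));
 *	union rte_cryptodev_session_ctx sc = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp,
 *			RTE_CRYPTO_OP_WITH_SESSION, sc, 0);
 *
 *	n = rte_cryptodev_raw_enqueue_burst(dp, &vec, ofs, user_data,
 *			&enq_status);
 *	rte_cryptodev_raw_enqueue_done(dp, n);
 *
 *	m = rte_cryptodev_raw_dequeue_burst(dp, get_cnt_cb, 0, post_cb,
 *			out_user_data, 1, &n_ok, &deq_status);
 *	rte_cryptodev_raw_dequeue_done(dp, m);
 *
 * Each call above lands in the corresponding handler installed in this
 * file: enqueue_burst, enqueue_done (qat_sym_dp_kick_tail), dequeue_burst
 * (qat_sym_dp_dequeue_burst) and dequeue_done (qat_sym_dp_update_head).
 */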