/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

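/*
 * Per-queue-pair raw datapath context, stored in the driver-private area
 * of rte_crypto_raw_dp_ctx. Shadow copies of the Tx tail and Rx head let
 * jobs be staged locally; nothing becomes visible to hardware until the
 * application calls the "done" callbacks, which flush the cached counts
 * to the queue CSRs.
 */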
struct qat_sym_dp_ctx {
	struct qat_sym_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

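/*
 * Translate the caller's data vector into the request descriptor.
 * A single segment is passed by IOVA in the flat-buffer fields; multiple
 * segments are copied into the SGL attached to the per-slot cookie and
 * the request is flagged as SGL. Returns the total byte length, or -1
 * on an empty or oversized vector.
 */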
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
		struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
	struct qat_queue *tx_queue;
	struct qat_sym_op_cookie *cookie;
	struct qat_sgl *list;
	uint32_t i;
	uint32_t total_len;

	if (likely(n_data_vecs == 1)) {
		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			data[0].iova;
		req->comn_mid.src_length = req->comn_mid.dst_length =
			data[0].len;
		return data[0].len;
	}

	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
		return -1;

	total_len = 0;
	tx_queue = &qp->tx_q;

	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);
	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
	list = (struct qat_sgl *)&cookie->qat_sgl_src;

	for (i = 0; i < n_data_vecs; i++) {
		list->buffers[i].len = data[i].len;
		list->buffers[i].resrvd = 0;
		list->buffers[i].addr = data[i].iova;
		/* do the length check in 64 bits: a 32-bit sum would wrap
		 * and could never exceed UINT32_MAX
		 */
		if ((uint64_t)total_len + data[i].len > UINT32_MAX) {
			QAT_DP_LOG(ERR, "Message too long");
			return -1;
		}
		total_len += data[i].len;
	}

	list->num_bufs = i;
	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			cookie->qat_sgl_src_phys_addr;
	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
	return total_len;
}

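/*
 * Small IVs are copied inline into the request's cipher_IV_array field;
 * anything larger is passed by IOVA instead, with the 64-bit-pointer
 * flag set so firmware dereferences it.
 */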
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array)) {
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

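/*
 * Free ring slots available to this context: the queue's inflight budget
 * minus what is already enqueued but not yet dequeued, minus jobs cached
 * locally but not yet kicked to hardware, clamped to the requested n.
 */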
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

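/*
 * Fill the cipher-only request parameters: the IV plus the cipher
 * region, which is the job length trimmed by the head/tail offsets.
 */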
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
}

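/*
 * Single-op enqueue. The descriptor is built in place at the shadow
 * tail: the session's request template is copied in with rte_mov128,
 * the data vector and per-op fields are patched on top, and only on
 * success are the shadow tail and cached count advanced, so a failed
 * op leaves the ring state untouched.
 */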
static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	/* reject the op when every descriptor is already in flight;
	 * the burst path gets this bound from QAT_SYM_DP_GET_MAX_ENQ
	 */
	if (unlikely(qp->enqueued - qp->dequeued + dp_ctx->cached_enqueue >=
			qp->max_inflights))
		return -1;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

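/*
 * Burst enqueue. The same template-copy-and-patch sequence as the
 * single-op path, bounded up front by QAT_SYM_DP_GET_MAX_ENQ; ops that
 * do not fit (or fail to parse) get -1 written to their status slot.
 * The auth, chain and AEAD bursts below follow the identical structure.
 */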
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

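/*
 * Fill the auth request parameters. Wireless algorithms (SNOW 3G,
 * KASUMI, ZUC) pass the auth IV by IOVA in the AAD pointer; GCM/GMAC
 * copies it into the cipher IV slot and forces the 12-byte IV flag.
 */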
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	default:
		break;
	}
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	/* reject the op when every descriptor is already in flight */
	if (unlikely(qp->enqueued - qp->dequeued + dp_ctx->cached_enqueue >=
			qp->max_inflights))
		return -1;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_auth_job(ctx, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

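/*
 * Fill a cipher+auth chain request. Besides the usual cipher/auth
 * regions, this walks the SGL to find the IOVA at the end of the
 * authenticated range: when the digest sits exactly there and the
 * cipher region extends past the auth region, digest-in-buffer mode
 * is enabled, growing src/dst length to cover a partially encrypted
 * digest when needed.
 */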
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data,
	uint16_t n_data_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	rte_iova_t auth_iova_end;
	int32_t cipher_len, auth_len;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	default:
		break;
	}

	if (unlikely(n_data_vecs > 1)) {
		int auth_end_get = 0, i = n_data_vecs - 1;
		struct rte_crypto_vec *cvec = &data[0];
		uint32_t len;

		len = data_len - ofs.ofs.auth.tail;

		while (i >= 0 && len > 0) {
			if (cvec->len >= len) {
				auth_iova_end = cvec->iova + len;
				len = 0;
				auth_end_get = 1;
				break;
			}
			len -= cvec->len;
			i--;
			cvec++;
		}

		if (unlikely(auth_end_get == 0))
			return -1;
	} else {
		auth_iova_end = data[0].iova + auth_param->auth_off +
			auth_param->auth_len;
	}

	/* Then check if digest-encrypted conditions are met */
	if ((auth_param->auth_off + auth_param->auth_len <
		cipher_param->cipher_offset +
		cipher_param->cipher_length) &&
		(digest->iova == auth_iova_end)) {
		/* Handle partial digest encryption */
		if (cipher_param->cipher_offset +
				cipher_param->cipher_length <
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length)
			req->comn_mid.dst_length =
				req->comn_mid.src_length =
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length;
		struct icp_qat_fw_comn_req_hdr *header =
			&req->comn_hdr;
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	}

	return 0;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	/* reject the op when every descriptor is already in flight */
	if (unlikely(qp->enqueued - qp->dequeued + dp_ctx->cached_enqueue >=
			qp->max_inflights))
		return -1;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
				vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

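/*
 * Fill an AEAD request. Single-pass-capable devices reuse the plain
 * cipher path plus the SPC AAD/digest pointers. Otherwise GCM passes
 * AAD by IOVA with a 12-byte IV, while CCM builds the B0 block in the
 * AAD buffer by hand: flags byte, big-endian message length, optional
 * encoded AAD length with zero padding, and the nonce copied both into
 * the request IV field and in front of the AAD.
 */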
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
	if (ctx->is_single_pass) {
		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;
		return;
	}

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	/* reject the op when every descriptor is already in flight */
	if (unlikely(qp->enqueued - qp->dequeued + dp_ctx->cached_enqueue >=
			qp->max_inflights))
		return -1;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
			&vec->aad[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

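/*
 * Burst dequeue. The batch size comes either from the caller-supplied
 * get_dequeue_count callback (keyed off the first response's opaque
 * data) or from max_nb_to_dequeue; responses are then walked until the
 * count is reached or an empty-ring signature is hit, reporting per-op
 * status through post_dequeue. When the callback reports a batch, the
 * last response is peeked first so a partial batch is never consumed.
 */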
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	if (get_dequeue_count) {
		n = get_dequeue_count(resp_opaque);
		if (unlikely(n == 0))
			return 0;
		else if (n > 1) {
			/* peek at the last response of the batch; the ring
			 * fills in order, so when it is ready the whole
			 * batch is. Use locals so processing still starts
			 * from the original head.
			 */
			uint32_t peek_head = (head +
				rx_queue->msg_size * (n - 1)) &
				rx_queue->modulo_mask;
			struct icp_qat_fw_comn_resp *peek_resp =
				(struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + peek_head);

			if (*(uint32_t *)peek_resp == ADF_RING_EMPTY_SIG)
				return 0;
		}
	} else {
		if (unlikely(max_nb_to_dequeue == 0))
			return 0;
		n = max_nb_to_dequeue;
	}

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
					rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not an array: every op reports the first opaque */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		/* check for an empty slot before reading its status */
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}

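/*
 * Dequeue a single response; returns the op's opaque pointer, or NULL
 * when the ring slot at the shadow head is still empty.
 */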
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;
	return (void *)(uintptr_t)resp->opaque_data;
}

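/*
 * enqueue_done callback: n must match the cached count, then the shadow
 * tail becomes the real tail and is written to the Tx ring CSR, making
 * the staged requests visible to hardware.
 */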
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
			tx_queue->hw_bundle_number,
			tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

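/*
 * dequeue_done callback: commit the shadow head, and once enough
 * responses have accumulated, stamp the consumed descriptors with the
 * empty-ring signature (handling wrap-around) and advance the Rx head
 * CSR so hardware can reuse them.
 */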
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}

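/*
 * Install the raw datapath handlers for one queue pair. Enqueue handlers
 * are selected from the session's command and algorithm fields; only
 * RTE_CRYPTO_OP_WITH_SESSION is supported. A minimal usage sketch
 * through the generic raw API (dev_id, qp_id, session, vec, ofs, iv,
 * digest, aad and user_data are the application's; error handling
 * omitted):
 *
 *	struct rte_crypto_raw_dp_ctx *dp =
 *		malloc(rte_cryptodev_get_raw_dp_ctx_size(dev_id));
 *	union rte_cryptodev_session_ctx sess = { .crypto_sess = session };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess, 0);
 *	rte_cryptodev_raw_enqueue(dp, vec, n_vecs, ofs,
 *			&iv, &digest, &aad, user_data);
 *	rte_cryptodev_raw_enqueue_done(dp, 1); // flushes the tail to CSR
 *
 * The dequeue side mirrors this via rte_cryptodev_raw_dequeue_burst()
 * and rte_cryptodev_raw_dequeue_done().
 */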
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = qp;
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, qat_sym_driver_id);

	dp_ctx->session = ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_cipher_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
		}
	} else {
		return -1;
	}

	return 0;
}

int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct qat_sym_dp_ctx);
}