crypto/qat: rework burst data path
[dpdk.git] drivers/crypto/qat/qat_sym_hw_dp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

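/*
 * Fill the request's source/destination fields from the caller supplied
 * data vectors. A single vector is programmed as a flat buffer; multiple
 * vectors are copied into the SGL held by the op cookie that corresponds
 * to the current TX ring slot. Returns the total data length, or -1 if
 * the vector count or the accumulated length is invalid.
 */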
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
                struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
        struct qat_queue *tx_queue;
        struct qat_sym_op_cookie *cookie;
        struct qat_sgl *list;
        uint32_t i;
        uint32_t total_len;

        if (likely(n_data_vecs == 1)) {
                req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
                        data[0].iova;
                req->comn_mid.src_length = req->comn_mid.dst_length =
                        data[0].len;
                return data[0].len;
        }

        if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
                return -1;

        total_len = 0;
        tx_queue = &qp->tx_q;

        ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
                        QAT_COMN_PTR_TYPE_SGL);
        cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
        list = (struct qat_sgl *)&cookie->qat_sgl_src;

        for (i = 0; i < n_data_vecs; i++) {
                list->buffers[i].len = data[i].len;
                list->buffers[i].resrvd = 0;
                list->buffers[i].addr = data[i].iova;
                if (unlikely(data[i].len > UINT32_MAX - total_len)) {
                        QAT_DP_LOG(ERR, "Message too long");
                        return -1;
                }
                total_len += data[i].len;
        }

        list->num_bufs = i;
        req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
                        cookie->qat_sgl_src_phys_addr;
        req->comn_mid.src_length = req->comn_mid.dst_length = 0;
        return total_len;
}

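/*
 * Program the cipher IV: small IVs are embedded directly in the request's
 * IV array, larger ones are passed by IOVA with the 64-bit pointer flag set.
 */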
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
                struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
                struct icp_qat_fw_la_bulk_req *qat_req)
{
        /* copy IV into request if it fits */
        if (iv_len <= sizeof(cipher_param->u.cipher_IV_array)) {
                rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
                                iv_len);
        } else {
                ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
                                qat_req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_CIPH_IV_64BIT_PTR);
                cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
        }
}

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
        (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
        ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
        uint32_t i;

        for (i = 0; i < n; i++)
                sta[i] = status;
}

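/* Number of descriptors still free on the TX ring, capped at the requested n,
 * accounting for jobs already cached in this enqueue burst but not yet
 * submitted to the device.
 */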
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
        RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

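/* Set the per-request cipher parameters (IV, offset and length) for a
 * cipher-only job.
 */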
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;

        cipher_param = (void *)&req->serv_specif_rqpars;

        /* cipher IV */
        set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
}

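/*
 * Enqueue a single cipher-only job through the raw data path API: the request
 * is built in the next TX ring slot from the session's firmware request
 * template and the new position is kept in the local shadow tail. The job is
 * not visible to hardware until qat_sym_dp_kick_tail() writes the tail CSR.
 */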
static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest __rte_unused,
        struct rte_crypto_va_iova_ptr *aad __rte_unused,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

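/*
 * Burst variant of the cipher enqueue. At most as many jobs as fit in the
 * remaining ring space are built; on the first descriptor that fails to
 * parse, the remaining vec->status entries are marked failed and the number
 * of jobs actually queued is returned.
 */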
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
                        (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

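/* Set the per-request auth parameters (offset, length, digest address) and
 * the algorithm specific IV/AAD handling for an auth-only job.
 */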
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;

        cipher_param = (void *)&req->serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param +
                        ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

        auth_param->auth_off = ofs.ofs.auth.head;
        auth_param->auth_len = data_len - ofs.ofs.auth.head -
                        ofs.ofs.auth.tail;
        auth_param->auth_res_addr = digest->iova;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                auth_param->u1.aad_adr = auth_iv->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                        req->comn_hdr.serv_specif_flags,
                        ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
                                ctx->auth_iv.length);
                break;
        default:
                break;
        }
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv __rte_unused,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
                        (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_auth_job(ctx, req, &vec->digest[i],
                        &vec->auth_iv[i], ofs, (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

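/*
 * Set the cipher and auth parameters for a chained cipher+hash job. Returns
 * -1 if the offsets exceed the data length or, for SGL input, if the end of
 * the authenticated region cannot be located. When the digest directly
 * follows the authenticated data inside the ciphered region, the request is
 * flagged as digest-in-buffer and, if needed, the buffer length is extended
 * to cover the encrypted digest.
 */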
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_vec *data,
        uint16_t n_data_vecs,
        struct rte_crypto_va_iova_ptr *cipher_iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        rte_iova_t auth_iova_end;
        int32_t cipher_len, auth_len;

        cipher_param = (void *)&req->serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param +
                        ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

        cipher_len = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
        auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

        if (unlikely(cipher_len < 0 || auth_len < 0))
                return -1;

        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = cipher_len;
        set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

        auth_param->auth_off = ofs.ofs.auth.head;
        auth_param->auth_len = auth_len;
        auth_param->auth_res_addr = digest->iova;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                auth_param->u1.aad_adr = auth_iv->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                break;
        default:
                break;
        }

        if (unlikely(n_data_vecs > 1)) {
                int auth_end_get = 0, i = n_data_vecs - 1;
                struct rte_crypto_vec *cvec = &data[0];
                uint32_t len;

                len = data_len - ofs.ofs.auth.tail;

                while (i >= 0 && len > 0) {
                        if (cvec->len >= len) {
                                auth_iova_end = cvec->iova + len;
                                len = 0;
                                auth_end_get = 1;
                                break;
                        }
                        len -= cvec->len;
                        i--;
                        cvec++;
                }

                if (unlikely(auth_end_get == 0))
                        return -1;
        } else
                auth_iova_end = data[0].iova + auth_param->auth_off +
                        auth_param->auth_len;

        /* Then check if digest-encrypted conditions are met */
        if ((auth_param->auth_off + auth_param->auth_len <
                cipher_param->cipher_offset +
                cipher_param->cipher_length) &&
                (digest->iova == auth_iova_end)) {
                /* Handle partial digest encryption */
                if (cipher_param->cipher_offset +
                                cipher_param->cipher_length <
                                auth_param->auth_off +
                                auth_param->auth_len +
                                ctx->digest_length)
                        req->comn_mid.dst_length =
                                req->comn_mid.src_length =
                                auth_param->auth_off +
                                auth_param->auth_len +
                                ctx->digest_length;
                struct icp_qat_fw_comn_req_hdr *header =
                        &req->comn_hdr;
                ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
                        header->serv_specif_flags,
                        ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        }

        return 0;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *cipher_iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
                        cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
                return -1;

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                if (unlikely(enqueue_one_chain_job(ctx, req,
                        vec->src_sgl[i].vec, vec->src_sgl[i].num,
                        &vec->iv[i], &vec->digest[i],
                        &vec->auth_iv[i], ofs, (uint32_t)data_len)))
                        break;

                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

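/*
 * Set the request parameters for an AEAD job. Single-pass capable devices
 * handle AEAD as a cipher operation with the AAD and digest addresses added;
 * otherwise GCM programs the IV and AAD pointer directly, while CCM builds
 * the B0 block and formats the AAD and nonce as the hardware expects.
 */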
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param =
                (void *)&req->serv_specif_rqpars;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (void *)((uint8_t *)&req->serv_specif_rqpars +
                ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
        uint8_t *aad_data;
        uint8_t aad_ccm_real_len;
        uint8_t aad_len_field_sz;
        uint32_t msg_len_be;
        rte_iova_t aad_iova = 0;
        uint8_t q;

        /* CPM 1.7 uses single pass to treat AEAD as a cipher operation */
        if (ctx->is_single_pass) {
                enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);

                if (ctx->is_ucs) {
                        /* QAT GEN4 devices use the unified crypto slice,
                         * which takes the extended cipher request parameters
                         */
                        struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
                                (void *)&req->serv_specif_rqpars;
                        cipher_param_20->spc_aad_addr = aad->iova;
                        cipher_param_20->spc_auth_res_addr = digest->iova;
                } else {
                        cipher_param->spc_aad_addr = aad->iova;
                        cipher_param->spc_auth_res_addr = digest->iova;
                }

                return;
        }

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                        req->comn_hdr.serv_specif_flags,
                        ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
                                ctx->cipher_iv.length);
                aad_iova = aad->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
                aad_data = aad->va;
                aad_iova = aad->iova;
                aad_ccm_real_len = 0;
                aad_len_field_sz = 0;
                msg_len_be = rte_bswap32((uint32_t)data_len -
                                ofs.ofs.cipher.head);

                if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
                        aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
                        aad_ccm_real_len = ctx->aad_len -
                                ICP_QAT_HW_CCM_AAD_B0_LEN -
                                ICP_QAT_HW_CCM_AAD_LEN_INFO;
                } else {
                        aad_data = iv->va;
                        aad_iova = iv->iova;
                }

                q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
                aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
                        aad_len_field_sz, ctx->digest_length, q);
                if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
                        memcpy(aad_data + ctx->cipher_iv.length +
                                ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
                                ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
                                (uint8_t *)&msg_len_be,
                                ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
                } else {
                        memcpy(aad_data + ctx->cipher_iv.length +
                                ICP_QAT_HW_CCM_NONCE_OFFSET,
                                (uint8_t *)&msg_len_be +
                                (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
                                - q), q);
                }

                if (aad_len_field_sz > 0) {
                        *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
                                rte_bswap16(aad_ccm_real_len);

                        if ((aad_ccm_real_len + aad_len_field_sz)
                                % ICP_QAT_HW_CCM_AAD_B0_LEN) {
                                uint8_t pad_len = 0;
                                uint8_t pad_idx = 0;

                                pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
                                        ((aad_ccm_real_len +
                                        aad_len_field_sz) %
                                        ICP_QAT_HW_CCM_AAD_B0_LEN);
                                pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
                                        aad_ccm_real_len +
                                        aad_len_field_sz;
                                memset(&aad_data[pad_idx], 0, pad_len);
                        }
                }

                rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
                        + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        (uint8_t *)iv->va +
                        ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
                *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
                        q - ICP_QAT_HW_CCM_NONCE_OFFSET;

                rte_memcpy((uint8_t *)aad->va +
                                ICP_QAT_HW_CCM_NONCE_OFFSET,
                        (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        ctx->cipher_iv.length);
                break;
        default:
                break;
        }

        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
        auth_param->auth_off = ofs.ofs.cipher.head;
        auth_param->auth_len = cipher_param->cipher_length;
        auth_param->auth_res_addr = digest->iova;
        auth_param->u1.aad_adr = aad_iova;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
                (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req,
                        vec->src_sgl[i].vec,
                        vec->src_sgl[i].num);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
                        &vec->aad[i], ofs, (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

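/*
 * Dequeue a burst of responses from the RX ring. The number of responses to
 * collect comes either from the get_dequeue_count callback (invoked with the
 * opaque data of the first response) or from max_nb_to_dequeue. Each
 * response's status is reported through post_dequeue and the count of
 * successful jobs is accumulated. The ring head is only cached locally until
 * qat_sym_dp_update_head() is called.
 */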
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success_jobs, int *return_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct icp_qat_fw_comn_resp *resp;
        void *resp_opaque;
        uint32_t i, n, inflight;
        uint32_t head;
        uint8_t status;

        *n_success_jobs = 0;
        *return_status = 0;
        head = dp_ctx->head;

        inflight = qp->enqueued - qp->dequeued;
        if (unlikely(inflight == 0))
                return 0;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        head);
        /* no operation ready */
        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return 0;

        resp_opaque = (void *)(uintptr_t)resp->opaque_data;
        /* get the dequeue count */
        if (get_dequeue_count) {
                n = get_dequeue_count(resp_opaque);
                if (unlikely(n == 0))
                        return 0;
        } else {
                if (unlikely(max_nb_to_dequeue == 0))
                        return 0;
                n = max_nb_to_dequeue;
        }

        out_user_data[0] = resp_opaque;
        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
        post_dequeue(resp_opaque, 0, status);
        *n_success_jobs += status;

        head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

        /* we already finished dequeue when n == 1 */
        if (unlikely(n == 1)) {
                i = 1;
                goto end_deq;
        }

        if (is_user_data_array) {
                for (i = 1; i < n; i++) {
                        resp = (struct icp_qat_fw_comn_resp *)(
                                (uint8_t *)rx_queue->base_addr + head);
                        if (unlikely(*(uint32_t *)resp ==
                                        ADF_RING_EMPTY_SIG))
                                goto end_deq;
                        out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
                        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                        *n_success_jobs += status;
                        post_dequeue(out_user_data[i], i, status);
                        head = (head + rx_queue->msg_size) &
                                        rx_queue->modulo_mask;
                }

                goto end_deq;
        }

        /* opaque is not array */
        for (i = 1; i < n; i++) {
                resp = (struct icp_qat_fw_comn_resp *)(
                        (uint8_t *)rx_queue->base_addr + head);
                status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                        goto end_deq;
                head = (head + rx_queue->msg_size) &
                                rx_queue->modulo_mask;
                post_dequeue(resp_opaque, i, status);
                *n_success_jobs += status;
        }

end_deq:
        dp_ctx->head = head;
        dp_ctx->cached_dequeue += i;
        return i;
}

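/*
 * Dequeue a single response from the RX ring, reporting success or an
 * authentication failure through op_status. Returns the opaque user data of
 * the completed job, or NULL if no response is ready.
 */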
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
                enum rte_crypto_op_status *op_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        register struct icp_qat_fw_comn_resp *resp;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        dp_ctx->head);

        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return NULL;

        dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
                        rx_queue->modulo_mask;
        dp_ctx->cached_dequeue++;

        *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
                        RTE_CRYPTO_OP_STATUS_SUCCESS :
                        RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        *dequeue_status = 0;
        return (void *)(uintptr_t)resp->opaque_data;
}

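/*
 * Commit the jobs cached by the enqueue calls: n must match the number of
 * cached jobs, otherwise -1 is returned. On success the TX tail is written
 * to the ring CSR, making the jobs visible to the device.
 */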
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_enqueue != n))
                return -1;

        qp->enqueued += n;
        qp->stats.enqueued_count += n;

        tx_queue->tail = dp_ctx->tail;

        WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
                        tx_queue->hw_bundle_number,
                        tx_queue->hw_queue_number, tx_queue->tail);
        tx_queue->csr_tail = tx_queue->tail;
        dp_ctx->cached_enqueue = 0;

        return 0;
}

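/*
 * Acknowledge the responses cached by the dequeue calls: n must match the
 * number of cached dequeues. Consumed descriptors are cleared back to the
 * empty-ring signature and, once enough responses have accumulated, the RX
 * head CSR is advanced.
 */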
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_dequeue != n))
                return -1;

        rx_queue->head = dp_ctx->head;
        rx_queue->nb_processed_responses += n;
        qp->dequeued += n;
        qp->stats.dequeued_count += n;
        if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
                uint32_t old_head, new_head;
                uint32_t max_head;

                old_head = rx_queue->csr_head;
                new_head = rx_queue->head;
                max_head = qp->nb_descriptors * rx_queue->msg_size;

                /* write out free descriptors */
                void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

                if (new_head < old_head) {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
                                        max_head - old_head);
                        memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
                                        new_head);
                } else {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
                                        old_head);
                }
                rx_queue->nb_processed_responses = 0;
                rx_queue->csr_head = new_head;

                /* write current head to CSR */
                WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
                        rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
                        new_head);
        }

        dp_ctx->cached_dequeue = 0;
        return 0;
}

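/*
 * Populate the raw data path context for a queue pair: initialise the shadow
 * head/tail on first use, bind the session, and select the enqueue/dequeue
 * handlers (AEAD, chain, auth-only or cipher-only) that match the session's
 * command and algorithms. Only session-based operation is supported.
 */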
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
        struct qat_qp *qp;
        struct qat_sym_session *ctx;
        struct qat_sym_dp_ctx *dp_ctx;

        qp = dev->data->queue_pairs[qp_id];
        dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

        if (!is_update) {
                memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
                                sizeof(struct qat_sym_dp_ctx));
                raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
                dp_ctx->tail = qp->tx_q.tail;
                dp_ctx->head = qp->rx_q.head;
                dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
        }

        if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
                return -EINVAL;

        ctx = (struct qat_sym_session *)get_sym_session_private_data(
                        session_ctx.crypto_sess, qat_sym_driver_id);

        dp_ctx->session = ctx;

        raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
        raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
        raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
        raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

        if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
                        !ctx->is_gmac) {
                /* AES-GCM or AES-CCM */
                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
                        (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
                        && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
                        && ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_aead_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
                } else {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_chain_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
                }
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
                raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
                raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
                        ctx->qat_cipher_alg ==
                                ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_aead_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
                } else {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_cipher_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
                }
        } else
                return -1;

        return 0;
}

int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
        return sizeof(struct qat_sym_dp_ctx);
}