drivers/crypto/qat/qat_sym_hw_dp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

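/*
 * Per-queue-pair raw data-path context. It shadows the hardware ring
 * pointers (tail for TX, head for RX) and counts the jobs that have been
 * cached locally but not yet committed to the device through the
 * enqueue_done/dequeue_done callbacks.
 */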
struct qat_sym_dp_ctx {
        struct qat_sym_session *session;
        uint32_t tail;
        uint32_t head;
        uint16_t cached_enqueue;
        uint16_t cached_dequeue;
};

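/*
 * Fill the request's source/destination addresses from the data vector.
 * A single flat buffer is passed by address directly; multiple segments
 * are described through the SGL cookie of the ring slot being built
 * (slot_offset). Returns the total data length, or -1 on an invalid
 * vector.
 */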
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
                struct rte_crypto_vec *data, uint16_t n_data_vecs,
                uint32_t slot_offset)
{
        struct qat_queue *tx_queue;
        struct qat_sym_op_cookie *cookie;
        struct qat_sgl *list;
        uint32_t i;
        uint32_t total_len;

        if (likely(n_data_vecs == 1)) {
                req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
                        data[0].iova;
                req->comn_mid.src_length = req->comn_mid.dst_length =
                        data[0].len;
                return data[0].len;
        }

        if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
                return -1;

        total_len = 0;
        tx_queue = &qp->tx_q;

        ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
                        QAT_COMN_PTR_TYPE_SGL);
        /*
         * Use the cookie of the slot being built: with cached enqueues this
         * slot may be ahead of the committed tx_queue->tail.
         */
        cookie = qp->op_cookies[slot_offset >> tx_queue->trailz];
        list = (struct qat_sgl *)&cookie->qat_sgl_src;

        for (i = 0; i < n_data_vecs; i++) {
                list->buffers[i].len = data[i].len;
                list->buffers[i].resrvd = 0;
                list->buffers[i].addr = data[i].iova;
                /* reject totals that would overflow 32 bits */
                if (unlikely(data[i].len > UINT32_MAX - total_len)) {
                        QAT_DP_LOG(ERR, "Message too long");
                        return -1;
                }
                total_len += data[i].len;
        }

        list->num_bufs = i;
        req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
                        cookie->qat_sgl_src_phys_addr;
        req->comn_mid.src_length = req->comn_mid.dst_length = 0;
        return total_len;
}

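/*
 * Set the cipher IV in the request: small IVs are embedded in the request
 * descriptor itself, larger ones are referenced by IOVA.
 */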
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
                struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
                struct icp_qat_fw_la_bulk_req *qat_req)
{
        /* copy IV into request if it fits */
        if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
                rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
                                iv_len);
        else {
                ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
                                qat_req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_CIPH_IV_64BIT_PTR);
                cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
        }
}

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
        (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
        ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
        uint32_t i;

        for (i = 0; i < n; i++)
                sta[i] = status;
}

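/*
 * Maximum number of jobs that can still be enqueued: ring capacity minus
 * in-flight descriptors minus jobs already cached but not yet committed.
 */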
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
        RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

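/*
 * Populate the AEAD-specific fields of a firmware request. For GCM the IV
 * is copied into the descriptor; for CCM the B0 block and the AAD length
 * encoding are built in the AAD buffer as the spec requires.
 */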
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param =
                (void *)&req->serv_specif_rqpars;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (void *)((uint8_t *)&req->serv_specif_rqpars +
                ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
        uint8_t *aad_data;
        uint8_t aad_ccm_real_len;
        uint8_t aad_len_field_sz;
        uint32_t msg_len_be;
        rte_iova_t aad_iova = 0;
        uint8_t q;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                        req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
                                ctx->cipher_iv.length);
                aad_iova = aad->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
                aad_data = aad->va;
                aad_iova = aad->iova;
                aad_ccm_real_len = 0;
                aad_len_field_sz = 0;
                msg_len_be = rte_bswap32((uint32_t)data_len -
                                ofs.ofs.cipher.head);

                if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
                        aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
                        aad_ccm_real_len = ctx->aad_len -
                                ICP_QAT_HW_CCM_AAD_B0_LEN -
                                ICP_QAT_HW_CCM_AAD_LEN_INFO;
                } else {
                        aad_data = iv->va;
                        aad_iova = iv->iova;
                }

                q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
                aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
                        aad_len_field_sz, ctx->digest_length, q);
                if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
                        memcpy(aad_data + ctx->cipher_iv.length +
                                ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
                                ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
                                (uint8_t *)&msg_len_be,
                                ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
                } else {
                        memcpy(aad_data + ctx->cipher_iv.length +
                                ICP_QAT_HW_CCM_NONCE_OFFSET,
                                (uint8_t *)&msg_len_be +
                                (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
                                - q), q);
                }

                if (aad_len_field_sz > 0) {
                        *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
                                rte_bswap16(aad_ccm_real_len);

                        if ((aad_ccm_real_len + aad_len_field_sz)
                                % ICP_QAT_HW_CCM_AAD_B0_LEN) {
                                uint8_t pad_len = 0;
                                uint8_t pad_idx = 0;

                                pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
                                        ((aad_ccm_real_len +
                                        aad_len_field_sz) %
                                        ICP_QAT_HW_CCM_AAD_B0_LEN);
                                pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
                                        aad_ccm_real_len +
                                        aad_len_field_sz;
                                memset(&aad_data[pad_idx], 0, pad_len);
                        }
                }

                rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
                        + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        (uint8_t *)iv->va +
                        ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
                *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
                        q - ICP_QAT_HW_CCM_NONCE_OFFSET;

                rte_memcpy((uint8_t *)aad->va +
                                ICP_QAT_HW_CCM_NONCE_OFFSET,
                        (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        ctx->cipher_iv.length);
                break;
        default:
                break;
        }

        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
        auth_param->auth_off = ofs.ofs.cipher.head;
        auth_param->auth_len = cipher_param->cipher_length;
        auth_param->auth_res_addr = digest->iova;
        auth_param->u1.aad_adr = aad_iova;

        if (ctx->is_single_pass) {
                cipher_param->spc_aad_addr = aad_iova;
                cipher_param->spc_auth_res_addr = digest->iova;
        }
}

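/*
 * Enqueue one AEAD job: reserve the next TX ring slot, clone the session's
 * request template into it and fill in the per-job fields. The tail
 * pointer is only cached locally until enqueue_done is called.
 */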
static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs,
                        dp_ctx->tail);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
                (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

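/*
 * Enqueue a burst of AEAD jobs. The count is clamped to the free ring
 * space; vec->status reports -1 for any job that could not be queued.
 */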
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
                        vec->sgl[i].num, tail);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
                        &vec->aad[i], ofs, (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

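/*
 * Populate the cipher-only fields of a firmware request: the IV plus the
 * cipher offset/length derived from the offset union.
 */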
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;

        cipher_param = (void *)&req->serv_specif_rqpars;

        /* cipher IV */
        set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest __rte_unused,
        struct rte_crypto_va_iova_ptr *aad __rte_unused,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs,
                        dp_ctx->tail);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
                        vec->sgl[i].num, tail);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
                        (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

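/*
 * Populate the auth-only fields of a firmware request. The wireless
 * algorithms pass the auth IV through the AAD pointer; GMAC reuses the
 * cipher IV array for its IV.
 */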
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;

        cipher_param = (void *)&req->serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param +
                        ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

        auth_param->auth_off = ofs.ofs.auth.head;
        auth_param->auth_len = data_len - ofs.ofs.auth.head -
                        ofs.ofs.auth.tail;
        auth_param->auth_res_addr = digest->iova;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                auth_param->u1.aad_adr = auth_iv->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                        req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
                                ctx->auth_iv.length);
                break;
        default:
                break;
        }
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv __rte_unused,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs,
                        dp_ctx->tail);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
                        (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
                        vec->sgl[i].num, tail);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                enqueue_one_auth_job(ctx, req, &vec->digest[i],
                        &vec->auth_iv[i], ofs, (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

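/*
 * Populate a chained cipher+auth request. Besides the usual offsets and
 * IVs this computes the IOVA of the end of the auth region, which is
 * needed to detect the digest-encrypted case (digest appended to the
 * ciphertext and covered by the cipher operation).
 */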
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_vec *data,
        uint16_t n_data_vecs,
        struct rte_crypto_va_iova_ptr *cipher_iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        rte_iova_t auth_iova_end;
        int32_t cipher_len, auth_len;

        cipher_param = (void *)&req->serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param +
                        ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

        cipher_len = data_len - ofs.ofs.cipher.head -
                        ofs.ofs.cipher.tail;
        auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

        if (unlikely(cipher_len < 0 || auth_len < 0))
                return -1;

        cipher_param->cipher_offset = ofs.ofs.cipher.head;
        cipher_param->cipher_length = cipher_len;
        set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

        auth_param->auth_off = ofs.ofs.auth.head;
        auth_param->auth_len = auth_len;
        auth_param->auth_res_addr = digest->iova;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                auth_param->u1.aad_adr = auth_iv->iova;
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                break;
        default:
                break;
        }

        if (unlikely(n_data_vecs > 1)) {
                int auth_end_get = 0, i = n_data_vecs - 1;
                struct rte_crypto_vec *cvec = &data[0];
                uint32_t len;

                len = data_len - ofs.ofs.auth.tail;

                /* walk the segments to locate the end of the auth region */
                while (i >= 0 && len > 0) {
                        if (cvec->len >= len) {
                                auth_iova_end = cvec->iova + len;
                                len = 0;
                                auth_end_get = 1;
                                break;
                        }
                        len -= cvec->len;
                        i--;
                        cvec++;
                }

                if (unlikely(auth_end_get == 0))
                        return -1;
        } else
                auth_iova_end = data[0].iova + auth_param->auth_off +
                        auth_param->auth_len;

        /* Then check if digest-encrypted conditions are met */
        if ((auth_param->auth_off + auth_param->auth_len <
                cipher_param->cipher_offset +
                cipher_param->cipher_length) &&
                (digest->iova == auth_iova_end)) {
                /* Handle partial digest encryption */
                if (cipher_param->cipher_offset +
                                cipher_param->cipher_length <
                                auth_param->auth_off +
                                auth_param->auth_len +
                                ctx->digest_length)
                        req->comn_mid.dst_length =
                                req->comn_mid.src_length =
                                auth_param->auth_off +
                                auth_param->auth_len +
                                ctx->digest_length;
                struct icp_qat_fw_comn_req_hdr *header =
                        &req->comn_hdr;
                ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
                        header->serv_specif_flags,
                        ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        }

        return 0;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *cipher_iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs,
                        dp_ctx->tail);
        if (unlikely(data_len < 0))
                return -1;
        req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

        if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
                        cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
                return -1;

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
                        vec->sgl[i].num, tail);
                if (unlikely(data_len < 0))
                        break;
                req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
                if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
                        vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
                                &vec->auth_iv[i], ofs, (uint32_t)data_len)))
                        break;

                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

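/*
 * Dequeue a burst of responses. The burst size comes either from the
 * get_dequeue_count callback (driven by the first response's opaque data)
 * or from max_nb_to_dequeue. The head pointer is only cached locally
 * until dequeue_done is called.
 */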
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success_jobs, int *return_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct icp_qat_fw_comn_resp *resp;
        void *resp_opaque;
        uint32_t i, n, inflight;
        uint32_t head;
        uint8_t status;

        *n_success_jobs = 0;
        *return_status = 0;
        head = dp_ctx->head;

        inflight = qp->enqueued - qp->dequeued;
        if (unlikely(inflight == 0))
                return 0;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        head);
        /* no operation ready */
        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return 0;

        resp_opaque = (void *)(uintptr_t)resp->opaque_data;
        /* get the dequeue count */
        if (get_dequeue_count) {
                n = get_dequeue_count(resp_opaque);
                if (unlikely(n == 0))
                        return 0;
                else if (n > 1) {
                        /*
                         * Peek at the last response of the burst and bail
                         * out unless the whole burst has arrived; head and
                         * resp must keep pointing at the first response.
                         */
                        struct icp_qat_fw_comn_resp *last_resp =
                                (struct icp_qat_fw_comn_resp *)(
                                (uint8_t *)rx_queue->base_addr +
                                ((head + rx_queue->msg_size * (n - 1)) &
                                        rx_queue->modulo_mask));
                        if (*(uint32_t *)last_resp == ADF_RING_EMPTY_SIG)
                                return 0;
                }
        } else {
                if (unlikely(max_nb_to_dequeue == 0))
                        return 0;
                n = max_nb_to_dequeue;
        }

        out_user_data[0] = resp_opaque;
        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
        post_dequeue(resp_opaque, 0, status);
        *n_success_jobs += status;

        head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

        /* we already finished dequeue when n == 1 */
        if (unlikely(n == 1)) {
                i = 1;
                goto end_deq;
        }

        if (is_user_data_array) {
                for (i = 1; i < n; i++) {
                        resp = (struct icp_qat_fw_comn_resp *)(
                                (uint8_t *)rx_queue->base_addr + head);
                        if (unlikely(*(uint32_t *)resp ==
                                        ADF_RING_EMPTY_SIG))
                                goto end_deq;
                        out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
                        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                        *n_success_jobs += status;
                        post_dequeue(out_user_data[i], i, status);
                        head = (head + rx_queue->msg_size) &
                                        rx_queue->modulo_mask;
                }

                goto end_deq;
        }

        /* opaque is not an array: post every index with the first opaque */
        for (i = 1; i < n; i++) {
                resp = (struct icp_qat_fw_comn_resp *)(
                        (uint8_t *)rx_queue->base_addr + head);
                if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                        goto end_deq;
                status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                head = (head + rx_queue->msg_size) &
                                rx_queue->modulo_mask;
                post_dequeue(resp_opaque, i, status);
                *n_success_jobs += status;
        }

end_deq:
        dp_ctx->head = head;
        dp_ctx->cached_dequeue += i;
        return i;
}

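/*
 * Dequeue one response; returns its opaque user data, or NULL when no
 * response is ready. op_status reflects the firmware status word.
 */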
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
                enum rte_crypto_op_status *op_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        register struct icp_qat_fw_comn_resp *resp;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        dp_ctx->head);

        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return NULL;

        dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
                        rx_queue->modulo_mask;
        dp_ctx->cached_dequeue++;

        *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
                        RTE_CRYPTO_OP_STATUS_SUCCESS :
                        RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        *dequeue_status = 0;
        return (void *)(uintptr_t)resp->opaque_data;
}

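/*
 * Commit cached enqueues to the device: verify the caller's count,
 * update the queue statistics and write the new tail to the ring CSR.
 */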
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_enqueue != n))
                return -1;

        qp->enqueued += n;
        qp->stats.enqueued_count += n;

        tx_queue->tail = dp_ctx->tail;

        WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
                        tx_queue->hw_bundle_number,
                        tx_queue->hw_queue_number, tx_queue->tail);
        tx_queue->csr_tail = tx_queue->tail;
        dp_ctx->cached_enqueue = 0;

        return 0;
}

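/*
 * Commit cached dequeues: advance the ring head, and once enough
 * responses have been processed, wipe the consumed descriptors back to
 * the empty signature and write the new head to the ring CSR.
 */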
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_dequeue != n))
                return -1;

        rx_queue->head = dp_ctx->head;
        rx_queue->nb_processed_responses += n;
        qp->dequeued += n;
        qp->stats.dequeued_count += n;
        if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
                uint32_t old_head, new_head;
                uint32_t max_head;

                old_head = rx_queue->csr_head;
                new_head = rx_queue->head;
                max_head = qp->nb_descriptors * rx_queue->msg_size;

                /* write out free descriptors */
                void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

                if (new_head < old_head) {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
                                        max_head - old_head);
                        memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
                                        new_head);
                } else {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
                                        old_head);
                }
                rx_queue->nb_processed_responses = 0;
                rx_queue->csr_head = new_head;

                /* write current head to CSR */
                WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
                        rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
                        new_head);
        }

        dp_ctx->cached_dequeue = 0;
        return 0;
}

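/*
 * Set up the raw data-path context for a queue pair: record the session
 * and select enqueue/dequeue callbacks that match the session's command
 * type (AEAD, chain, auth-only or cipher-only). Only session-based
 * operation is supported.
 *
 * A minimal usage sketch from the application side, assuming dev_id/qp_id,
 * a created session `sess` and filled `vec`/`ofs` descriptors; the
 * rte_cryptodev_raw_* calls are the generic API declared in
 * rte_cryptodev.h, not something defined in this file:
 *
 *      struct rte_crypto_raw_dp_ctx *ctx =
 *              malloc(rte_cryptodev_get_raw_dp_ctx_size(dev_id));
 *      union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *      int st;
 *
 *      rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *                      RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *      uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *                      user_data, &st);
 *      rte_cryptodev_raw_enqueue_done(ctx, n);
 *      n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_nb, post_cb,
 *                      out_user_data, 1, &n_ok, &st);
 *      rte_cryptodev_raw_dequeue_done(ctx, n);
 */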
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
        struct qat_qp *qp;
        struct qat_sym_session *ctx;
        struct qat_sym_dp_ctx *dp_ctx;

        qp = dev->data->queue_pairs[qp_id];
        dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

        if (!is_update) {
                memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
                                sizeof(struct qat_sym_dp_ctx));
                raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
                dp_ctx->tail = qp->tx_q.tail;
                dp_ctx->head = qp->rx_q.head;
                dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
        }

        if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
                return -EINVAL;

        ctx = (struct qat_sym_session *)get_sym_session_private_data(
                        session_ctx.crypto_sess, qat_sym_driver_id);

        dp_ctx->session = ctx;

        raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
        raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
        raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
        raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

        if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                /* AES-GCM or AES-CCM */
                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
                        (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
                        && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
                        && ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_aead_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
                } else {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_chain_jobs;
                        raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
                }
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
                raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
                raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_cipher_jobs;
                raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
        } else
                return -1;

        return 0;
}

int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
        return sizeof(struct qat_sym_dp_ctx);
}