/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>

#include <openssl/evp.h>

#include "qat_logs.h"
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_qp.h"
#include "adf_transport_access_macros.h"
#include "qat_device.h"

#define BYTE_LENGTH    8
/* bpi is only used for partial blocks of DES and AES
 * so AES block len can be assumed as max len for iv, src and dst
 */
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ

/** Encrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB encryption, same result, more performant
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
                uint8_t *iv, int ivlen, int srclen,
                void *bpi_ctx)
{
        EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
        int encrypted_ivlen;
        uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
        uint8_t *encr = encrypted_iv;

        /* ECB method: encrypt the IV, then XOR this with plaintext */
        if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
                                                                <= 0)
                goto cipher_encrypt_err;

        for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
                *dst = *src ^ *encr;

        return 0;

cipher_encrypt_err:
        PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
        return -EINVAL;
}
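/*
 * Note: for a single block, CFB encryption is C = P XOR E_K(IV), so one ECB
 * encryption of the IV followed by an XOR over at most one block's worth of
 * bytes gives the same result as a CFB-mode EVP call, without the chaining
 * overhead. For example (illustrative only), a 3-byte runt {p0, p1, p2}
 * becomes {p0 ^ ks[0], p1 ^ ks[1], p2 ^ ks[2]} where ks = E_K(IV), which
 * matches the AES-CFB128 output for those bytes.
 */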

/** Decrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB decryption, same result, more performant
 */
static inline int
bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
                uint8_t *iv, int ivlen, int srclen,
                void *bpi_ctx)
{
        EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
        int encrypted_ivlen;
        uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
        uint8_t *encr = encrypted_iv;

        /* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
        if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
                                                                <= 0)
                goto cipher_decrypt_err;

        for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
                *dst = *src ^ *encr;

        return 0;

cipher_decrypt_err:
        PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
        return -EINVAL;
}
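/*
 * Note: CFB decryption also runs the block cipher in the forward (encrypt)
 * direction, P = C XOR E_K(IV), which is why both helpers call
 * EVP_EncryptUpdate() on an ECB context; only the XOR operand differs
 * (plaintext on encrypt, ciphertext on decrypt).
 */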

/** Pre-process a BPI (DOCSIS) cipher operation: on the decrypt path, decrypt
 *  any trailing partial block in software (CFB) before the complete blocks
 *  are sent to the device.
 *  Depends on openssl libcrypto.
 *  Returns the length of the complete-block region of the data.
 */
static inline uint32_t
qat_bpicipher_preprocess(struct qat_sym_session *ctx,
                                struct rte_crypto_op *op)
{
        int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint8_t last_block_len = block_len > 0 ?
                        sym_op->cipher.data.length % block_len : 0;

        if (last_block_len &&
                        ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {

                /* Decrypt last block */
                uint8_t *last_block, *dst, *iv;
                uint32_t last_block_offset = sym_op->cipher.data.offset +
                                sym_op->cipher.data.length - last_block_len;
                last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
                                uint8_t *, last_block_offset);

                if (unlikely(sym_op->m_dst != NULL))
                        /* out-of-place operation (OOP) */
                        dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
                                                uint8_t *, last_block_offset);
                else
                        dst = last_block;

                if (last_block_len < sym_op->cipher.data.length)
                        /* use previous block ciphertext as IV */
                        iv = last_block - block_len;
                else
                        /* runt block, i.e. less than one full block */
                        iv = rte_crypto_op_ctod_offset(op, uint8_t *,
                                        ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
                rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
                        last_block_len);
                if (sym_op->m_dst != NULL)
                        rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
                                last_block_len);
#endif
                bpi_cipher_decrypt(last_block, dst, iv, block_len,
                                last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
                rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
                        last_block_len);
                if (sym_op->m_dst != NULL)
                        rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
                                last_block_len);
#endif
        }

        return sym_op->cipher.data.length - last_block_len;
}
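/*
 * Illustration (hypothetical numbers): for a DOCSIS decrypt of 100 bytes with
 * a 16-byte AES block, the trailing 4-byte runt is CFB-decrypted above in
 * software, using the previous ciphertext block (or the op IV if the whole
 * request is a runt) as IV, and the function returns 96, the CBC region that
 * the device will process.
 */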

static inline uint32_t
qat_bpicipher_postprocess(struct qat_sym_session *ctx,
                                struct rte_crypto_op *op)
{
        int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint8_t last_block_len = block_len > 0 ?
                        sym_op->cipher.data.length % block_len : 0;

        if (last_block_len > 0 &&
                        ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

                /* Encrypt last block */
                uint8_t *last_block, *dst, *iv;
                uint32_t last_block_offset;

                last_block_offset = sym_op->cipher.data.offset +
                                sym_op->cipher.data.length - last_block_len;
                last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
                                uint8_t *, last_block_offset);

                if (unlikely(sym_op->m_dst != NULL))
                        /* out-of-place operation (OOP) */
                        dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
                                                uint8_t *, last_block_offset);
                else
                        dst = last_block;

                if (last_block_len < sym_op->cipher.data.length)
                        /* use previous block ciphertext as IV */
                        iv = dst - block_len;
                else
                        /* runt block, i.e. less than one full block */
                        iv = rte_crypto_op_ctod_offset(op, uint8_t *,
                                        ctx->cipher_iv.offset);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
                rte_hexdump(stdout, "BPI: src before post-process:", last_block,
                        last_block_len);
                if (sym_op->m_dst != NULL)
                        rte_hexdump(stdout, "BPI: dst before post-process:",
                                        dst, last_block_len);
#endif
                bpi_cipher_encrypt(last_block, dst, iv, block_len,
                                last_block_len, ctx->bpi_ctx);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
                rte_hexdump(stdout, "BPI: src after post-process:", last_block,
                        last_block_len);
                if (sym_op->m_dst != NULL)
                        rte_hexdump(stdout, "BPI: dst after post-process:", dst,
                                last_block_len);
#endif
        }
        return sym_op->cipher.data.length - last_block_len;
}
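/*
 * The post-process counterpart runs after dequeue, on the encrypt path: the
 * device has already CBC-encrypted the complete blocks, and the trailing runt
 * is then CFB-encrypted here using the last ciphertext block written by the
 * device (or the op IV if the request was only a runt) as IV.
 */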

uint16_t
qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}

static int
qat_sym_process_response(void **op, uint8_t *resp,
                __rte_unused void *op_cookie,
                __rte_unused enum qat_device_gen qat_dev_gen)
{

        struct icp_qat_fw_comn_resp *resp_msg =
                        (struct icp_qat_fw_comn_resp *)resp;
        struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
                        (resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
        rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
                        sizeof(struct icp_qat_fw_comn_resp));
#endif

        if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
                        ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
                        resp_msg->comn_hdr.comn_status)) {

                rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        } else {
                struct qat_sym_session *sess = (struct qat_sym_session *)
                                                get_session_private_data(
                                                rx_op->sym->session,
                                                cryptodev_qat_driver_id);

                if (sess->bpi_ctx)
                        qat_bpicipher_postprocess(sess, rx_op);
                rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        }
        *op = (void *)rx_op;

        return 0;
}


uint16_t
qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}

static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
                struct icp_qat_fw_la_cipher_req_params *cipher_param,
                struct rte_crypto_op *op,
                struct icp_qat_fw_la_bulk_req *qat_req)
{
        /* copy IV into request if it fits */
        if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
                rte_memcpy(cipher_param->u.cipher_IV_array,
                                rte_crypto_op_ctod_offset(op, uint8_t *,
                                        iv_offset),
                                iv_length);
        } else {
                ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
                                qat_req->comn_hdr.serv_specif_flags,
                                ICP_QAT_FW_CIPH_IV_64BIT_PTR);
                cipher_param->u.s.cipher_IV_ptr =
                                rte_crypto_op_ctophys_offset(op,
                                        iv_offset);
        }
}
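/*
 * An IV that fits in the inline cipher_IV_array is copied straight into the
 * request descriptor; a longer IV is instead passed by physical address, with
 * the 64-bit-pointer flag set so the firmware dereferences cipher_IV_ptr
 * rather than reading the inline array.
 */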

/** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
 *  where q is the size of the length field, i.e. the bytes left in the
 *  16-byte counter block after the flags byte and the nonce.
 */
static inline void
set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
                struct icp_qat_fw_la_cipher_req_params *cipher_param,
                struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
{
        rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
                        ICP_QAT_HW_CCM_NONCE_OFFSET,
                        rte_crypto_op_ctod_offset(op, uint8_t *,
                                iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        iv_length);
        *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
                        q - ICP_QAT_HW_CCM_NONCE_OFFSET;

        if (aad_len_field_sz)
                rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
                        rte_crypto_op_ctod_offset(op, uint8_t *,
                                iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
                        iv_length);
}
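/*
 * This follows the NIST SP 800-38C counter-block layout:
 * flags (1 byte) || nonce || message-length field, where the nonce length is
 * n = 15 - q and the low bits of the flags byte encode q-1. The nonce is
 * copied in at ICP_QAT_HW_CCM_NONCE_OFFSET (just after the flags byte), both
 * into the request IV and, when real AAD is present, into the B0 block that
 * sits at the start of the AAD buffer.
 */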

static int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
                void *op_cookie, enum qat_device_gen qat_dev_gen)
{
        int ret = 0;
        struct qat_sym_session *ctx;
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        register struct icp_qat_fw_la_bulk_req *qat_req;
        uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
        uint32_t cipher_len = 0, cipher_ofs = 0;
        uint32_t auth_len = 0, auth_ofs = 0;
        uint32_t min_ofs = 0;
        uint64_t src_buf_start = 0, dst_buf_start = 0;
        uint8_t do_sgl = 0;
        struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
        struct qat_sym_op_cookie *cookie =
                                (struct qat_sym_op_cookie *)op_cookie;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
        if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
                PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
                                "operation requests, op (%p) is not a "
                                "symmetric operation.", op);
                return -EINVAL;
        }
#endif
        if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
                                " requests, op (%p) is sessionless.", op);
                return -EINVAL;
        }

        ctx = (struct qat_sym_session *)get_session_private_data(
                        op->sym->session, cryptodev_qat_driver_id);

        if (unlikely(ctx == NULL)) {
                PMD_DRV_LOG(ERR, "Session was not created for this device");
                return -EINVAL;
        }

        if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
                PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
                return -EINVAL;
        }

        qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
        qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
        cipher_param = (void *)&qat_req->serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                /* AES-GCM or AES-CCM */
                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
                        (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
                        && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
                        && ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
                        do_aead = 1;
                } else {
                        do_auth = 1;
                        do_cipher = 1;
                }
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
                do_auth = 1;
                do_cipher = 0;
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                do_auth = 0;
                do_cipher = 1;
        }
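        /*
         * At this point do_aead is set for the true AEAD transforms: GCM
         * (GALOIS_128/GALOIS_64 hash) and CCM, which the hardware expresses
         * as AES-CTR plus AES-CBC-MAC. Chained cipher+hash sessions take the
         * separate do_cipher and do_auth paths below instead.
         */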

        if (do_cipher) {

                if (ctx->qat_cipher_alg ==
                                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
                        ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
                        ctx->qat_cipher_alg ==
                                ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {

                        /* cipher_param still holds the session template here,
                         * so validate the per-op cipher length/offset.
                         */
                        if (unlikely(
                                (op->sym->cipher.data.length % BYTE_LENGTH != 0)
                                 || (op->sym->cipher.data.offset
                                                        % BYTE_LENGTH != 0))) {
                                PMD_DRV_LOG(ERR,
                  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
                                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                                return -EINVAL;
                        }
                        cipher_len = op->sym->cipher.data.length >> 3;
                        cipher_ofs = op->sym->cipher.data.offset >> 3;

                } else if (ctx->bpi_ctx) {
                        /* DOCSIS - only send complete blocks to device
                         * Process any partial block using CFB mode.
                         * Even if 0 complete blocks, still send this to device
                         * to get into rx queue for post-process and dequeuing
                         */
                        cipher_len = qat_bpicipher_preprocess(ctx, op);
                        cipher_ofs = op->sym->cipher.data.offset;
                } else {
                        cipher_len = op->sym->cipher.data.length;
                        cipher_ofs = op->sym->cipher.data.offset;
                }

                set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
                                cipher_param, op, qat_req);
                min_ofs = cipher_ofs;
        }

        if (do_auth) {

                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
                        ctx->qat_hash_alg ==
                                ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
                        /* auth_param still holds the session template here,
                         * so validate the per-op auth offset/length.
                         */
                        if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0)
                                || (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
                                PMD_DRV_LOG(ERR,
                "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
                                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                                return -EINVAL;
                        }
                        auth_ofs = op->sym->auth.data.offset >> 3;
                        auth_len = op->sym->auth.data.length >> 3;

                        auth_param->u1.aad_adr =
                                        rte_crypto_op_ctophys_offset(op,
                                                        ctx->auth_iv.offset);

                } else if (ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                                ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
                        /* AES-GMAC */
                        set_cipher_iv(ctx->auth_iv.length,
                                ctx->auth_iv.offset,
                                cipher_param, op, qat_req);
                        auth_ofs = op->sym->auth.data.offset;
                        auth_len = op->sym->auth.data.length;

                        auth_param->u1.aad_adr = 0;
                        auth_param->u2.aad_sz = 0;

                        /*
                         * If len(iv)==12B fw computes J0
                         */
                        if (ctx->auth_iv.length == 12) {
                                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                                        qat_req->comn_hdr.serv_specif_flags,
                                        ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);

                        }
                } else {
                        auth_ofs = op->sym->auth.data.offset;
                        auth_len = op->sym->auth.data.length;

                }
                min_ofs = auth_ofs;

                if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
                        auth_param->auth_res_addr =
                                        op->sym->auth.digest.phys_addr;

        }

        if (do_aead) {
                /*
                 * This address may be used to set the AAD physical pointer;
                 * for CCM without real AAD it is redirected to the IV offset
                 * within the op below.
                 */
                rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
                if (ctx->qat_hash_alg ==
                                ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                                ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
                        /*
                         * If len(iv)==12B fw computes J0
                         */
                        if (ctx->cipher_iv.length == 12) {
                                ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
                                        qat_req->comn_hdr.serv_specif_flags,
                                        ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
                        }
                        set_cipher_iv(ctx->cipher_iv.length,
                                        ctx->cipher_iv.offset,
                                        cipher_param, op, qat_req);

                } else if (ctx->qat_hash_alg ==
                                ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {

                        /* In the AES-CCM case this may point to user-selected
                         * memory or to the IV offset in the crypto_op
                         */
                        uint8_t *aad_data = op->sym->aead.aad.data;
                        /* This is the true AAD length; it does not include the
                         * 18 bytes of preceding data (B0 block and AAD length
                         * field)
                         */
                        uint8_t aad_ccm_real_len = 0;
                        uint8_t aad_len_field_sz = 0;
                        uint32_t msg_len_be =
                                        rte_bswap32(op->sym->aead.data.length);

                        if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
                                aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
                                aad_ccm_real_len = ctx->aad_len -
                                        ICP_QAT_HW_CCM_AAD_B0_LEN -
                                        ICP_QAT_HW_CCM_AAD_LEN_INFO;
                        } else {
                                /*
                                 * aad_len is not greater than 18, so there is
                                 * no actual AAD data; use the IV area after
                                 * the op to build the B0 block
                                 */
                                aad_data = rte_crypto_op_ctod_offset(op,
                                                uint8_t *,
                                                ctx->cipher_iv.offset);
                                aad_phys_addr_aead =
                                                rte_crypto_op_ctophys_offset(op,
                                                        ctx->cipher_iv.offset);
                        }

                        uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
                                                        ctx->cipher_iv.length;

                        aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
                                                        aad_len_field_sz,
                                                        ctx->digest_length, q);

                        if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
                                memcpy(aad_data + ctx->cipher_iv.length +
                                    ICP_QAT_HW_CCM_NONCE_OFFSET +
                                    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
                                    (uint8_t *)&msg_len_be,
                                    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
                        } else {
                                memcpy(aad_data + ctx->cipher_iv.length +
                                    ICP_QAT_HW_CCM_NONCE_OFFSET,
                                    (uint8_t *)&msg_len_be
                                    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
                                    - q), q);
                        }

                        if (aad_len_field_sz > 0) {
                                *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
                                                = rte_bswap16(aad_ccm_real_len);

                                if ((aad_ccm_real_len + aad_len_field_sz)
                                                % ICP_QAT_HW_CCM_AAD_B0_LEN) {
                                        uint8_t pad_len = 0;
                                        uint8_t pad_idx = 0;

                                        pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
                                        ((aad_ccm_real_len + aad_len_field_sz) %
                                                ICP_QAT_HW_CCM_AAD_B0_LEN);
                                        pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
                                            aad_ccm_real_len + aad_len_field_sz;
                                        memset(&aad_data[pad_idx],
                                                        0, pad_len);
                                }

                        }

                        set_cipher_iv_ccm(ctx->cipher_iv.length,
                                        ctx->cipher_iv.offset,
                                        cipher_param, op, q,
                                        aad_len_field_sz);

                }

                cipher_len = op->sym->aead.data.length;
                cipher_ofs = op->sym->aead.data.offset;
                auth_len = op->sym->aead.data.length;
                auth_ofs = op->sym->aead.data.offset;

                auth_param->u1.aad_adr = aad_phys_addr_aead;
                auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
                min_ofs = op->sym->aead.data.offset;
        }
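        /*
         * Illustration of the CCM path above: the B0 block is assembled at
         * the start of the AAD buffer, with the flags byte encoding the
         * AAD-present bit, the digest length and q, followed by the nonce and
         * the big-endian message length. Any real AAD then starts after a
         * length field and is zero-padded to a B0-sized boundary, assuming
         * the usual ICP_QAT_HW_CCM_AAD_B0_LEN / ICP_QAT_HW_CCM_AAD_LEN_INFO
         * values of 16 and 2 bytes.
         */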

        if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
                do_sgl = 1;

        /* adjust for chain case */
        if (do_cipher && do_auth)
                min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;

        if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
                min_ofs = 0;

        if (unlikely(op->sym->m_dst != NULL)) {
                /* Out-of-place operation (OOP)
                 * Don't align DMA start. DMA the minimum data-set
                 * so as not to overwrite data in dest buffer
                 */
                src_buf_start =
                        rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
                dst_buf_start =
                        rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);

        } else {
                /* In-place operation
                 * Start DMA at nearest aligned address below min_ofs
                 */
                src_buf_start =
                        rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
                                                & QAT_64_BTYE_ALIGN_MASK;

                if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
                                        rte_pktmbuf_headroom(op->sym->m_src))
                                                        > src_buf_start)) {
                        /* alignment has pushed addr ahead of start of mbuf
                         * so revert and take the performance hit
                         */
                        src_buf_start =
                                rte_pktmbuf_iova_offset(op->sym->m_src,
                                                                min_ofs);
                }
                dst_buf_start = src_buf_start;
        }
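        /*
         * Example (illustrative addresses): for an in-place request whose
         * lowest offset sits at IOVA 0x1046, the DMA start is rounded down to
         * 0x1040 so the device sees a 64-byte-aligned buffer; the cipher and
         * auth offsets computed below are then expressed relative to that
         * start. The rounding is skipped for out-of-place requests so bytes
         * below min_ofs in the destination buffer are never written.
         */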

        if (do_cipher || do_aead) {
                cipher_param->cipher_offset =
                                (uint32_t)rte_pktmbuf_iova_offset(
                                op->sym->m_src, cipher_ofs) - src_buf_start;
                cipher_param->cipher_length = cipher_len;
        } else {
                cipher_param->cipher_offset = 0;
                cipher_param->cipher_length = 0;
        }

        if (do_auth || do_aead) {
                auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
                                op->sym->m_src, auth_ofs) - src_buf_start;
                auth_param->auth_len = auth_len;
        } else {
                auth_param->auth_off = 0;
                auth_param->auth_len = 0;
        }

        qat_req->comn_mid.dst_length =
                qat_req->comn_mid.src_length =
                (cipher_param->cipher_offset + cipher_param->cipher_length)
                > (auth_param->auth_off + auth_param->auth_len) ?
                (cipher_param->cipher_offset + cipher_param->cipher_length)
                : (auth_param->auth_off + auth_param->auth_len);
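        /*
         * The request src/dst length is the larger of the two regions, i.e.
         * max(cipher_offset + cipher_length, auth_off + auth_len), measured
         * from the DMA start chosen above, so the transfer covers whichever
         * of the cipher or auth region extends furthest.
         */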

        if (do_sgl) {

                ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_SGL);
                ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
                                &cookie->qat_sgl_src,
                                qat_req->comn_mid.src_length);
                if (ret) {
                        PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
                        return ret;
                }

                if (likely(op->sym->m_dst == NULL))
                        qat_req->comn_mid.dest_data_addr =
                                qat_req->comn_mid.src_data_addr =
                                cookie->qat_sgl_src_phys_addr;
                else {
                        ret = qat_sgl_fill_array(op->sym->m_dst,
                                        dst_buf_start,
                                        &cookie->qat_sgl_dst,
                                                qat_req->comn_mid.dst_length);

                        if (ret) {
                                PMD_DRV_LOG(ERR, "QAT PMD Cannot "
                                                "fill sgl array");
                                return ret;
                        }

                        qat_req->comn_mid.src_data_addr =
                                cookie->qat_sgl_src_phys_addr;
                        qat_req->comn_mid.dest_data_addr =
                                        cookie->qat_sgl_dst_phys_addr;
                }
        } else {
                qat_req->comn_mid.src_data_addr = src_buf_start;
                qat_req->comn_mid.dest_data_addr = dst_buf_start;
        }
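        /*
         * For chained mbufs the request carries the physical address of a
         * scatter-gather list built in the per-op cookie (one list for src,
         * one for dst when the operation is out-of-place); flat buffers are
         * passed directly by the DMA start addresses computed above.
         */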

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
        rte_hexdump(stdout, "qat_req:", qat_req,
                        sizeof(struct icp_qat_fw_la_bulk_req));
        rte_hexdump(stdout, "src_data:",
                        rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
                        rte_pktmbuf_data_len(op->sym->m_src));
        if (do_cipher) {
                uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
                                                uint8_t *,
                                                ctx->cipher_iv.offset);
                rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
                                ctx->cipher_iv.length);
        }

        if (do_auth) {
                if (ctx->auth_iv.length) {
                        uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
                                                        uint8_t *,
                                                        ctx->auth_iv.offset);
                        rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
                                                ctx->auth_iv.length);
                }
                rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
                                ctx->digest_length);
        }

        if (do_aead) {
                rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
                                ctx->digest_length);
                rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
                                ctx->aad_len);
        }
#endif
        return 0;
}


static void qat_stats_get(struct qat_pci_device *dev,
                struct qat_common_stats *stats,
                enum qat_service_type service)
{
        int i;
        struct qat_qp **qp;

        if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
                PMD_DRV_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
                                stats, dev, service);
                return;
        }

        qp = dev->qps_in_use[service];
        for (i = 0; i < ADF_MAX_QPS_PER_BUNDLE; i++) {
                if (qp[i] == NULL) {
                        PMD_DRV_LOG(DEBUG, "Service %d Uninitialised qp %d",
                                        service, i);
                        continue;
                }

                stats->enqueued_count += qp[i]->stats.enqueued_count;
                stats->dequeued_count += qp[i]->stats.dequeued_count;
                stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
                stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
        }
}

void qat_sym_stats_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_stats *stats)
{
        struct qat_common_stats qat_stats = {0};
        struct qat_sym_dev_private *qat_priv;

        if (stats == NULL || dev == NULL) {
                PMD_DRV_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
                return;
        }
        qat_priv = dev->data->dev_private;

        qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
        stats->enqueued_count = qat_stats.enqueued_count;
        stats->dequeued_count = qat_stats.dequeued_count;
        stats->enqueue_err_count = qat_stats.enqueue_err_count;
        stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

static void qat_stats_reset(struct qat_pci_device *dev,
                enum qat_service_type service)
{
        int i;
        struct qat_qp **qp;

        if (dev == NULL || service >= QAT_SERVICE_INVALID) {
                PMD_DRV_LOG(ERR, "invalid param: dev %p, service %d",
                                dev, service);
                return;
        }

        qp = dev->qps_in_use[service];
        for (i = 0; i < ADF_MAX_QPS_PER_BUNDLE; i++) {
                if (qp[i] == NULL) {
                        PMD_DRV_LOG(DEBUG, "Service %d Uninitialised qp %d",
                                        service, i);
                        continue;
                }
                memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
        }

        PMD_DRV_LOG(DEBUG, "QAT crypto: %d stats cleared", service);
}

void qat_sym_stats_reset(struct rte_cryptodev *dev)
{
        struct qat_sym_dev_private *qat_priv;

        if (dev == NULL) {
                PMD_DRV_LOG(ERR, "invalid cryptodev ptr %p", dev);
                return;
        }
        qat_priv = dev->data->dev_private;

        qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);

}

int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
        struct qat_sym_dev_private *qat_private = dev->data->dev_private;

        PMD_DRV_LOG(DEBUG, "Release sym qp %u on device %d",
                                queue_pair_id, dev->data->dev_id);

        qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
                                                = NULL;

        return qat_qp_release((struct qat_qp **)
                        &(dev->data->queue_pairs[queue_pair_id]));
}

int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id, struct rte_mempool *session_pool __rte_unused)
{
        struct qat_qp *qp;
        int ret = 0;
        uint32_t i;
        struct qat_qp_config qat_qp_conf;

        struct qat_qp **qp_addr =
                        (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
        struct qat_sym_dev_private *qat_private = dev->data->dev_private;
        const struct qat_qp_hw_data *sym_hw_qps =
                        qp_gen_config[qat_private->qat_dev->qat_dev_gen]
                                      .qp_hw_data[QAT_SERVICE_SYMMETRIC];
        const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;

        /* If qp is already in use free ring memory and qp metadata. */
        if (*qp_addr != NULL) {
                ret = qat_sym_qp_release(dev, qp_id);
                if (ret < 0)
                        return ret;
        }
        if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device", qp_id);
                return -EINVAL;
        }

        qat_qp_conf.hw = qp_hw_data;
        qat_qp_conf.build_request = qat_sym_build_request;
        qat_qp_conf.process_response = qat_sym_process_response;
        qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
        qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
        qat_qp_conf.socket_id = socket_id;
        qat_qp_conf.service_str = "sym";

        ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
        if (ret != 0)
                return ret;

        /* store a link to the qp in the qat_pci_device */
        qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id]
                                                        = *qp_addr;

        qp = (struct qat_qp *)*qp_addr;

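        /*
         * Pre-compute the physical address of each cookie's SGL structures
         * once at setup time, so the data path does not need a
         * rte_mempool_virt2iova() lookup per enqueued operation.
         */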
        for (i = 0; i < qp->nb_descriptors; i++) {

                struct qat_sym_op_cookie *sql_cookie =
                                qp->op_cookies[i];

                sql_cookie->qat_sgl_src_phys_addr =
                                rte_mempool_virt2iova(sql_cookie) +
                                offsetof(struct qat_sym_op_cookie,
                                qat_sgl_src);

                sql_cookie->qat_sgl_dst_phys_addr =
                                rte_mempool_virt2iova(sql_cookie) +
                                offsetof(struct qat_sym_op_cookie,
                                qat_sgl_dst);
        }

        return ret;
}