dpdk.git / drivers/crypto/qat/qat_sym.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2018 Intel Corporation
3  */
4
5 #include <rte_mempool.h>
6 #include <rte_mbuf.h>
7 #include <rte_hexdump.h>
8 #include <rte_crypto_sym.h>
9 #include <rte_bus_pci.h>
10 #include <rte_byteorder.h>
11
12 #include <openssl/evp.h>
13
14 #include "qat_logs.h"
15 #include "qat_sym_session.h"
16 #include "qat_sym.h"
17 #include "qat_qp.h"
18 #include "adf_transport_access_macros.h"
19
20 #define BYTE_LENGTH    8
21 /* BPI is only used for partial blocks of DES and AES,
22  * so the AES block length can be taken as the max length for iv, src and dst
23  */
24 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
25
26 #define ADF_SYM_TX_RING_DESC_SIZE               128
27 #define ADF_SYM_RX_RING_DESC_SIZE               32
28 /* Offsets from bundle start to the 1st Sym Tx and Rx queues */
29 #define ADF_SYM_TX_QUEUE_STARTOFF               2
30 #define ADF_SYM_RX_QUEUE_STARTOFF               10
31
32 /** Encrypt a single partial block
33  *  Depends on openssl libcrypto
34  *  Uses ECB+XOR to do CFB encryption, same result, more performant
35  */
36 static inline int
37 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
38                 uint8_t *iv, int ivlen, int srclen,
39                 void *bpi_ctx)
40 {
41         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
42         int encrypted_ivlen;
43         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
44         uint8_t *encr = encrypted_iv;
45
46         /* ECB method: encrypt the IV, then XOR this with plaintext */
47         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
48                                                                 <= 0)
49                 goto cipher_encrypt_err;
50
51         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
52                 *dst = *src ^ *encr;
53
54         return 0;
55
56 cipher_encrypt_err:
57         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
58         return -EINVAL;
59 }
60
61 /** Decrypt a single partial block
62  *  Depends on openssl libcrypto
63  *  Uses ECB+XOR to do CFB decryption, same result, more performant
64  */
65 static inline int
66 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
67                 uint8_t *iv, int ivlen, int srclen,
68                 void *bpi_ctx)
69 {
70         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
71         int encrypted_ivlen;
72         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
73         uint8_t *encr = encrypted_iv;
74
75         /* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
76         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
77                                                                 <= 0)
78                 goto cipher_decrypt_err;
79
80         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
81                 *dst = *src ^ *encr;
82
83         return 0;
84
85 cipher_decrypt_err:
86         PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
87         return -EINVAL;
88 }
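/*
 * A worked illustration of the ECB+XOR trick above (only a sketch; "runt",
 * "out", "iv" and "sess" are hypothetical names): for an AES BPI session
 * whose bpi_ctx holds an EVP ECB cipher context, a 3-byte DOCSIS runt block
 * can be handled as
 *
 *     bpi_cipher_encrypt(runt, out, iv, ICP_QAT_HW_AES_BLK_SZ, 3,
 *                     sess->bpi_ctx);
 *
 * which matches AES-CFB128 encryption of those 3 bytes with the same key and
 * IV, because for fewer than 16 bytes CFB reduces to C = P XOR E_k(IV).
 */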
89
90 /** Pre-process (decrypt) the trailing partial cipher block for DOCSIS BPI
91  *  mode and return the length of the complete blocks to send to the device
92  *  Depends on openssl libcrypto
93  */
94 static inline uint32_t
95 qat_bpicipher_preprocess(struct qat_sym_session *ctx,
96                                 struct rte_crypto_op *op)
97 {
98         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
99         struct rte_crypto_sym_op *sym_op = op->sym;
100         uint8_t last_block_len = block_len > 0 ?
101                         sym_op->cipher.data.length % block_len : 0;
102
103         if (last_block_len &&
104                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
105
106                 /* Decrypt last block */
107                 uint8_t *last_block, *dst, *iv;
108                 uint32_t last_block_offset = sym_op->cipher.data.offset +
109                                 sym_op->cipher.data.length - last_block_len;
110                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
111                                 uint8_t *, last_block_offset);
112
113                 if (unlikely(sym_op->m_dst != NULL))
114                         /* out-of-place operation (OOP) */
115                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
116                                                 uint8_t *, last_block_offset);
117                 else
118                         dst = last_block;
119
120                 if (last_block_len < sym_op->cipher.data.length)
121                         /* use previous block ciphertext as IV */
122                         iv = last_block - block_len;
123                 else
124                         /* runt block, i.e. less than one full block */
125                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
126                                         ctx->cipher_iv.offset);
127
128 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
129                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
130                         last_block_len);
131                 if (sym_op->m_dst != NULL)
132                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
133                                 last_block_len);
134 #endif
135                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
136                                 last_block_len, ctx->bpi_ctx);
137 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
138                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
139                         last_block_len);
140                 if (sym_op->m_dst != NULL)
141                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
142                                 last_block_len);
143 #endif
144         }
145
146         return sym_op->cipher.data.length - last_block_len;
147 }
148
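/** Post-process (encrypt) the trailing partial cipher block for DOCSIS BPI
 *  mode after the device has processed the complete blocks
 *  Depends on openssl libcrypto
 */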
149 static inline uint32_t
150 qat_bpicipher_postprocess(struct qat_sym_session *ctx,
151                                 struct rte_crypto_op *op)
152 {
153         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
154         struct rte_crypto_sym_op *sym_op = op->sym;
155         uint8_t last_block_len = block_len > 0 ?
156                         sym_op->cipher.data.length % block_len : 0;
157
158         if (last_block_len > 0 &&
159                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
160
161                 /* Encrypt last block */
162                 uint8_t *last_block, *dst, *iv;
163                 uint32_t last_block_offset;
164
165                 last_block_offset = sym_op->cipher.data.offset +
166                                 sym_op->cipher.data.length - last_block_len;
167                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
168                                 uint8_t *, last_block_offset);
169
170                 if (unlikely(sym_op->m_dst != NULL))
171                         /* out-of-place operation (OOP) */
172                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
173                                                 uint8_t *, last_block_offset);
174                 else
175                         dst = last_block;
176
177                 if (last_block_len < sym_op->cipher.data.length)
178                         /* use previous block ciphertext as IV */
179                         iv = dst - block_len;
180                 else
181                         /* runt block, i.e. less than one full block */
182                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
183                                         ctx->cipher_iv.offset);
184
185 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
186                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
187                         last_block_len);
188                 if (sym_op->m_dst != NULL)
189                         rte_hexdump(stdout, "BPI: dst before post-process:",
190                                         dst, last_block_len);
191 #endif
192                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
193                                 last_block_len, ctx->bpi_ctx);
194 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
195                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
196                         last_block_len);
197                 if (sym_op->m_dst != NULL)
198                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
199                                 last_block_len);
200 #endif
201         }
202         return sym_op->cipher.data.length - last_block_len;
203 }
204
205 uint16_t
206 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
207                 uint16_t nb_ops)
208 {
209         return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
210 }
211
212 int
213 qat_sym_process_response(void **op, uint8_t *resp,
214                 __rte_unused void *op_cookie,
215                 __rte_unused enum qat_device_gen qat_dev_gen)
216 {
217
218         struct icp_qat_fw_comn_resp *resp_msg =
219                         (struct icp_qat_fw_comn_resp *)resp;
220         struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
221                         (resp_msg->opaque_data);
222
223 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
224         rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
225                         sizeof(struct icp_qat_fw_comn_resp));
226 #endif
227
228         if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
229                         ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
230                         resp_msg->comn_hdr.comn_status)) {
231
232                 rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
233         } else {
234                 struct qat_sym_session *sess = (struct qat_sym_session *)
235                                                 get_session_private_data(
236                                                 rx_op->sym->session,
237                                                 cryptodev_qat_driver_id);
238
239                 if (sess->bpi_ctx)
240                         qat_bpicipher_postprocess(sess, rx_op);
241                 rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
242         }
243         *op = (void *)rx_op;
244
245         return 0;
246 }
247
248
249 uint16_t
250 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
251                 uint16_t nb_ops)
252 {
253         return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
254 }
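/*
 * Illustrative usage (not part of the driver): the two burst functions above
 * are installed as the cryptodev enqueue/dequeue handlers, so an application
 * normally reaches them through the generic API, e.g.
 *
 *     uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *     uint16_t m = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * where dev_id and qp_id identify a QAT sym cryptodev and one of its
 * configured queue pairs.
 */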
255
256 static inline int
257 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
258                 struct qat_alg_buf_list *list, uint32_t data_len)
259 {
260         int nr = 1;
261
262         uint32_t buf_len = rte_pktmbuf_iova(buf) -
263                         buff_start + rte_pktmbuf_data_len(buf);
264
265         list->bufers[0].addr = buff_start;
266         list->bufers[0].resrvd = 0;
267         list->bufers[0].len = buf_len;
268
269         if (data_len <= buf_len) {
270                 list->num_bufs = nr;
271                 list->bufers[0].len = data_len;
272                 return 0;
273         }
274
275         buf = buf->next;
276         while (buf) {
277                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
278                         PMD_DRV_LOG(ERR, "QAT PMD exceeded max number of"
279                                         " QAT SGL entries (%u)",
280                                         QAT_SGL_MAX_NUMBER);
281                         return -EINVAL;
282                 }
283
284                 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
285                 list->bufers[nr].resrvd = 0;
286                 list->bufers[nr].addr = rte_pktmbuf_iova(buf);
287
288                 buf_len += list->bufers[nr].len;
289                 buf = buf->next;
290
291                 if (buf_len > data_len) {
292                         list->bufers[nr].len -=
293                                 buf_len - data_len;
294                         buf = NULL;
295                 }
296                 ++nr;
297         }
298         list->num_bufs = nr;
299
300         return 0;
301 }
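/*
 * Worked example (illustrative): for a two-segment mbuf of 100 + 60 bytes of
 * data, buff_start pointing 10 bytes into the first segment and
 * data_len = 120, the array built above is
 *
 *     bufers[0]: addr = buff_start,  len = 90  (rest of segment 0)
 *     bufers[1]: addr = iova(seg 1), len = 30  (trimmed so the total is 120)
 *
 * with num_bufs = 2.
 */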
302
303 static inline void
304 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
305                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
306                 struct rte_crypto_op *op,
307                 struct icp_qat_fw_la_bulk_req *qat_req)
308 {
309         /* copy IV into request if it fits */
310         if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
311                 rte_memcpy(cipher_param->u.cipher_IV_array,
312                                 rte_crypto_op_ctod_offset(op, uint8_t *,
313                                         iv_offset),
314                                 iv_length);
315         } else {
316                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
317                                 qat_req->comn_hdr.serv_specif_flags,
318                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
319                 cipher_param->u.s.cipher_IV_ptr =
320                                 rte_crypto_op_ctophys_offset(op,
321                                         iv_offset);
322         }
323 }
324
325 /** Setting the IV for CCM is a special case: byte 0 is set to q-1, where
326  *  q is the size of the length field that pads the nonce to a 16-byte block
327  */
328 static inline void
329 set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
330                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
331                 struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
332 {
333         rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
334                         ICP_QAT_HW_CCM_NONCE_OFFSET,
335                         rte_crypto_op_ctod_offset(op, uint8_t *,
336                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
337                         iv_length);
338         *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
339                         q - ICP_QAT_HW_CCM_NONCE_OFFSET;
340
341         if (aad_len_field_sz)
342                 rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
343                         rte_crypto_op_ctod_offset(op, uint8_t *,
344                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
345                         iv_length);
346 }
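/*
 * Note: this mirrors the CCM counter-block layout of NIST SP 800-38C, where
 * byte 0 carries q-1 and the nonce occupies bytes 1..(15-q). The same nonce
 * is also copied in just after the B0 flags byte of the AAD buffer when an
 * AAD length field is present.
 */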
347
348
349 int
350 qat_sym_build_request(void *in_op, uint8_t *out_msg,
351                 void *op_cookie, enum qat_device_gen qat_dev_gen)
352 {
353         int ret = 0;
354         struct qat_sym_session *ctx;
355         struct icp_qat_fw_la_cipher_req_params *cipher_param;
356         struct icp_qat_fw_la_auth_req_params *auth_param;
357         register struct icp_qat_fw_la_bulk_req *qat_req;
358         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
359         uint32_t cipher_len = 0, cipher_ofs = 0;
360         uint32_t auth_len = 0, auth_ofs = 0;
361         uint32_t min_ofs = 0;
362         uint64_t src_buf_start = 0, dst_buf_start = 0;
363         uint8_t do_sgl = 0;
364         struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
365         struct qat_sym_op_cookie *cookie =
366                                 (struct qat_sym_op_cookie *)op_cookie;
367
368 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
369         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
370                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
371                                 "operation requests, op (%p) is not a "
372                                 "symmetric operation.", op);
373                 return -EINVAL;
374         }
375 #endif
376         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
377                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
378                                 " requests, op (%p) is sessionless.", op);
379                 return -EINVAL;
380         }
381
382         ctx = (struct qat_sym_session *)get_session_private_data(
383                         op->sym->session, cryptodev_qat_driver_id);
384
385         if (unlikely(ctx == NULL)) {
386                 PMD_DRV_LOG(ERR, "Session was not created for this device");
387                 return -EINVAL;
388         }
389
390         if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
391                 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
392                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
393                 return -EINVAL;
394         }
395
396         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
397         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
398         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
399         cipher_param = (void *)&qat_req->serv_specif_rqpars;
400         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
401
402         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
403                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
404                 /* AES-GCM or AES-CCM */
405                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
406                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
407                         (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
408                         && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
409                         && ctx->qat_hash_alg ==
410                                         ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
411                         do_aead = 1;
412                 } else {
413                         do_auth = 1;
414                         do_cipher = 1;
415                 }
416         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
417                 do_auth = 1;
418                 do_cipher = 0;
419         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
420                 do_auth = 0;
421                 do_cipher = 1;
422         }
423
424         if (do_cipher) {
425
426                 if (ctx->qat_cipher_alg ==
427                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
428                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
429                         ctx->qat_cipher_alg ==
430                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
431
432                         if (unlikely(
433                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
434                                  || (cipher_param->cipher_offset
435                                                         % BYTE_LENGTH != 0))) {
436                                 PMD_DRV_LOG(ERR,
437                   "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
438                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
439                                 return -EINVAL;
440                         }
441                         cipher_len = op->sym->cipher.data.length >> 3;
442                         cipher_ofs = op->sym->cipher.data.offset >> 3;
443
444                 } else if (ctx->bpi_ctx) {
445                         /* DOCSIS - only send complete blocks to the device.
446                          * Any partial block is processed here using CFB mode.
447                          * Even with 0 complete blocks, still send the op so it
448                          * gets into the rx queue for post-process and dequeuing
449                          */
450                         cipher_len = qat_bpicipher_preprocess(ctx, op);
451                         cipher_ofs = op->sym->cipher.data.offset;
452                 } else {
453                         cipher_len = op->sym->cipher.data.length;
454                         cipher_ofs = op->sym->cipher.data.offset;
455                 }
456
457                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
458                                 cipher_param, op, qat_req);
459                 min_ofs = cipher_ofs;
460         }
461
462         if (do_auth) {
463
464                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
465                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
466                         ctx->qat_hash_alg ==
467                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
468                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
469                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
470                                 PMD_DRV_LOG(ERR,
471                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
472                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
473                                 return -EINVAL;
474                         }
475                         auth_ofs = op->sym->auth.data.offset >> 3;
476                         auth_len = op->sym->auth.data.length >> 3;
477
478                         auth_param->u1.aad_adr =
479                                         rte_crypto_op_ctophys_offset(op,
480                                                         ctx->auth_iv.offset);
481
482                 } else if (ctx->qat_hash_alg ==
483                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
484                                 ctx->qat_hash_alg ==
485                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
486                         /* AES-GMAC */
487                         set_cipher_iv(ctx->auth_iv.length,
488                                 ctx->auth_iv.offset,
489                                 cipher_param, op, qat_req);
490                         auth_ofs = op->sym->auth.data.offset;
491                         auth_len = op->sym->auth.data.length;
492
493                         auth_param->u1.aad_adr = 0;
494                         auth_param->u2.aad_sz = 0;
495
496                         /*
497                          * If len(iv)==12B fw computes J0
498                          */
499                         if (ctx->auth_iv.length == 12) {
500                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
501                                         qat_req->comn_hdr.serv_specif_flags,
502                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
503
504                         }
505                 } else {
506                         auth_ofs = op->sym->auth.data.offset;
507                         auth_len = op->sym->auth.data.length;
508
509                 }
510                 min_ofs = auth_ofs;
511
512                 if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
513                         auth_param->auth_res_addr =
514                                         op->sym->auth.digest.phys_addr;
515
516         }
517
518         if (do_aead) {
519                 /*
520                  * This address is used to set the AAD physical pointer; for
521                  * CCM with no real AAD it is redirected to the op's IV area
522                  */
523                 rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
524                 if (ctx->qat_hash_alg ==
525                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
526                                 ctx->qat_hash_alg ==
527                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
528                         /*
529                          * If len(iv)==12B fw computes J0
530                          */
531                         if (ctx->cipher_iv.length == 12) {
532                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
533                                         qat_req->comn_hdr.serv_specif_flags,
534                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
535                         }
536                         set_cipher_iv(ctx->cipher_iv.length,
537                                         ctx->cipher_iv.offset,
538                                         cipher_param, op, qat_req);
539
540                 } else if (ctx->qat_hash_alg ==
541                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
542
543                         /* In case of AES-CCM this may point to user-selected
544                          * memory or to the IV offset in the crypto_op
545                          */
546                         uint8_t *aad_data = op->sym->aead.aad.data;
547                         /* This is the true AAD length; it does not include the
548                          * 18 bytes of preceding data (B0 block + length field)
549                          */
550                         uint8_t aad_ccm_real_len = 0;
551                         uint8_t aad_len_field_sz = 0;
552                         uint32_t msg_len_be =
553                                         rte_bswap32(op->sym->aead.data.length);
554
555                         if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
556                                 aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
557                                 aad_ccm_real_len = ctx->aad_len -
558                                         ICP_QAT_HW_CCM_AAD_B0_LEN -
559                                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
560                         } else {
561                                 /*
562                                  * aad_len is not greater than 18, so there is no
563                                  * actual AAD data; use the op's IV area for B0
564                                  */
565                                 aad_data = rte_crypto_op_ctod_offset(op,
566                                                 uint8_t *,
567                                                 ctx->cipher_iv.offset);
568                                 aad_phys_addr_aead =
569                                                 rte_crypto_op_ctophys_offset(op,
570                                                         ctx->cipher_iv.offset);
571                         }
572
573                         uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
574                                                         ctx->cipher_iv.length;
575
576                         aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
577                                                         aad_len_field_sz,
578                                                         ctx->digest_length, q);
579
580                         if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
581                                 memcpy(aad_data + ctx->cipher_iv.length +
582                                     ICP_QAT_HW_CCM_NONCE_OFFSET +
583                                     (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
584                                     (uint8_t *)&msg_len_be,
585                                     ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
586                         } else {
587                                 memcpy(aad_data + ctx->cipher_iv.length +
588                                     ICP_QAT_HW_CCM_NONCE_OFFSET,
589                                     (uint8_t *)&msg_len_be
590                                     + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
591                                     - q), q);
592                         }
593
594                         if (aad_len_field_sz > 0) {
595                                 *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
596                                                 = rte_bswap16(aad_ccm_real_len);
597
598                                 if ((aad_ccm_real_len + aad_len_field_sz)
599                                                 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
600                                         uint8_t pad_len = 0;
601                                         uint8_t pad_idx = 0;
602
603                                         pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
604                                         ((aad_ccm_real_len + aad_len_field_sz) %
605                                                 ICP_QAT_HW_CCM_AAD_B0_LEN);
606                                         pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
607                                             aad_ccm_real_len + aad_len_field_sz;
608                                         memset(&aad_data[pad_idx],
609                                                         0, pad_len);
610                                 }
611
612                         }
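                        /*
                         * Worked example (illustrative): for
                         * aad_ccm_real_len = 20 and aad_len_field_sz = 2,
                         * (20 + 2) % 16 = 6, so pad_len = 10 and zeroes are
                         * written to aad_data[38..47], padding the AAD region
                         * out to a multiple of the 16-byte block size.
                         */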
613
614                         set_cipher_iv_ccm(ctx->cipher_iv.length,
615                                         ctx->cipher_iv.offset,
616                                         cipher_param, op, q,
617                                         aad_len_field_sz);
618
619                 }
620
621                 cipher_len = op->sym->aead.data.length;
622                 cipher_ofs = op->sym->aead.data.offset;
623                 auth_len = op->sym->aead.data.length;
624                 auth_ofs = op->sym->aead.data.offset;
625
626                 auth_param->u1.aad_adr = aad_phys_addr_aead;
627                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
628                 min_ofs = op->sym->aead.data.offset;
629         }
630
631         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
632                 do_sgl = 1;
633
634         /* adjust for chain case */
635         if (do_cipher && do_auth)
636                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
637
638         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
639                 min_ofs = 0;
640
641         if (unlikely(op->sym->m_dst != NULL)) {
642                 /* Out-of-place operation (OOP)
643                  * Don't align DMA start. DMA the minimum data-set
644                  * so as not to overwrite data in dest buffer
645                  */
646                 src_buf_start =
647                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
648                 dst_buf_start =
649                         rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
650
651         } else {
652                 /* In-place operation
653                  * Start DMA at nearest aligned address below min_ofs
654                  */
655                 src_buf_start =
656                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
657                                                 & QAT_64_BTYE_ALIGN_MASK;
658
659                 if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
660                                         rte_pktmbuf_headroom(op->sym->m_src))
661                                                         > src_buf_start)) {
662                         /* alignment has pushed addr ahead of start of mbuf
663                          * so revert and take the performance hit
664                          */
665                         src_buf_start =
666                                 rte_pktmbuf_iova_offset(op->sym->m_src,
667                                                                 min_ofs);
668                 }
669                 dst_buf_start = src_buf_start;
670         }
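        /*
         * Worked example (illustrative): with min_ofs = 70 and the mbuf data
         * starting at IOVA 0x1000, the in-place path above rounds 0x1046 down
         * to the 64-byte boundary 0x1040, unless that would fall before the
         * start of the mbuf, in which case the unaligned address is kept.
         */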
671
672         if (do_cipher || do_aead) {
673                 cipher_param->cipher_offset =
674                                 (uint32_t)rte_pktmbuf_iova_offset(
675                                 op->sym->m_src, cipher_ofs) - src_buf_start;
676                 cipher_param->cipher_length = cipher_len;
677         } else {
678                 cipher_param->cipher_offset = 0;
679                 cipher_param->cipher_length = 0;
680         }
681
682         if (do_auth || do_aead) {
683                 auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
684                                 op->sym->m_src, auth_ofs) - src_buf_start;
685                 auth_param->auth_len = auth_len;
686         } else {
687                 auth_param->auth_off = 0;
688                 auth_param->auth_len = 0;
689         }
690
691         qat_req->comn_mid.dst_length =
692                 qat_req->comn_mid.src_length =
693                 (cipher_param->cipher_offset + cipher_param->cipher_length)
694                 > (auth_param->auth_off + auth_param->auth_len) ?
695                 (cipher_param->cipher_offset + cipher_param->cipher_length)
696                 : (auth_param->auth_off + auth_param->auth_len);
697
698         if (do_sgl) {
699
700                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
701                                 QAT_COMN_PTR_TYPE_SGL);
702                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
703                                 &cookie->qat_sgl_list_src,
704                                 qat_req->comn_mid.src_length);
705                 if (ret) {
706                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
707                         return ret;
708                 }
709
710                 if (likely(op->sym->m_dst == NULL))
711                         qat_req->comn_mid.dest_data_addr =
712                                 qat_req->comn_mid.src_data_addr =
713                                 cookie->qat_sgl_src_phys_addr;
714                 else {
715                         ret = qat_sgl_fill_array(op->sym->m_dst,
716                                         dst_buf_start,
717                                         &cookie->qat_sgl_list_dst,
718                                                 qat_req->comn_mid.dst_length);
719
720                         if (ret) {
721                                 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
722                                                 "fill sgl array");
723                                 return ret;
724                         }
725
726                         qat_req->comn_mid.src_data_addr =
727                                 cookie->qat_sgl_src_phys_addr;
728                         qat_req->comn_mid.dest_data_addr =
729                                         cookie->qat_sgl_dst_phys_addr;
730                 }
731         } else {
732                 qat_req->comn_mid.src_data_addr = src_buf_start;
733                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
734         }
735
736 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
737         rte_hexdump(stdout, "qat_req:", qat_req,
738                         sizeof(struct icp_qat_fw_la_bulk_req));
739         rte_hexdump(stdout, "src_data:",
740                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
741                         rte_pktmbuf_data_len(op->sym->m_src));
742         if (do_cipher) {
743                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
744                                                 uint8_t *,
745                                                 ctx->cipher_iv.offset);
746                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
747                                 ctx->cipher_iv.length);
748         }
749
750         if (do_auth) {
751                 if (ctx->auth_iv.length) {
752                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
753                                                         uint8_t *,
754                                                         ctx->auth_iv.offset);
755                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
756                                                 ctx->auth_iv.length);
757                 }
758                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
759                                 ctx->digest_length);
760         }
761
762         if (do_aead) {
763                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
764                                 ctx->digest_length);
765                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
766                                 ctx->aad_len);
767         }
768 #endif
769         return 0;
770 }
771
772
773 void qat_sym_stats_get(struct rte_cryptodev *dev,
774                 struct rte_cryptodev_stats *stats)
775 {
776         int i;
777         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
778
779         PMD_INIT_FUNC_TRACE();
780         if (stats == NULL) {
781                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
782                 return;
783         }
784         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
785                 if (qp[i] == NULL) {
786                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
787                         continue;
788                 }
789
790                 stats->enqueued_count += qp[i]->stats.enqueued_count;
791                 stats->dequeued_count += qp[i]->stats.dequeued_count;
792                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
793                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
794         }
795 }
796
797 void qat_sym_stats_reset(struct rte_cryptodev *dev)
798 {
799         int i;
800         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
801
802         PMD_INIT_FUNC_TRACE();
803         for (i = 0; i < dev->data->nb_queue_pairs; i++)
804                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
805         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
806 }
807
808
809
810 int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
811 {
812         return qat_qp_release(dev, queue_pair_id);
813 }
814
815 int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
816         const struct rte_cryptodev_qp_conf *qp_conf,
817         int socket_id, struct rte_mempool *session_pool __rte_unused)
818 {
819         struct qat_qp *qp;
820         int ret = 0;
821         uint32_t i;
822         struct qat_qp_config qat_qp_conf;
823
824         /* If qp is already in use, free ring memory and qp metadata. */
825         if (dev->data->queue_pairs[qp_id] != NULL) {
826                 ret = qat_sym_qp_release(dev, qp_id);
827                 if (ret < 0)
828                         return ret;
829         }
830         if (qp_id >= (ADF_NUM_SYM_QPS_PER_BUNDLE *
831                                         ADF_NUM_BUNDLES_PER_DEV)) {
832                 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device", qp_id);
833                 return -EINVAL;
834         }
835
836
837         qat_qp_conf.hw_bundle_num = (qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE);
838         qat_qp_conf.tx_ring_num = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
839                         ADF_SYM_TX_QUEUE_STARTOFF;
840         qat_qp_conf.rx_ring_num = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
841                         ADF_SYM_RX_QUEUE_STARTOFF;
842         qat_qp_conf.tx_msg_size = ADF_SYM_TX_RING_DESC_SIZE;
843         qat_qp_conf.rx_msg_size = ADF_SYM_RX_RING_DESC_SIZE;
844         qat_qp_conf.build_request = qat_sym_build_request;
845         qat_qp_conf.process_response = qat_sym_process_response;
846         qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
847         qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
848         qat_qp_conf.socket_id = socket_id;
849         qat_qp_conf.service_str = "sym";
850
851         ret = qat_qp_setup(dev, qp_id, &qat_qp_conf);
852         if (ret != 0)
853                 return ret;
854
855         qp = (struct qat_qp *)dev->data->queue_pairs[qp_id];
856
857         for (i = 0; i < qp->nb_descriptors; i++) {
858
859                 struct qat_sym_op_cookie *sgl_cookie =
860                                 qp->op_cookies[i];
861
862                 sgl_cookie->qat_sgl_src_phys_addr =
863                                 rte_mempool_virt2iova(sgl_cookie) +
864                                 offsetof(struct qat_sym_op_cookie,
865                                 qat_sgl_list_src);
866
867                 sgl_cookie->qat_sgl_dst_phys_addr =
868                                 rte_mempool_virt2iova(sgl_cookie) +
869                                 offsetof(struct qat_sym_op_cookie,
870                                 qat_sgl_list_dst);
871         }
872
873         return ret;
874
875 }
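/*
 * Illustrative configuration sketch (not part of the driver): this setup
 * routine is reached through the generic cryptodev API of this DPDK release,
 * e.g.
 *
 *     struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 4096 };
 *
 *     rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *                     rte_socket_id(), session_pool);
 *
 * where dev_id, qp_id and session_pool are supplied by the application.
 */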