crypto/qat: rename sgl related objects
[dpdk.git] drivers/crypto/qat/qat_sym.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2018 Intel Corporation
3  */
4
5 #include <rte_mempool.h>
6 #include <rte_mbuf.h>
7 #include <rte_hexdump.h>
8 #include <rte_crypto_sym.h>
9 #include <rte_bus_pci.h>
10 #include <rte_byteorder.h>
11
12 #include <openssl/evp.h>
13
14 #include "qat_logs.h"
15 #include "qat_sym_session.h"
16 #include "qat_sym.h"
17 #include "qat_qp.h"
18 #include "adf_transport_access_macros.h"
19 #include "qat_device.h"
20
21 #define BYTE_LENGTH    8
22 /* BPI is only used for partial blocks of DES and AES,
23  * so the AES block length can be assumed to be the max length for IV, src and dst
24  */
25 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
26
27 /** Encrypt a single partial block
28  *  Depends on openssl libcrypto
29  *  Uses ECB+XOR to do CFB encryption, same result, more performant
30  */
31 static inline int
32 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
33                 uint8_t *iv, int ivlen, int srclen,
34                 void *bpi_ctx)
35 {
36         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
37         int encrypted_ivlen;
38         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
39         uint8_t *encr = encrypted_iv;
40
41         /* ECB method: encrypt the IV, then XOR this with plaintext */
42         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
43                                                                 <= 0)
44                 goto cipher_encrypt_err;
45
46         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
47                 *dst = *src ^ *encr;
48
49         return 0;
50
51 cipher_encrypt_err:
52         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
53         return -EINVAL;
54 }
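/* Note: for a single partial block, CFB gives C = P XOR E_k(IV). Encrypting the
 * IV with the ECB context and XORing it into the data therefore produces the
 * same result as a dedicated CFB cipher, without needing a second libcrypto
 * context.
 */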
55
56 /** Decrypt a single partial block
57  *  Depends on openssl libcrypto
58  *  Uses ECB+XOR to do CFB encryption, same result, more performant
59  */
60 static inline int
61 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
62                 uint8_t *iv, int ivlen, int srclen,
63                 void *bpi_ctx)
64 {
65         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
66         int encrypted_ivlen;
67         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
68         uint8_t *encr = encrypted_iv;
69
70         /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
71         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
72                                                                 <= 0)
73                 goto cipher_decrypt_err;
74
75         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
76                 *dst = *src ^ *encr;
77
78         return 0;
79
80 cipher_decrypt_err:
81         PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
82         return -EINVAL;
83 }
84
85 /** Pre-process the trailing partial block of a BPI (DOCSIS) decrypt op in
86  *  software (CFB mode) and return the length of the remaining complete blocks
87  */
88
89 static inline uint32_t
90 qat_bpicipher_preprocess(struct qat_sym_session *ctx,
91                                 struct rte_crypto_op *op)
92 {
93         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
94         struct rte_crypto_sym_op *sym_op = op->sym;
95         uint8_t last_block_len = block_len > 0 ?
96                         sym_op->cipher.data.length % block_len : 0;
97
98         if (last_block_len &&
99                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
100
101                 /* Decrypt last block */
102                 uint8_t *last_block, *dst, *iv;
103                 uint32_t last_block_offset = sym_op->cipher.data.offset +
104                                 sym_op->cipher.data.length - last_block_len;
105                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
106                                 uint8_t *, last_block_offset);
107
108                 if (unlikely(sym_op->m_dst != NULL))
109                         /* out-of-place operation (OOP) */
110                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
111                                                 uint8_t *, last_block_offset);
112                 else
113                         dst = last_block;
114
115                 if (last_block_len < sym_op->cipher.data.length)
116                         /* use previous block ciphertext as IV */
117                         iv = last_block - block_len;
118                 else
119                         /* runt block, i.e. less than one full block */
120                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
121                                         ctx->cipher_iv.offset);
122
123 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
124                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
125                         last_block_len);
126                 if (sym_op->m_dst != NULL)
127                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
128                                 last_block_len);
129 #endif
130                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
131                                 last_block_len, ctx->bpi_ctx);
132 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
133                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
134                         last_block_len);
135                 if (sym_op->m_dst != NULL)
136                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
137                                 last_block_len);
138 #endif
139         }
140
141         return sym_op->cipher.data.length - last_block_len;
142 }
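/* The pre-/post-process pair implements DOCSIS BPI: qat_bpicipher_preprocess
 * (above) runs before enqueue and software-decrypts the trailing partial block
 * when the session direction is decrypt; qat_bpicipher_postprocess (below)
 * runs after dequeue and software-encrypts the trailing partial block for the
 * encrypt direction. Both return the length of the complete blocks left for
 * the device to process.
 */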
143
144 static inline uint32_t
145 qat_bpicipher_postprocess(struct qat_sym_session *ctx,
146                                 struct rte_crypto_op *op)
147 {
148         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
149         struct rte_crypto_sym_op *sym_op = op->sym;
150         uint8_t last_block_len = block_len > 0 ?
151                         sym_op->cipher.data.length % block_len : 0;
152
153         if (last_block_len > 0 &&
154                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
155
156                 /* Encrypt last block */
157                 uint8_t *last_block, *dst, *iv;
158                 uint32_t last_block_offset;
159
160                 last_block_offset = sym_op->cipher.data.offset +
161                                 sym_op->cipher.data.length - last_block_len;
162                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
163                                 uint8_t *, last_block_offset);
164
165                 if (unlikely(sym_op->m_dst != NULL))
166                         /* out-of-place operation (OOP) */
167                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
168                                                 uint8_t *, last_block_offset);
169                 else
170                         dst = last_block;
171
172                 if (last_block_len < sym_op->cipher.data.length)
173                         /* use previous block ciphertext as IV */
174                         iv = dst - block_len;
175                 else
176                         /* runt block, i.e. less than one full block */
177                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
178                                         ctx->cipher_iv.offset);
179
180 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
181                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
182                         last_block_len);
183                 if (sym_op->m_dst != NULL)
184                         rte_hexdump(stdout, "BPI: dst before post-process:",
185                                         dst, last_block_len);
186 #endif
187                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
188                                 last_block_len, ctx->bpi_ctx);
189 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
190                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
191                         last_block_len);
192                 if (sym_op->m_dst != NULL)
193                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
194                                 last_block_len);
195 #endif
196         }
197         return sym_op->cipher.data.length - last_block_len;
198 }
199
200 uint16_t
201 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
202                 uint16_t nb_ops)
203 {
204         return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
205 }
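/* This wrapper (and the dequeue wrapper below) is registered as the cryptodev
 * burst handler, so an application reaches it indirectly. A rough, illustrative
 * sketch of the application side (dev_id/qp_id are placeholders):
 *
 *     uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *     uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 */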
206
207 int
208 qat_sym_process_response(void **op, uint8_t *resp,
209                 __rte_unused void *op_cookie,
210                 __rte_unused enum qat_device_gen qat_dev_gen)
211 {
212
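        /* The firmware echoes comn_mid.opaque_data back in the response;
         * qat_sym_build_request stores the rte_crypto_op pointer there, so the
         * original op can be recovered below without any lookup.
         */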
213         struct icp_qat_fw_comn_resp *resp_msg =
214                         (struct icp_qat_fw_comn_resp *)resp;
215         struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
216                         (resp_msg->opaque_data);
217
218 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
219         rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
220                         sizeof(struct icp_qat_fw_comn_resp));
221 #endif
222
223         if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
224                         ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
225                         resp_msg->comn_hdr.comn_status)) {
226
227                 rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
228         } else {
229                 struct qat_sym_session *sess = (struct qat_sym_session *)
230                                                 get_session_private_data(
231                                                 rx_op->sym->session,
232                                                 cryptodev_qat_driver_id);
233
234                 if (sess->bpi_ctx)
235                         qat_bpicipher_postprocess(sess, rx_op);
236                 rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
237         }
238         *op = (void *)rx_op;
239
240         return 0;
241 }
242
243
244 uint16_t
245 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
246                 uint16_t nb_ops)
247 {
248         return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
249 }
250
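/** Build a QAT SGL describing an mbuf chain: the first entry covers the first
 *  segment starting at IOVA buff_start, each further segment is appended, and
 *  the last entry is trimmed so the total length equals data_len. Fails if
 *  more than QAT_SGL_MAX_NUMBER entries would be needed.
 */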
251 static inline int
252 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
253                 struct qat_sgl *list, uint32_t data_len)
254 {
255         int nr = 1;
256
257         uint32_t buf_len = rte_pktmbuf_iova(buf) -
258                         buff_start + rte_pktmbuf_data_len(buf);
259
260         list->buffers[0].addr = buff_start;
261         list->buffers[0].resrvd = 0;
262         list->buffers[0].len = buf_len;
263
264         if (data_len <= buf_len) {
265                 list->num_bufs = nr;
266                 list->buffers[0].len = data_len;
267                 return 0;
268         }
269
270         buf = buf->next;
271         while (buf) {
272                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
273                         PMD_DRV_LOG(ERR, "QAT PMD exceeded max number of"
274                                         " QAT SGL entries (%u)",
275                                         QAT_SGL_MAX_NUMBER);
276                         return -EINVAL;
277                 }
278
279                 list->buffers[nr].len = rte_pktmbuf_data_len(buf);
280                 list->buffers[nr].resrvd = 0;
281                 list->buffers[nr].addr = rte_pktmbuf_iova(buf);
282
283                 buf_len += list->buffers[nr].len;
284                 buf = buf->next;
285
286                 if (buf_len > data_len) {
287                         list->buffers[nr].len -=
288                                 buf_len - data_len;
289                         buf = NULL;
290                 }
291                 ++nr;
292         }
293         list->num_bufs = nr;
294
295         return 0;
296 }
297
298 static inline void
299 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
300                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
301                 struct rte_crypto_op *op,
302                 struct icp_qat_fw_la_bulk_req *qat_req)
303 {
304         /* copy IV into request if it fits */
305         if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
306                 rte_memcpy(cipher_param->u.cipher_IV_array,
307                                 rte_crypto_op_ctod_offset(op, uint8_t *,
308                                         iv_offset),
309                                 iv_length);
310         } else {
311                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
312                                 qat_req->comn_hdr.serv_specif_flags,
313                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
314                 cipher_param->u.s.cipher_IV_ptr =
315                                 rte_crypto_op_ctophys_offset(op,
316                                         iv_offset);
317         }
318 }
319
320 /** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
321  *  where q is the padding of the nonce in the 16-byte block
322  */
323 static inline void
324 set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
325                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
326                 struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
327 {
328         rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
329                         ICP_QAT_HW_CCM_NONCE_OFFSET,
330                         rte_crypto_op_ctod_offset(op, uint8_t *,
331                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
332                         iv_length);
333         *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
334                         q - ICP_QAT_HW_CCM_NONCE_OFFSET;
335
336         if (aad_len_field_sz)
337                 rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
338                         rte_crypto_op_ctod_offset(op, uint8_t *,
339                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
340                         iv_length);
341 }
342
343
344 int
345 qat_sym_build_request(void *in_op, uint8_t *out_msg,
346                 void *op_cookie, enum qat_device_gen qat_dev_gen)
347 {
348         int ret = 0;
349         struct qat_sym_session *ctx;
350         struct icp_qat_fw_la_cipher_req_params *cipher_param;
351         struct icp_qat_fw_la_auth_req_params *auth_param;
352         register struct icp_qat_fw_la_bulk_req *qat_req;
353         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
354         uint32_t cipher_len = 0, cipher_ofs = 0;
355         uint32_t auth_len = 0, auth_ofs = 0;
356         uint32_t min_ofs = 0;
357         uint64_t src_buf_start = 0, dst_buf_start = 0;
358         uint8_t do_sgl = 0;
359         struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
360         struct qat_sym_op_cookie *cookie =
361                                 (struct qat_sym_op_cookie *)op_cookie;
362
363 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
364         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
365                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
366                                 "operation requests, op (%p) is not a "
367                                 "symmetric operation.", op);
368                 return -EINVAL;
369         }
370 #endif
371         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
372                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
373                                 " requests, op (%p) is sessionless.", op);
374                 return -EINVAL;
375         }
376
377         ctx = (struct qat_sym_session *)get_session_private_data(
378                         op->sym->session, cryptodev_qat_driver_id);
379
380         if (unlikely(ctx == NULL)) {
381                 PMD_DRV_LOG(ERR, "Session was not created for this device");
382                 return -EINVAL;
383         }
384
385         if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
386                 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
387                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
388                 return -EINVAL;
389         }
390
391         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
392         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
393         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
394         cipher_param = (void *)&qat_req->serv_specif_rqpars;
395         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
396
397         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
398                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
399                 /* AES-GCM or AES-CCM */
400                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
401                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
402                         (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
403                         && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
404                         && ctx->qat_hash_alg ==
405                                         ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
406                         do_aead = 1;
407                 } else {
408                         do_auth = 1;
409                         do_cipher = 1;
410                 }
411         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
412                 do_auth = 1;
413                 do_cipher = 0;
414         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
415                 do_auth = 0;
416                 do_cipher = 1;
417         }
418
419         if (do_cipher) {
420
421                 if (ctx->qat_cipher_alg ==
422                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
423                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
424                         ctx->qat_cipher_alg ==
425                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
426
427                         if (unlikely(
428                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
429                                  || (cipher_param->cipher_offset
430                                                         % BYTE_LENGTH != 0))) {
431                                 PMD_DRV_LOG(ERR,
432                   "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
433                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
434                                 return -EINVAL;
435                         }
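                        /* For these wireless algorithms the API expresses
                         * offsets/lengths in bits, hence the >> 3 conversion
                         * to bytes.
                         */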
436                         cipher_len = op->sym->cipher.data.length >> 3;
437                         cipher_ofs = op->sym->cipher.data.offset >> 3;
438
439                 } else if (ctx->bpi_ctx) {
440                         /* DOCSIS - only send complete blocks to the device.
441                          * Process any partial block using CFB mode.
442                          * Even with 0 complete blocks, still send the request to the
443                          * device so it reaches the rx queue for post-processing and dequeuing
444                          */
445                         cipher_len = qat_bpicipher_preprocess(ctx, op);
446                         cipher_ofs = op->sym->cipher.data.offset;
447                 } else {
448                         cipher_len = op->sym->cipher.data.length;
449                         cipher_ofs = op->sym->cipher.data.offset;
450                 }
451
452                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
453                                 cipher_param, op, qat_req);
454                 min_ofs = cipher_ofs;
455         }
456
457         if (do_auth) {
458
459                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
460                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
461                         ctx->qat_hash_alg ==
462                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
463                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
464                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
465                                 PMD_DRV_LOG(ERR,
466                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
467                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
468                                 return -EINVAL;
469                         }
470                         auth_ofs = op->sym->auth.data.offset >> 3;
471                         auth_len = op->sym->auth.data.length >> 3;
472
473                         auth_param->u1.aad_adr =
474                                         rte_crypto_op_ctophys_offset(op,
475                                                         ctx->auth_iv.offset);
476
477                 } else if (ctx->qat_hash_alg ==
478                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
479                                 ctx->qat_hash_alg ==
480                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
481                         /* AES-GMAC */
482                         set_cipher_iv(ctx->auth_iv.length,
483                                 ctx->auth_iv.offset,
484                                 cipher_param, op, qat_req);
485                         auth_ofs = op->sym->auth.data.offset;
486                         auth_len = op->sym->auth.data.length;
487
488                         auth_param->u1.aad_adr = 0;
489                         auth_param->u2.aad_sz = 0;
490
491                         /*
492                          * If len(iv)==12B fw computes J0
493                          */
494                         if (ctx->auth_iv.length == 12) {
495                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
496                                         qat_req->comn_hdr.serv_specif_flags,
497                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
498
499                         }
500                 } else {
501                         auth_ofs = op->sym->auth.data.offset;
502                         auth_len = op->sym->auth.data.length;
503
504                 }
505                 min_ofs = auth_ofs;
506
507                 if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
508                         auth_param->auth_res_addr =
509                                         op->sym->auth.digest.phys_addr;
510
511         }
512
513         if (do_aead) {
514                 /*
515                  * This address may be used for setting the AAD physical pointer;
516                  * it may be redirected to the IV offset within the op
517                  */
518                 rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
519                 if (ctx->qat_hash_alg ==
520                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
521                                 ctx->qat_hash_alg ==
522                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
523                         /*
524                          * If len(iv)==12B fw computes J0
525                          */
526                         if (ctx->cipher_iv.length == 12) {
527                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
528                                         qat_req->comn_hdr.serv_specif_flags,
529                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
530                         }
531                         set_cipher_iv(ctx->cipher_iv.length,
532                                         ctx->cipher_iv.offset,
533                                         cipher_param, op, qat_req);
534
535                 } else if (ctx->qat_hash_alg ==
536                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
537
538                         /* In case of AES-CCM this may point to user-selected
539                          * memory or the IV offset in the crypto_op
540                          */
541                         uint8_t *aad_data = op->sym->aead.aad.data;
542                         /* This is the true AAD length; it does not include the
543                          * 18 bytes of preceding data
544                          */
545                         uint8_t aad_ccm_real_len = 0;
546                         uint8_t aad_len_field_sz = 0;
547                         uint32_t msg_len_be =
548                                         rte_bswap32(op->sym->aead.data.length);
549
550                         if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
551                                 aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
552                                 aad_ccm_real_len = ctx->aad_len -
553                                         ICP_QAT_HW_CCM_AAD_B0_LEN -
554                                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
555                         } else {
556                                 /*
557                                  * aad_len is not greater than 18, so there is no
558                                  * actual AAD data; use the IV after the op for the B0 block
559                                  */
560                                 aad_data = rte_crypto_op_ctod_offset(op,
561                                                 uint8_t *,
562                                                 ctx->cipher_iv.offset);
563                                 aad_phys_addr_aead =
564                                                 rte_crypto_op_ctophys_offset(op,
565                                                         ctx->cipher_iv.offset);
566                         }
567
568                         uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
569                                                         ctx->cipher_iv.length;
570
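                        /* Build the CCM B0 block in front of the AAD: a flags
                         * byte (encoding aad_len_field_sz, the digest length
                         * and q) followed by the nonce, with the big-endian
                         * message length occupying the final q bytes. When AAD
                         * is present, its real length is then stored as a
                         * 16-bit big-endian value right after B0 and the AAD
                         * is zero-padded to a 16-byte boundary.
                         */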
571                         aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
572                                                         aad_len_field_sz,
573                                                         ctx->digest_length, q);
574
575                         if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
576                                 memcpy(aad_data + ctx->cipher_iv.length +
577                                     ICP_QAT_HW_CCM_NONCE_OFFSET +
578                                     (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
579                                     (uint8_t *)&msg_len_be,
580                                     ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
581                         } else {
582                                 memcpy(aad_data + ctx->cipher_iv.length +
583                                     ICP_QAT_HW_CCM_NONCE_OFFSET,
584                                     (uint8_t *)&msg_len_be
585                                     + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
586                                     - q), q);
587                         }
588
589                         if (aad_len_field_sz > 0) {
590                                 *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
591                                                 = rte_bswap16(aad_ccm_real_len);
592
593                                 if ((aad_ccm_real_len + aad_len_field_sz)
594                                                 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
595                                         uint8_t pad_len = 0;
596                                         uint8_t pad_idx = 0;
597
598                                         pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
599                                         ((aad_ccm_real_len + aad_len_field_sz) %
600                                                 ICP_QAT_HW_CCM_AAD_B0_LEN);
601                                         pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
602                                             aad_ccm_real_len + aad_len_field_sz;
603                                         memset(&aad_data[pad_idx],
604                                                         0, pad_len);
605                                 }
606
607                         }
608
609                         set_cipher_iv_ccm(ctx->cipher_iv.length,
610                                         ctx->cipher_iv.offset,
611                                         cipher_param, op, q,
612                                         aad_len_field_sz);
613
614                 }
615
616                 cipher_len = op->sym->aead.data.length;
617                 cipher_ofs = op->sym->aead.data.offset;
618                 auth_len = op->sym->aead.data.length;
619                 auth_ofs = op->sym->aead.data.offset;
620
621                 auth_param->u1.aad_adr = aad_phys_addr_aead;
622                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
623                 min_ofs = op->sym->aead.data.offset;
624         }
625
626         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
627                 do_sgl = 1;
628
629         /* adjust for chain case */
630         if (do_cipher && do_auth)
631                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
632
633         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
634                 min_ofs = 0;
635
636         if (unlikely(op->sym->m_dst != NULL)) {
637                 /* Out-of-place operation (OOP)
638                  * Don't align DMA start. DMA the minimum data-set
639                  * so as not to overwrite data in dest buffer
640                  */
641                 src_buf_start =
642                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
643                 dst_buf_start =
644                         rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
645
646         } else {
647                 /* In-place operation
648                  * Start DMA at nearest aligned address below min_ofs
649                  */
650                 src_buf_start =
651                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
652                                                 & QAT_64_BTYE_ALIGN_MASK;
653
654                 if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
655                                         rte_pktmbuf_headroom(op->sym->m_src))
656                                                         > src_buf_start)) {
657                         /* alignment has pushed addr ahead of start of mbuf
658                          * so revert and take the performance hit
659                          */
660                         src_buf_start =
661                                 rte_pktmbuf_iova_offset(op->sym->m_src,
662                                                                 min_ofs);
663                 }
664                 dst_buf_start = src_buf_start;
665         }
666
667         if (do_cipher || do_aead) {
668                 cipher_param->cipher_offset =
669                                 (uint32_t)rte_pktmbuf_iova_offset(
670                                 op->sym->m_src, cipher_ofs) - src_buf_start;
671                 cipher_param->cipher_length = cipher_len;
672         } else {
673                 cipher_param->cipher_offset = 0;
674                 cipher_param->cipher_length = 0;
675         }
676
677         if (do_auth || do_aead) {
678                 auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
679                                 op->sym->m_src, auth_ofs) - src_buf_start;
680                 auth_param->auth_len = auth_len;
681         } else {
682                 auth_param->auth_off = 0;
683                 auth_param->auth_len = 0;
684         }
685
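        /* The DMA length is the furthest byte touched by either the cipher or
         * the auth region, measured from the chosen buffer start.
         */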
686         qat_req->comn_mid.dst_length =
687                 qat_req->comn_mid.src_length =
688                 (cipher_param->cipher_offset + cipher_param->cipher_length)
689                 > (auth_param->auth_off + auth_param->auth_len) ?
690                 (cipher_param->cipher_offset + cipher_param->cipher_length)
691                 : (auth_param->auth_off + auth_param->auth_len);
692
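        /* For chained mbufs, switch the request to SGL addressing: the source
         * (and, for out-of-place, destination) SGL tables live in the per-op
         * cookie and their IOVAs were pre-computed at queue pair setup.
         */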
693         if (do_sgl) {
694
695                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
696                                 QAT_COMN_PTR_TYPE_SGL);
697                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
698                                 &cookie->qat_sgl_src,
699                                 qat_req->comn_mid.src_length);
700                 if (ret) {
701                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
702                         return ret;
703                 }
704
705                 if (likely(op->sym->m_dst == NULL))
706                         qat_req->comn_mid.dest_data_addr =
707                                 qat_req->comn_mid.src_data_addr =
708                                 cookie->qat_sgl_src_phys_addr;
709                 else {
710                         ret = qat_sgl_fill_array(op->sym->m_dst,
711                                         dst_buf_start,
712                                         &cookie->qat_sgl_dst,
713                                                 qat_req->comn_mid.dst_length);
714
715                         if (ret) {
716                                 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
717                                                 "fill sgl array");
718                                 return ret;
719                         }
720
721                         qat_req->comn_mid.src_data_addr =
722                                 cookie->qat_sgl_src_phys_addr;
723                         qat_req->comn_mid.dest_data_addr =
724                                         cookie->qat_sgl_dst_phys_addr;
725                 }
726         } else {
727                 qat_req->comn_mid.src_data_addr = src_buf_start;
728                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
729         }
730
731 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
732         rte_hexdump(stdout, "qat_req:", qat_req,
733                         sizeof(struct icp_qat_fw_la_bulk_req));
734         rte_hexdump(stdout, "src_data:",
735                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
736                         rte_pktmbuf_data_len(op->sym->m_src));
737         if (do_cipher) {
738                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
739                                                 uint8_t *,
740                                                 ctx->cipher_iv.offset);
741                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
742                                 ctx->cipher_iv.length);
743         }
744
745         if (do_auth) {
746                 if (ctx->auth_iv.length) {
747                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
748                                                         uint8_t *,
749                                                         ctx->auth_iv.offset);
750                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
751                                                 ctx->auth_iv.length);
752                 }
753                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
754                                 ctx->digest_length);
755         }
756
757         if (do_aead) {
758                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
759                                 ctx->digest_length);
760                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
761                                 ctx->aad_len);
762         }
763 #endif
764         return 0;
765 }
766
767
768 void qat_sym_stats_get(struct rte_cryptodev *dev,
769                 struct rte_cryptodev_stats *stats)
770 {
771         int i;
772         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
773
774         PMD_INIT_FUNC_TRACE();
775         if (stats == NULL) {
776                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
777                 return;
778         }
779         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
780                 if (qp[i] == NULL) {
781                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
782                         continue;
783                 }
784
785                 stats->enqueued_count += qp[i]->stats.enqueued_count;
786                 stats->dequeued_count += qp[i]->stats.dequeued_count;
787                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
788                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
789         }
790 }
791
792 void qat_sym_stats_reset(struct rte_cryptodev *dev)
793 {
794         int i;
795         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
796
797         PMD_INIT_FUNC_TRACE();
798         for (i = 0; i < dev->data->nb_queue_pairs; i++)
799                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
800         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
801 }
802
803 int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
804 {
805         PMD_DRV_LOG(DEBUG, "Release sym qp %u on device %d",
806                                 queue_pair_id, dev->data->dev_id);
807
808         return qat_qp_release((struct qat_qp **)
809                         &(dev->data->queue_pairs[queue_pair_id]));
810 }
811
812 int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
813         const struct rte_cryptodev_qp_conf *qp_conf,
814         int socket_id, struct rte_mempool *session_pool __rte_unused)
815 {
816         struct qat_qp *qp;
817         int ret = 0;
818         uint32_t i;
819         struct qat_qp_config qat_qp_conf;
820
821         struct qat_qp **qp_addr =
822                         (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
823         struct qat_pmd_private *qat_private = dev->data->dev_private;
824         const struct qat_qp_hw_data *sym_hw_qps =
825                         qp_gen_config[qat_private->qat_dev_gen]
826                                       .qp_hw_data[QAT_SERVICE_SYMMETRIC];
827         const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
828
829         /* If qp is already in use free ring memory and qp metadata. */
830         if (*qp_addr != NULL) {
831                 ret = qat_sym_qp_release(dev, qp_id);
832                 if (ret < 0)
833                         return ret;
834         }
835         if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
836                 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device", qp_id);
837                 return -EINVAL;
838         }
839
840         qat_qp_conf.hw = qp_hw_data;
841         qat_qp_conf.build_request = qat_sym_build_request;
842         qat_qp_conf.process_response = qat_sym_process_response;
843         qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
844         qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
845         qat_qp_conf.socket_id = socket_id;
846         qat_qp_conf.service_str = "sym";
847
848         ret = qat_qp_setup(qat_private, qp_addr, qp_id, &qat_qp_conf);
849         if (ret != 0)
850                 return ret;
851
852         qp = (struct qat_qp *)*qp_addr;
853
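        /* Pre-compute the IOVA of the src/dst SGL tables embedded in each op
         * cookie so the request build path can reference them directly.
         */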
854         for (i = 0; i < qp->nb_descriptors; i++) {
855
856                 struct qat_sym_op_cookie *sgl_cookie =
857                                 qp->op_cookies[i];
858
859                 sgl_cookie->qat_sgl_src_phys_addr =
860                                 rte_mempool_virt2iova(sgl_cookie) +
861                                 offsetof(struct qat_sym_op_cookie,
862                                 qat_sgl_src);
863
864                 sgl_cookie->qat_sgl_dst_phys_addr =
865                                 rte_mempool_virt2iova(sgl_cookie) +
866                                 offsetof(struct qat_sym_op_cookie,
867                                 qat_sgl_dst);
868         }
869
870         return ret;
871 }