crypto/qat: make enqueue function generic
[dpdk.git] / drivers / crypto / qat / qat_sym.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2018 Intel Corporation
3  */
4
5 #include <rte_mempool.h>
6 #include <rte_mbuf.h>
7 #include <rte_hexdump.h>
8 #include <rte_crypto_sym.h>
9 #include <rte_bus_pci.h>
10 #include <rte_byteorder.h>
11
12 #include <openssl/evp.h>
13
14 #include "qat_logs.h"
15 #include "qat_sym_session.h"
16 #include "qat_sym.h"
17 #include "adf_transport_access_macros.h"
18
19 #define BYTE_LENGTH    8
20 /* BPI is only used for partial blocks of DES and AES, so the AES block
21  * length can be taken as the maximum length for the IV, src and dst
22  */
23 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
24
25 /** Encrypt a single partial block
26  *  Depends on openssl libcrypto
27  *  Uses ECB+XOR to do CFB encryption, same result, more performant
28  */
29 static inline int
30 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
31                 uint8_t *iv, int ivlen, int srclen,
32                 void *bpi_ctx)
33 {
34         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
35         int encrypted_ivlen;
36         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
37         uint8_t *encr = encrypted_iv;
38
39         /* ECB method: encrypt the IV, then XOR this with plaintext */
40         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
41                                                                 <= 0)
42                 goto cipher_encrypt_err;
43
44         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
45                 *dst = *src ^ *encr;
46
47         return 0;
48
49 cipher_encrypt_err:
50         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
51         return -EINVAL;
52 }
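/*
 * Illustrative sketch (not part of the driver): the CFB trick above in
 * standalone form. CFB on a partial block is just ECB-encrypting the IV and
 * XOR-ing that keystream with the data, which is what bpi_cipher_encrypt()
 * and bpi_cipher_decrypt() do with the session's pre-built bpi_ctx. Here the
 * context is created locally and assumed to be AES-128-ECB with padding
 * disabled; key, iv and the partial block (partial_len <= 16) are
 * hypothetical caller-supplied buffers.
 */
static __rte_unused int
bpi_cfb_partial_block_example(const uint8_t *key, const uint8_t *iv,
                const uint8_t *in, uint8_t *out, int partial_len)
{
        EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
        uint8_t keystream[BPI_MAX_ENCR_IV_LEN];
        int len, i, ret = -EINVAL;

        if (ctx == NULL)
                return -ENOMEM;
        if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) != 1)
                goto out;
        EVP_CIPHER_CTX_set_padding(ctx, 0);
        /* keystream = ECB(key, IV), the same step the driver performs */
        if (EVP_EncryptUpdate(ctx, keystream, &len, iv,
                        BPI_MAX_ENCR_IV_LEN) != 1)
                goto out;
        for (i = 0; i < partial_len; i++)
                out[i] = in[i] ^ keystream[i];
        ret = 0;
out:
        EVP_CIPHER_CTX_free(ctx);
        return ret;
}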
53
54 /** Decrypt a single partial block
55  *  Depends on openssl libcrypto
56  *  Uses ECB+XOR to do CFB decryption, same result, more performant
57  */
58 static inline int
59 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
60                 uint8_t *iv, int ivlen, int srclen,
61                 void *bpi_ctx)
62 {
63         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
64         int encrypted_ivlen;
65         uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
66         uint8_t *encr = encrypted_iv;
67
68         /* ECB method: encrypt (not decrypt!) the IV, then XOR with the ciphertext */
69         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
70                                                                 <= 0)
71                 goto cipher_decrypt_err;
72
73         for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
74                 *dst = *src ^ *encr;
75
76         return 0;
77
78 cipher_decrypt_err:
79         PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
80         return -EINVAL;
81 }
82
83 /** Forward declaration of adf_modulo: power-of-two modulo used to wrap
84  *  ring offsets; the definition is near the end of this file
85  */
86 static inline uint32_t
87 adf_modulo(uint32_t data, uint32_t shift);
88
89 static inline uint32_t
90 qat_bpicipher_preprocess(struct qat_sym_session *ctx,
91                                 struct rte_crypto_op *op)
92 {
93         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
94         struct rte_crypto_sym_op *sym_op = op->sym;
95         uint8_t last_block_len = block_len > 0 ?
96                         sym_op->cipher.data.length % block_len : 0;
97
98         if (last_block_len &&
99                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
100
101                 /* Decrypt last block */
102                 uint8_t *last_block, *dst, *iv;
103                 uint32_t last_block_offset = sym_op->cipher.data.offset +
104                                 sym_op->cipher.data.length - last_block_len;
105                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
106                                 uint8_t *, last_block_offset);
107
108                 if (unlikely(sym_op->m_dst != NULL))
109                         /* out-of-place operation (OOP) */
110                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
111                                                 uint8_t *, last_block_offset);
112                 else
113                         dst = last_block;
114
115                 if (last_block_len < sym_op->cipher.data.length)
116                         /* use previous block ciphertext as IV */
117                         iv = last_block - block_len;
118                 else
119                         /* runt block, i.e. less than one full block */
120                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
121                                         ctx->cipher_iv.offset);
122
123 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
124                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
125                         last_block_len);
126                 if (sym_op->m_dst != NULL)
127                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
128                                 last_block_len);
129 #endif
130                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
131                                 last_block_len, ctx->bpi_ctx);
132 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
133                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
134                         last_block_len);
135                 if (sym_op->m_dst != NULL)
136                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
137                                 last_block_len);
138 #endif
139         }
140
141         return sym_op->cipher.data.length - last_block_len;
142 }
143
144 static inline uint32_t
145 qat_bpicipher_postprocess(struct qat_sym_session *ctx,
146                                 struct rte_crypto_op *op)
147 {
148         int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
149         struct rte_crypto_sym_op *sym_op = op->sym;
150         uint8_t last_block_len = block_len > 0 ?
151                         sym_op->cipher.data.length % block_len : 0;
152
153         if (last_block_len > 0 &&
154                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
155
156                 /* Encrypt last block */
157                 uint8_t *last_block, *dst, *iv;
158                 uint32_t last_block_offset;
159
160                 last_block_offset = sym_op->cipher.data.offset +
161                                 sym_op->cipher.data.length - last_block_len;
162                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
163                                 uint8_t *, last_block_offset);
164
165                 if (unlikely(sym_op->m_dst != NULL))
166                         /* out-of-place operation (OOP) */
167                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
168                                                 uint8_t *, last_block_offset);
169                 else
170                         dst = last_block;
171
172                 if (last_block_len < sym_op->cipher.data.length)
173                         /* use previous block ciphertext as IV */
174                         iv = dst - block_len;
175                 else
176                         /* runt block, i.e. less than one full block */
177                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
178                                         ctx->cipher_iv.offset);
179
180 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
181                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
182                         last_block_len);
183                 if (sym_op->m_dst != NULL)
184                         rte_hexdump(stdout, "BPI: dst before post-process:",
185                                         dst, last_block_len);
186 #endif
187                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
188                                 last_block_len, ctx->bpi_ctx);
189 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
190                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
191                         last_block_len);
192                 if (sym_op->m_dst != NULL)
193                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
194                                 last_block_len);
195 #endif
196         }
197         return sym_op->cipher.data.length - last_block_len;
198 }
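/*
 * Illustrative worked example (not driver code) of the block arithmetic
 * shared by qat_bpicipher_preprocess() and qat_bpicipher_postprocess(): with
 * a 16-byte AES block and a hypothetical 70-byte DOCSIS payload at offset 0,
 * the device handles the 64 whole-block bytes and the trailing 6 bytes are
 * ciphered in software, using the previous ciphertext block as the CFB IV
 * (or the op IV when the payload is a runt, i.e. shorter than one block).
 */
static __rte_unused void
bpi_block_split_example(void)
{
        const uint32_t block_len = 16, data_ofs = 0, data_len = 70;
        uint32_t last_block_len = data_len % block_len;         /* 6  */
        uint32_t hw_len = data_len - last_block_len;            /* 64 */
        uint32_t last_block_ofs = data_ofs + hw_len;            /* 64 */
        uint32_t iv_ofs = last_block_ofs - block_len;           /* 48 */

        (void)iv_ofs;   /* bytes 48..63 seed the CFB step for bytes 64..69 */
}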
199
200 static inline void
201 txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
202         WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
203                         q->hw_queue_number, q->tail);
204         q->nb_pending_requests = 0;
205         q->csr_tail = q->tail;
206 }
207
208 static uint16_t
209 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
210 {
211         register struct qat_queue *queue;
212         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
213         register uint32_t nb_ops_sent = 0;
214         register int ret;
215         uint16_t nb_ops_possible = nb_ops;
216         register uint8_t *base_addr;
217         register uint32_t tail;
218         int overflow;
219
220         if (unlikely(nb_ops == 0))
221                 return 0;
222
223         /* read params used a lot in main loop into registers */
224         queue = &(tmp_qp->tx_q);
225         base_addr = (uint8_t *)queue->base_addr;
226         tail = queue->tail;
227
228         /* Find how many can actually fit on the ring */
229         tmp_qp->inflights16 += nb_ops;
230         overflow = tmp_qp->inflights16 - queue->max_inflights;
231         if (overflow > 0) {
232                 tmp_qp->inflights16 -= overflow;
233                 nb_ops_possible = nb_ops - overflow;
234                 if (nb_ops_possible == 0)
235                         return 0;
236         }
237
238         while (nb_ops_sent != nb_ops_possible) {
239                 ret = tmp_qp->build_request(*ops, base_addr + tail,
240                                 tmp_qp->op_cookies[tail / queue->msg_size],
241                                 tmp_qp->qat_dev_gen);
242                 if (ret != 0) {
243                         tmp_qp->stats.enqueue_err_count++;
244                         /*
245                          * This message cannot be enqueued; roll
246                          * inflights back by the ops not yet sent.
247                          */
248                         tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
249                         if (nb_ops_sent == 0)
250                                 return 0;
251                         goto kick_tail;
252                 }
253
254                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
255                 ops++;
256                 nb_ops_sent++;
257         }
258 kick_tail:
259         queue->tail = tail;
260         tmp_qp->stats.enqueued_count += nb_ops_sent;
261         queue->nb_pending_requests += nb_ops_sent;
262         if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
263                         queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
264                 txq_write_tail(tmp_qp, queue);
265         }
266         return nb_ops_sent;
267 }
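/*
 * Illustrative sketch of the capacity check at the top of
 * qat_enqueue_op_burst(): inflights16 is bumped optimistically by the
 * requested burst and trimmed back if that would exceed max_inflights, so
 * only the ops that fit on the ring are built. Parameter types and values
 * here are simplified/hypothetical.
 */
static __rte_unused uint16_t
ring_capacity_check_example(uint16_t *inflights16, uint16_t max_inflights,
                uint16_t nb_ops)
{
        int overflow;

        *inflights16 += nb_ops;
        overflow = *inflights16 - max_inflights;
        if (overflow > 0) {
                *inflights16 -= overflow;
                nb_ops -= overflow;     /* may become 0: nothing enqueued */
        }
        return nb_ops;
}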
268
269 static inline
270 void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
271 {
272         uint32_t old_head, new_head;
273         uint32_t max_head;
274
275         old_head = q->csr_head;
276         new_head = q->head;
277         max_head = qp->nb_descriptors * q->msg_size;
278
279         /* write out free descriptors */
280         void *cur_desc = (uint8_t *)q->base_addr + old_head;
281
282         if (new_head < old_head) {
283                 memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
284                 memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
285         } else {
286                 memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
287         }
288         q->nb_processed_responses = 0;
289         q->csr_head = new_head;
290
291         /* write current head to CSR */
292         WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
293                             q->hw_queue_number, new_head);
294 }
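/*
 * Illustrative sketch of the wrap-around clearing in rxq_free_desc():
 * everything between the head last written to the CSR and the current head
 * is reset to the empty signature, in two memset()s when the head has
 * wrapped past the end of the ring. The byte array stands in for
 * q->base_addr and ring_size for nb_descriptors * msg_size.
 */
static __rte_unused void
rxq_clear_processed_example(uint8_t *ring, uint32_t ring_size,
                uint32_t old_head, uint32_t new_head)
{
        if (new_head < old_head) {
                /* head wrapped: clear to the end, then from the start */
                memset(ring + old_head, ADF_RING_EMPTY_SIG_BYTE,
                                ring_size - old_head);
                memset(ring, ADF_RING_EMPTY_SIG_BYTE, new_head);
        } else {
                memset(ring + old_head, ADF_RING_EMPTY_SIG_BYTE,
                                new_head - old_head);
        }
}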
295
296 uint16_t
297 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
298                 uint16_t nb_ops)
299 {
300         return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
301 }
302
303 uint16_t
304 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
305                 uint16_t nb_ops)
306 {
307         struct qat_queue *rx_queue, *tx_queue;
308         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
309         uint32_t msg_counter = 0;
310         struct rte_crypto_op *rx_op;
311         struct icp_qat_fw_comn_resp *resp_msg;
312         uint32_t head;
313
314         rx_queue = &(tmp_qp->rx_q);
315         tx_queue = &(tmp_qp->tx_q);
316         head = rx_queue->head;
317         resp_msg = (struct icp_qat_fw_comn_resp *)
318                         ((uint8_t *)rx_queue->base_addr + head);
319
320         while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
321                         msg_counter != nb_ops) {
322                 rx_op = (struct rte_crypto_op *)(uintptr_t)
323                                 (resp_msg->opaque_data);
324
325 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
326                 rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
327                         sizeof(struct icp_qat_fw_comn_resp));
328 #endif
329                 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
330                                 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
331                                         resp_msg->comn_hdr.comn_status)) {
332                         rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
333                 } else {
334                         struct qat_sym_session *sess =
335                                 (struct qat_sym_session *)
336                                         get_session_private_data(
337                                                 rx_op->sym->session,
338                                                 cryptodev_qat_driver_id);
339
340                         if (sess->bpi_ctx)
341                                 qat_bpicipher_postprocess(sess, rx_op);
342                         rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
343                 }
344
345                 head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
346                 resp_msg = (struct icp_qat_fw_comn_resp *)
347                                 ((uint8_t *)rx_queue->base_addr + head);
348                 *ops = rx_op;
349                 ops++;
350                 msg_counter++;
351         }
352         if (msg_counter > 0) {
353                 rx_queue->head = head;
354                 tmp_qp->stats.dequeued_count += msg_counter;
355                 rx_queue->nb_processed_responses += msg_counter;
356                 tmp_qp->inflights16 -= msg_counter;
357
358                 if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
359                         rxq_free_desc(tmp_qp, rx_queue);
360         }
361         /* also check if tail needs to be advanced */
362         if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
363                         tx_queue->tail != tx_queue->csr_tail) {
364                 txq_write_tail(tmp_qp, tx_queue);
365         }
366         return msg_counter;
367 }
368
369 static inline int
370 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
371                 struct qat_alg_buf_list *list, uint32_t data_len)
372 {
373         int nr = 1;
374
375         uint32_t buf_len = rte_pktmbuf_iova(buf) -
376                         buff_start + rte_pktmbuf_data_len(buf);
377
378         list->bufers[0].addr = buff_start;
379         list->bufers[0].resrvd = 0;
380         list->bufers[0].len = buf_len;
381
382         if (data_len <= buf_len) {
383                 list->num_bufs = nr;
384                 list->bufers[0].len = data_len;
385                 return 0;
386         }
387
388         buf = buf->next;
389         while (buf) {
390                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
391                         PMD_DRV_LOG(ERR, "QAT PMD exceeded max number of"
392                                         " QAT SGL entries (%u)",
393                                         QAT_SGL_MAX_NUMBER);
394                         return -EINVAL;
395                 }
396
397                 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
398                 list->bufers[nr].resrvd = 0;
399                 list->bufers[nr].addr = rte_pktmbuf_iova(buf);
400
401                 buf_len += list->bufers[nr].len;
402                 buf = buf->next;
403
404                 if (buf_len > data_len) {
405                         list->bufers[nr].len -=
406                                 buf_len - data_len;
407                         buf = NULL;
408                 }
409                 ++nr;
410         }
411         list->num_bufs = nr;
412
413         return 0;
414 }
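/*
 * Illustrative sketch of the segment walk in qat_sgl_fill_array(), with a
 * hypothetical linked segment type standing in for rte_mbuf so the
 * truncation logic is visible on its own: accumulate segment lengths
 * starting from 'start' (assumed to lie inside the first segment) until
 * data_len is covered, then trim the last entry so the SGL describes exactly
 * data_len bytes.
 */
struct sgl_seg_example {
        uint64_t iova;
        uint32_t len;
        struct sgl_seg_example *next;
};

static __rte_unused int
sgl_fill_example(struct sgl_seg_example *seg, uint64_t start,
                struct qat_alg_buf_list *list, uint32_t data_len)
{
        int nr = 0;
        uint32_t total = 0;

        for (; seg != NULL && total < data_len; seg = seg->next, ++nr) {
                if (nr == QAT_SGL_MAX_NUMBER)
                        return -EINVAL; /* chain longer than one SGL allows */
                list->bufers[nr].addr = nr == 0 ? start : seg->iova;
                list->bufers[nr].len = nr == 0 ?
                                (uint32_t)(seg->iova + seg->len - start) :
                                seg->len;
                list->bufers[nr].resrvd = 0;
                total += list->bufers[nr].len;
        }
        if (total > data_len)   /* last entry overshoots: trim it back */
                list->bufers[nr - 1].len -= total - data_len;
        list->num_bufs = nr;
        return 0;
}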
415
416 static inline void
417 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
418                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
419                 struct rte_crypto_op *op,
420                 struct icp_qat_fw_la_bulk_req *qat_req)
421 {
422         /* copy IV into request if it fits */
423         if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
424                 rte_memcpy(cipher_param->u.cipher_IV_array,
425                                 rte_crypto_op_ctod_offset(op, uint8_t *,
426                                         iv_offset),
427                                 iv_length);
428         } else {
429                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
430                                 qat_req->comn_hdr.serv_specif_flags,
431                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
432                 cipher_param->u.s.cipher_IV_ptr =
433                                 rte_crypto_op_ctophys_offset(op,
434                                         iv_offset);
435         }
436 }
437
438 /** Setting the IV for CCM is a special case: byte 0 carries q-1, where q
439  *  is the size in bytes of the length field that follows the nonce
440  */
441 static inline void
442 set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
443                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
444                 struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
445 {
446         rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
447                         ICP_QAT_HW_CCM_NONCE_OFFSET,
448                         rte_crypto_op_ctod_offset(op, uint8_t *,
449                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
450                         iv_length);
451         *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
452                         q - ICP_QAT_HW_CCM_NONCE_OFFSET;
453
454         if (aad_len_field_sz)
455                 rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
456                         rte_crypto_op_ctod_offset(op, uint8_t *,
457                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
458                         iv_length);
459 }
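/*
 * Illustrative worked example for the CCM counter-block byte set above,
 * following RFC 3610: with a 12-byte nonce the length field occupies
 * q = 15 - 12 = 3 bytes, so byte 0 of the IV/counter block carries
 * q - 1 = 2. The B0 flags byte built by ICP_QAT_HW_CCM_BUILD_B0_FLAGS() is
 * expected to encode
 *     (AAD present) << 6 | ((digest_len - 2) / 2) << 3 | (q - 1)
 * e.g. 0x7a when AAD is present, the digest is 16 bytes and q = 3.
 */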
460
461
462 int
463 qat_sym_build_request(void *in_op, uint8_t *out_msg,
464                 void *op_cookie, enum qat_device_gen qat_dev_gen)
465 {
466         int ret = 0;
467         struct qat_sym_session *ctx;
468         struct icp_qat_fw_la_cipher_req_params *cipher_param;
469         struct icp_qat_fw_la_auth_req_params *auth_param;
470         register struct icp_qat_fw_la_bulk_req *qat_req;
471         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
472         uint32_t cipher_len = 0, cipher_ofs = 0;
473         uint32_t auth_len = 0, auth_ofs = 0;
474         uint32_t min_ofs = 0;
475         uint64_t src_buf_start = 0, dst_buf_start = 0;
476         uint8_t do_sgl = 0;
477         struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
478         struct qat_sym_op_cookie *cookie =
479                                 (struct qat_sym_op_cookie *)op_cookie;
480
481 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
482         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
483                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
484                                 "operation requests, op (%p) is not a "
485                                 "symmetric operation.", op);
486                 return -EINVAL;
487         }
488 #endif
489         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
490                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
491                                 " requests, op (%p) is sessionless.", op);
492                 return -EINVAL;
493         }
494
495         ctx = (struct qat_sym_session *)get_session_private_data(
496                         op->sym->session, cryptodev_qat_driver_id);
497
498         if (unlikely(ctx == NULL)) {
499                 PMD_DRV_LOG(ERR, "Session was not created for this device");
500                 return -EINVAL;
501         }
502
503         if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
504                 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
505                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
506                 return -EINVAL;
507         }
508
509         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
510         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
511         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
512         cipher_param = (void *)&qat_req->serv_specif_rqpars;
513         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
514
515         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
516                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
517                 /* AES-GCM or AES-CCM */
518                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
519                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
520                         (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
521                         && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
522                         && ctx->qat_hash_alg ==
523                                         ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
524                         do_aead = 1;
525                 } else {
526                         do_auth = 1;
527                         do_cipher = 1;
528                 }
529         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
530                 do_auth = 1;
531                 do_cipher = 0;
532         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
533                 do_auth = 0;
534                 do_cipher = 1;
535         }
536
537         if (do_cipher) {
538
539                 if (ctx->qat_cipher_alg ==
540                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
541                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
542                         ctx->qat_cipher_alg ==
543                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
544
545                         if (unlikely(
546                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
547                                  || (cipher_param->cipher_offset
548                                                         % BYTE_LENGTH != 0))) {
549                                 PMD_DRV_LOG(ERR,
550                   "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
551                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
552                                 return -EINVAL;
553                         }
554                         cipher_len = op->sym->cipher.data.length >> 3;
555                         cipher_ofs = op->sym->cipher.data.offset >> 3;
556
557                 } else if (ctx->bpi_ctx) {
558                         /* DOCSIS: only send complete blocks to the device and
559                          * process any partial block in software via CFB mode.
560                          * Even with 0 complete blocks, still send the op so it
561                          * reaches the rx queue for post-processing and dequeuing
562                          */
563                         cipher_len = qat_bpicipher_preprocess(ctx, op);
564                         cipher_ofs = op->sym->cipher.data.offset;
565                 } else {
566                         cipher_len = op->sym->cipher.data.length;
567                         cipher_ofs = op->sym->cipher.data.offset;
568                 }
569
570                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
571                                 cipher_param, op, qat_req);
572                 min_ofs = cipher_ofs;
573         }
574
575         if (do_auth) {
576
577                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
578                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
579                         ctx->qat_hash_alg ==
580                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
581                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
582                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
583                                 PMD_DRV_LOG(ERR,
584                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
585                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
586                                 return -EINVAL;
587                         }
588                         auth_ofs = op->sym->auth.data.offset >> 3;
589                         auth_len = op->sym->auth.data.length >> 3;
590
591                         auth_param->u1.aad_adr =
592                                         rte_crypto_op_ctophys_offset(op,
593                                                         ctx->auth_iv.offset);
594
595                 } else if (ctx->qat_hash_alg ==
596                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
597                                 ctx->qat_hash_alg ==
598                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
599                         /* AES-GMAC */
600                         set_cipher_iv(ctx->auth_iv.length,
601                                 ctx->auth_iv.offset,
602                                 cipher_param, op, qat_req);
603                         auth_ofs = op->sym->auth.data.offset;
604                         auth_len = op->sym->auth.data.length;
605
606                         auth_param->u1.aad_adr = 0;
607                         auth_param->u2.aad_sz = 0;
608
609                         /*
610                          * If len(iv)==12B fw computes J0
611                          */
612                         if (ctx->auth_iv.length == 12) {
613                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
614                                         qat_req->comn_hdr.serv_specif_flags,
615                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
616
617                         }
618                 } else {
619                         auth_ofs = op->sym->auth.data.offset;
620                         auth_len = op->sym->auth.data.length;
621
622                 }
623                 min_ofs = auth_ofs;
624
625                 if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
626                         auth_param->auth_res_addr =
627                                         op->sym->auth.digest.phys_addr;
628
629         }
630
631         if (do_aead) {
632                 /*
633                  * This address may be used to set the AAD physical pointer;
634                  * for CCM it may instead point at the IV offset within the op
635                  */
636                 rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
637                 if (ctx->qat_hash_alg ==
638                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
639                                 ctx->qat_hash_alg ==
640                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
641                         /*
642                          * If len(iv)==12B fw computes J0
643                          */
644                         if (ctx->cipher_iv.length == 12) {
645                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
646                                         qat_req->comn_hdr.serv_specif_flags,
647                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
648                         }
649                         set_cipher_iv(ctx->cipher_iv.length,
650                                         ctx->cipher_iv.offset,
651                                         cipher_param, op, qat_req);
652
653                 } else if (ctx->qat_hash_alg ==
654                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
655
656                         /* In case of AES-CCM this may point to user-selected
657                          * memory or to the IV offset in the crypto_op
658                          */
659                         uint8_t *aad_data = op->sym->aead.aad.data;
660                         /* This is the true AAD length; it does not include the
661                          * 18 bytes of preceding data
662                          */
663                         uint8_t aad_ccm_real_len = 0;
664                         uint8_t aad_len_field_sz = 0;
665                         uint32_t msg_len_be =
666                                         rte_bswap32(op->sym->aead.data.length);
667
668                         if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
669                                 aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
670                                 aad_ccm_real_len = ctx->aad_len -
671                                         ICP_QAT_HW_CCM_AAD_B0_LEN -
672                                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
673                         } else {
674                                 /*
675                                  * aad_len is not greater than 18, so there is no
676                                  * actual AAD data; use the IV after the op for B0
677                                  */
678                                 aad_data = rte_crypto_op_ctod_offset(op,
679                                                 uint8_t *,
680                                                 ctx->cipher_iv.offset);
681                                 aad_phys_addr_aead =
682                                                 rte_crypto_op_ctophys_offset(op,
683                                                         ctx->cipher_iv.offset);
684                         }
685
686                         uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
687                                                         ctx->cipher_iv.length;
688
689                         aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
690                                                         aad_len_field_sz,
691                                                         ctx->digest_length, q);
692
693                         if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
694                                 memcpy(aad_data + ctx->cipher_iv.length +
695                                     ICP_QAT_HW_CCM_NONCE_OFFSET +
696                                     (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
697                                     (uint8_t *)&msg_len_be,
698                                     ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
699                         } else {
700                                 memcpy(aad_data + ctx->cipher_iv.length +
701                                     ICP_QAT_HW_CCM_NONCE_OFFSET,
702                                     (uint8_t *)&msg_len_be
703                                     + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
704                                     - q), q);
705                         }
706
707                         if (aad_len_field_sz > 0) {
708                                 *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
709                                                 = rte_bswap16(aad_ccm_real_len);
710
711                                 if ((aad_ccm_real_len + aad_len_field_sz)
712                                                 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
713                                         uint8_t pad_len = 0;
714                                         uint8_t pad_idx = 0;
715
716                                         pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
717                                         ((aad_ccm_real_len + aad_len_field_sz) %
718                                                 ICP_QAT_HW_CCM_AAD_B0_LEN);
719                                         pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
720                                             aad_ccm_real_len + aad_len_field_sz;
721                                         memset(&aad_data[pad_idx],
722                                                         0, pad_len);
723                                 }
724
725                         }
726
727                         set_cipher_iv_ccm(ctx->cipher_iv.length,
728                                         ctx->cipher_iv.offset,
729                                         cipher_param, op, q,
730                                         aad_len_field_sz);
731
732                 }
733
734                 cipher_len = op->sym->aead.data.length;
735                 cipher_ofs = op->sym->aead.data.offset;
736                 auth_len = op->sym->aead.data.length;
737                 auth_ofs = op->sym->aead.data.offset;
738
739                 auth_param->u1.aad_adr = aad_phys_addr_aead;
740                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
741                 min_ofs = op->sym->aead.data.offset;
742         }
743
744         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
745                 do_sgl = 1;
746
747         /* adjust for chain case */
748         if (do_cipher && do_auth)
749                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
750
751         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
752                 min_ofs = 0;
753
754         if (unlikely(op->sym->m_dst != NULL)) {
755                 /* Out-of-place operation (OOP)
756                  * Don't align DMA start. DMA the minimum data-set
757                  * so as not to overwrite data in dest buffer
758                  */
759                 src_buf_start =
760                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
761                 dst_buf_start =
762                         rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
763
764         } else {
765                 /* In-place operation
766                  * Start DMA at nearest aligned address below min_ofs
767                  */
768                 src_buf_start =
769                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
770                                                 & QAT_64_BTYE_ALIGN_MASK;
771
772                 if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
773                                         rte_pktmbuf_headroom(op->sym->m_src))
774                                                         > src_buf_start)) {
775                         /* alignment has pushed addr ahead of start of mbuf
776                          * so revert and take the performance hit
777                          */
778                         src_buf_start =
779                                 rte_pktmbuf_iova_offset(op->sym->m_src,
780                                                                 min_ofs);
781                 }
782                 dst_buf_start = src_buf_start;
783         }
784
785         if (do_cipher || do_aead) {
786                 cipher_param->cipher_offset =
787                                 (uint32_t)rte_pktmbuf_iova_offset(
788                                 op->sym->m_src, cipher_ofs) - src_buf_start;
789                 cipher_param->cipher_length = cipher_len;
790         } else {
791                 cipher_param->cipher_offset = 0;
792                 cipher_param->cipher_length = 0;
793         }
794
795         if (do_auth || do_aead) {
796                 auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
797                                 op->sym->m_src, auth_ofs) - src_buf_start;
798                 auth_param->auth_len = auth_len;
799         } else {
800                 auth_param->auth_off = 0;
801                 auth_param->auth_len = 0;
802         }
803
804         qat_req->comn_mid.dst_length =
805                 qat_req->comn_mid.src_length =
806                 (cipher_param->cipher_offset + cipher_param->cipher_length)
807                 > (auth_param->auth_off + auth_param->auth_len) ?
808                 (cipher_param->cipher_offset + cipher_param->cipher_length)
809                 : (auth_param->auth_off + auth_param->auth_len);
810
811         if (do_sgl) {
812
813                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
814                                 QAT_COMN_PTR_TYPE_SGL);
815                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
816                                 &cookie->qat_sgl_list_src,
817                                 qat_req->comn_mid.src_length);
818                 if (ret) {
819                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
820                         return ret;
821                 }
822
823                 if (likely(op->sym->m_dst == NULL))
824                         qat_req->comn_mid.dest_data_addr =
825                                 qat_req->comn_mid.src_data_addr =
826                                 cookie->qat_sgl_src_phys_addr;
827                 else {
828                         ret = qat_sgl_fill_array(op->sym->m_dst,
829                                         dst_buf_start,
830                                         &cookie->qat_sgl_list_dst,
831                                                 qat_req->comn_mid.dst_length);
832
833                         if (ret) {
834                                 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
835                                                 "fill sgl array");
836                                 return ret;
837                         }
838
839                         qat_req->comn_mid.src_data_addr =
840                                 cookie->qat_sgl_src_phys_addr;
841                         qat_req->comn_mid.dest_data_addr =
842                                         cookie->qat_sgl_dst_phys_addr;
843                 }
844         } else {
845                 qat_req->comn_mid.src_data_addr = src_buf_start;
846                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
847         }
848
849 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
850         rte_hexdump(stdout, "qat_req:", qat_req,
851                         sizeof(struct icp_qat_fw_la_bulk_req));
852         rte_hexdump(stdout, "src_data:",
853                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
854                         rte_pktmbuf_data_len(op->sym->m_src));
855         if (do_cipher) {
856                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
857                                                 uint8_t *,
858                                                 ctx->cipher_iv.offset);
859                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
860                                 ctx->cipher_iv.length);
861         }
862
863         if (do_auth) {
864                 if (ctx->auth_iv.length) {
865                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
866                                                         uint8_t *,
867                                                         ctx->auth_iv.offset);
868                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
869                                                 ctx->auth_iv.length);
870                 }
871                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
872                                 ctx->digest_length);
873         }
874
875         if (do_aead) {
876                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
877                                 ctx->digest_length);
878                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
879                                 ctx->aad_len);
880         }
881 #endif
882         return 0;
883 }
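/*
 * Illustrative worked example for the src/dst length computed above: the
 * request covers up to whichever region ends later. With a hypothetical
 * cipher region of offset 16 and length 64 (ends at 80) and an auth region
 * of offset 0 and length 96 (ends at 96), src_length and dst_length are both
 * set to 96.
 */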
884
885 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
886 {
887         uint32_t div = data >> shift;
888         uint32_t mult = div << shift;
889
890         return data - mult;
891 }
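/*
 * Illustrative worked example: adf_modulo() is a power-of-two modulo, i.e.
 * data mod 2^shift, written without a mask constant. For a ring whose byte
 * size is 2^12 = 4096 and data = 4160: div = 4160 >> 12 = 1,
 * mult = 1 << 12 = 4096, so the result is 4160 - 4096 = 64, the same as
 * data & 0xfff. This is how tail and head offsets wrap around the ring.
 */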
892
893 void qat_sym_stats_get(struct rte_cryptodev *dev,
894                 struct rte_cryptodev_stats *stats)
895 {
896         int i;
897         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
898
899         PMD_INIT_FUNC_TRACE();
900         if (stats == NULL) {
901                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
902                 return;
903         }
904         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
905                 if (qp[i] == NULL) {
906                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
907                         continue;
908                 }
909
910                 stats->enqueued_count += qp[i]->stats.enqueued_count;
911                 stats->dequeued_count += qp[i]->stats.dequeued_count;
912                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
913                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
914         }
915 }
916
917 void qat_sym_stats_reset(struct rte_cryptodev *dev)
918 {
919         int i;
920         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
921
922         PMD_INIT_FUNC_TRACE();
923         for (i = 0; i < dev->data->nb_queue_pairs; i++)
924                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
925         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
926 }