crypto/cnxk: remove useless return code
[dpdk.git] / drivers / crypto / cnxk / cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
/* Bytes needed for one IOV descriptor: a roc_se_iov_ptr header followed by
 * ROC_SE_MAX_SG_CNT buffer pointers. Source and destination lists are the
 * same size.
 */
#define SRC_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
/* Per-session private data for the cnxk symmetric engine (SE) datapath.
 * Layout is ABI-sensitive; do not reorder fields.
 */
struct cnxk_se_sess {
	uint16_t cpt_op : 4;       /* CPT operation type flags */
	uint16_t zsk_flag : 4;     /* ZUC/SNOW/Kasumi algo flag */
	uint16_t aes_gcm : 1;      /* Cipher is AES-GCM */
	uint16_t aes_ctr : 1;      /* Cipher is AES-CTR */
	uint16_t chacha_poly : 1;  /* Cipher is ChaCha20-Poly1305 */
	uint16_t is_null : 1;      /* NULL cipher/auth session */
	uint16_t is_gmac : 1;      /* Auth-only AES-GMAC session */
	uint16_t rsvd1 : 3;        /* Unused; pads flags to 16 bits */
	uint16_t aad_length;       /* AAD length in bytes */
	uint8_t mac_len;           /* Digest (MAC) length in bytes */
	uint8_t iv_length;         /* Cipher IV length in bytes */
	uint8_t auth_iv_length;    /* Auth IV length in bytes */
	uint16_t iv_offset;        /* IV offset within the crypto op */
	uint16_t auth_iv_offset;   /* Auth IV offset within the crypto op */
	uint32_t salt;             /* Salt prepended to IV (AEAD/CTR modes) */
	uint64_t cpt_inst_w7;      /* Precomputed CPT instruction word 7 */
	struct roc_se_ctx roc_se_ctx; /* ROC SE hardware context */
} __rte_cache_aligned;
38
39 static __rte_always_inline int
40 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess);
41
static inline void
cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst)
{
	int s, d;

	iv_dst[16] = iv_src[16];
	/* Compress the trailing 8 IV bytes into 6: drop the two MSB bits
	 * of each source byte and concatenate the remaining 6-bit fields.
	 * Two groups of four source bytes each yield three packed bytes.
	 */
	for (s = 17, d = 17; s < 25; s += 4, d += 3) {
		iv_dst[d] = ((iv_src[s] & 0x3f) << 2) |
			    ((iv_src[s + 1] >> 4) & 0x3);
		iv_dst[d + 1] = ((iv_src[s + 1] & 0xf) << 4) |
				((iv_src[s + 2] >> 2) & 0xf);
		iv_dst[d + 2] = ((iv_src[s + 2] & 0x3) << 6) |
				(iv_src[s + 3] & 0x3f);
	}
}
57
58 static inline void
59 pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type,
60              uint8_t pack_iv)
61 {
62         uint32_t *iv_s_temp, iv_temp[4];
63         int j;
64
65         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
66                 /*
67                  * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
68                  * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
69                  */
70
71                 iv_s_temp = (uint32_t *)iv_s;
72
73                 for (j = 0; j < 4; j++)
74                         iv_temp[j] = iv_s_temp[3 - j];
75                 memcpy(iv_d, iv_temp, 16);
76         } else if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_ZUC) {
77                 /* ZUC doesn't need a swap */
78                 memcpy(iv_d, iv_s, 16);
79                 if (pack_iv)
80                         cpt_pack_iv(iv_s, iv_d);
81         } else {
82                 /* AES-CMAC EIA2, microcode expects 16B zeroized IV */
83                 for (j = 0; j < 4; j++)
84                         iv_d[j] = 0;
85         }
86 }
87
88 static __rte_always_inline int
89 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
90 {
91         uint16_t mac_len = auth->digest_length;
92         int ret;
93
94         switch (auth->algo) {
95         case RTE_CRYPTO_AUTH_MD5:
96         case RTE_CRYPTO_AUTH_MD5_HMAC:
97                 ret = (mac_len == 16) ? 0 : -1;
98                 break;
99         case RTE_CRYPTO_AUTH_SHA1:
100         case RTE_CRYPTO_AUTH_SHA1_HMAC:
101                 ret = (mac_len == 20) ? 0 : -1;
102                 break;
103         case RTE_CRYPTO_AUTH_SHA224:
104         case RTE_CRYPTO_AUTH_SHA224_HMAC:
105                 ret = (mac_len == 28) ? 0 : -1;
106                 break;
107         case RTE_CRYPTO_AUTH_SHA256:
108         case RTE_CRYPTO_AUTH_SHA256_HMAC:
109                 ret = (mac_len == 32) ? 0 : -1;
110                 break;
111         case RTE_CRYPTO_AUTH_SHA384:
112         case RTE_CRYPTO_AUTH_SHA384_HMAC:
113                 ret = (mac_len == 48) ? 0 : -1;
114                 break;
115         case RTE_CRYPTO_AUTH_SHA512:
116         case RTE_CRYPTO_AUTH_SHA512_HMAC:
117                 ret = (mac_len == 64) ? 0 : -1;
118                 break;
119         case RTE_CRYPTO_AUTH_NULL:
120                 ret = 0;
121                 break;
122         default:
123                 ret = -1;
124         }
125
126         return ret;
127 }
128
129 static __rte_always_inline void
130 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
131 {
132         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
133         memcpy(fctx->enc.encr_iv, salt, 4);
134 }
135
136 static __rte_always_inline uint32_t
137 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
138              uint32_t size)
139 {
140         struct roc_se_sglist_comp *to = &list[i >> 2];
141
142         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
143         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
144         i++;
145         return i;
146 }
147
148 static __rte_always_inline uint32_t
149 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
150                       struct roc_se_buf_ptr *from)
151 {
152         struct roc_se_sglist_comp *to = &list[i >> 2];
153
154         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
155         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
156         i++;
157         return i;
158 }
159
160 static __rte_always_inline uint32_t
161 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
162                           struct roc_se_buf_ptr *from, uint32_t *psize)
163 {
164         struct roc_se_sglist_comp *to = &list[i >> 2];
165         uint32_t size = *psize;
166         uint32_t e_len;
167
168         e_len = (size > from->size) ? from->size : size;
169         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
170         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
171         *psize -= e_len;
172         i++;
173         return i;
174 }
175
/*
 * This fills the MC expected SGIO list from the IOV given by the user.
 * Consumes up to *psize bytes starting at 'from_offset' within the IOV.
 * If 'extra_buf' is given, it is spliced into the stream at 'extra_offset'
 * bytes from the start (used to inject AAD between passthrough and data).
 * On return *psize holds the bytes that could NOT be placed (0 on success)
 * and the return value is the next free SG index.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_iov_ptr *from, uint32_t from_offset,
		      uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
		      uint32_t extra_offset)
{
	int32_t j;
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;
	struct roc_se_buf_ptr *bufs;

	bufs = from->bufs;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		uint64_t e_vaddr;
		uint32_t e_len;
		struct roc_se_sglist_comp *to = &list[i >> 2];

		/* Skip whole buffers until the requested start offset */
		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
				continue;
			}
			e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
					(bufs[j].size - from_offset) :
					size;
			from_offset = 0;
		} else {
			e_vaddr = (uint64_t)bufs[j].vaddr;
			e_len = (size > bufs[j].size) ? bufs[j].size : size;
		}

		/* Tentatively place this fragment; it may be trimmed below
		 * if the extra buffer has to be spliced inside it.
		 */
		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			uint64_t next_vaddr = e_vaddr + extra_offset;

			if (!extra_offset) {
				/* Extra buffer goes first: reuse this slot
				 * (the entry written above is overwritten).
				 */
				i--;
			} else {
				/* Trim the current entry to end where the
				 * extra buffer is inserted.
				 */
				e_len = extra_offset;
				size -= e_len;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			}

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(
					(uint64_t)extra_buf->vaddr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
				size -= next_len;
			}
			/* Extra buffer is inserted at most once */
			extra_len = 0;

		} else {
			size -= e_len;
		}
		/* NOTE(review): advances the splice point by the remaining
		 * (not consumed) size — looks intentional but non-obvious;
		 * verify against callers before changing.
		 */
		if (extra_offset)
			extra_offset -= size;
		i++;
	}

	*psize = size;
	return (uint32_t)i;
}
262
263 static __rte_always_inline int
264 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
265                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
266 {
267         void *m_vaddr = params->meta_buf.vaddr;
268         uint32_t size, i;
269         uint16_t data_len, mac_len, key_len;
270         roc_se_auth_type hash_type;
271         struct roc_se_ctx *ctx;
272         struct roc_se_sglist_comp *gather_comp;
273         struct roc_se_sglist_comp *scatter_comp;
274         uint8_t *in_buffer;
275         uint32_t g_size_bytes, s_size_bytes;
276         union cpt_inst_w4 cpt_inst_w4;
277
278         ctx = params->ctx_buf.vaddr;
279
280         hash_type = ctx->hash_type;
281         mac_len = ctx->mac_len;
282         key_len = ctx->auth_key_len;
283         data_len = ROC_SE_AUTH_DLEN(d_lens);
284
285         /*GP op header */
286         cpt_inst_w4.s.opcode_minor = 0;
287         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
288         if (ctx->hmac) {
289                 cpt_inst_w4.s.opcode_major =
290                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
291                 cpt_inst_w4.s.param1 = key_len;
292                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
293         } else {
294                 cpt_inst_w4.s.opcode_major =
295                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
296                 cpt_inst_w4.s.param1 = 0;
297                 cpt_inst_w4.s.dlen = data_len;
298         }
299
300         /* Null auth only case enters the if */
301         if (unlikely(!hash_type && !ctx->enc_cipher)) {
302                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
303                 /* Minor op is passthrough */
304                 cpt_inst_w4.s.opcode_minor = 0x03;
305                 /* Send out completion code only */
306                 cpt_inst_w4.s.param2 = 0x1;
307         }
308
309         /* DPTR has SG list */
310         in_buffer = m_vaddr;
311
312         ((uint16_t *)in_buffer)[0] = 0;
313         ((uint16_t *)in_buffer)[1] = 0;
314
315         /* TODO Add error check if space will be sufficient */
316         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
317
318         /*
319          * Input gather list
320          */
321
322         i = 0;
323
324         if (ctx->hmac) {
325                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
326                 /* Key */
327                 i = fill_sg_comp(gather_comp, i, k_vaddr,
328                                  RTE_ALIGN_CEIL(key_len, 8));
329         }
330
331         /* input data */
332         size = data_len;
333         if (size) {
334                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
335                                           &size, NULL, 0);
336                 if (unlikely(size)) {
337                         plt_dp_err("Insufficient dst IOV size, short by %dB",
338                                    size);
339                         return -1;
340                 }
341         } else {
342                 /*
343                  * Looks like we need to support zero data
344                  * gather ptr in case of hash & hmac
345                  */
346                 i++;
347         }
348         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
349         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
350
351         /*
352          * Output Gather list
353          */
354
355         i = 0;
356         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
357                                                      g_size_bytes);
358
359         if (flags & ROC_SE_VALID_MAC_BUF) {
360                 if (unlikely(params->mac_buf.size < mac_len)) {
361                         plt_dp_err("Insufficient MAC size");
362                         return -1;
363                 }
364
365                 size = mac_len;
366                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
367                                               &size);
368         } else {
369                 size = mac_len;
370                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
371                                           data_len, &size, NULL, 0);
372                 if (unlikely(size)) {
373                         plt_dp_err("Insufficient dst IOV size, short by %dB",
374                                    size);
375                         return -1;
376                 }
377         }
378
379         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
380         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
381
382         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
383
384         /* This is DPTR len in case of SG mode */
385         cpt_inst_w4.s.dlen = size;
386
387         inst->dptr = (uint64_t)in_buffer;
388         inst->w4.u64 = cpt_inst_w4.u64;
389
390         return 0;
391 }
392
393 static __rte_always_inline int
394 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
395                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
396 {
397         uint32_t iv_offset = 0;
398         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
399         struct roc_se_ctx *se_ctx;
400         uint32_t cipher_type, hash_type;
401         uint32_t mac_len, size;
402         uint8_t iv_len = 16;
403         struct roc_se_buf_ptr *aad_buf = NULL;
404         uint32_t encr_offset, auth_offset;
405         uint32_t encr_data_len, auth_data_len, aad_len = 0;
406         uint32_t passthrough_len = 0;
407         union cpt_inst_w4 cpt_inst_w4;
408         void *offset_vaddr;
409         uint8_t op_minor;
410
411         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
412         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
413         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
414         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
415         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
416                 /* We don't support both AAD and auth data separately */
417                 auth_data_len = 0;
418                 auth_offset = 0;
419                 aad_len = fc_params->aad_buf.size;
420                 aad_buf = &fc_params->aad_buf;
421         }
422         se_ctx = fc_params->ctx_buf.vaddr;
423         cipher_type = se_ctx->enc_cipher;
424         hash_type = se_ctx->hash_type;
425         mac_len = se_ctx->mac_len;
426         op_minor = se_ctx->template_w4.s.opcode_minor;
427
428         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
429                 iv_len = 0;
430                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
431         }
432
433         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
434                 /*
435                  * When AAD is given, data above encr_offset is pass through
436                  * Since AAD is given as separate pointer and not as offset,
437                  * this is a special case as we need to fragment input data
438                  * into passthrough + encr_data and then insert AAD in between.
439                  */
440                 if (hash_type != ROC_SE_GMAC_TYPE) {
441                         passthrough_len = encr_offset;
442                         auth_offset = passthrough_len + iv_len;
443                         encr_offset = passthrough_len + aad_len + iv_len;
444                         auth_data_len = aad_len + encr_data_len;
445                 } else {
446                         passthrough_len = 16 + aad_len;
447                         auth_offset = passthrough_len + iv_len;
448                         auth_data_len = aad_len;
449                 }
450         } else {
451                 encr_offset += iv_len;
452                 auth_offset += iv_len;
453         }
454
455         /* Encryption */
456         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
457         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
458         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
459
460         if (hash_type == ROC_SE_GMAC_TYPE) {
461                 encr_offset = 0;
462                 encr_data_len = 0;
463         }
464
465         auth_dlen = auth_offset + auth_data_len;
466         enc_dlen = encr_data_len + encr_offset;
467         if (unlikely(encr_data_len & 0xf)) {
468                 if ((cipher_type == ROC_SE_DES3_CBC) ||
469                     (cipher_type == ROC_SE_DES3_ECB))
470                         enc_dlen =
471                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
472                 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
473                                 (cipher_type == ROC_SE_AES_ECB)))
474                         enc_dlen =
475                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
476         }
477
478         if (unlikely(auth_dlen > enc_dlen)) {
479                 inputlen = auth_dlen;
480                 outputlen = auth_dlen + mac_len;
481         } else {
482                 inputlen = enc_dlen;
483                 outputlen = enc_dlen + mac_len;
484         }
485
486         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
487                 outputlen = enc_dlen;
488
489         /* GP op header */
490         cpt_inst_w4.s.param1 = encr_data_len;
491         cpt_inst_w4.s.param2 = auth_data_len;
492
493         /*
494          * In cn9k, cn10k since we have a limitation of
495          * IV & Offset control word not part of instruction
496          * and need to be part of Data Buffer, we check if
497          * head room is there and then only do the Direct mode processing
498          */
499         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
500                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
501                 void *dm_vaddr = fc_params->bufs[0].vaddr;
502
503                 /* Use Direct mode */
504
505                 offset_vaddr =
506                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
507
508                 /* DPTR */
509                 inst->dptr = (uint64_t)offset_vaddr;
510
511                 /* RPTR should just exclude offset control word */
512                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
513
514                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
515
516                 if (likely(iv_len)) {
517                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
518                                                       ROC_SE_OFF_CTRL_LEN);
519                         uint64_t *src = fc_params->iv_buf;
520                         dest[0] = src[0];
521                         dest[1] = src[1];
522                 }
523
524         } else {
525                 void *m_vaddr = fc_params->meta_buf.vaddr;
526                 uint32_t i, g_size_bytes, s_size_bytes;
527                 struct roc_se_sglist_comp *gather_comp;
528                 struct roc_se_sglist_comp *scatter_comp;
529                 uint8_t *in_buffer;
530
531                 /* This falls under strict SG mode */
532                 offset_vaddr = m_vaddr;
533                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
534
535                 m_vaddr = (uint8_t *)m_vaddr + size;
536
537                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
538
539                 if (likely(iv_len)) {
540                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
541                                                       ROC_SE_OFF_CTRL_LEN);
542                         uint64_t *src = fc_params->iv_buf;
543                         dest[0] = src[0];
544                         dest[1] = src[1];
545                 }
546
547                 /* DPTR has SG list */
548                 in_buffer = m_vaddr;
549
550                 ((uint16_t *)in_buffer)[0] = 0;
551                 ((uint16_t *)in_buffer)[1] = 0;
552
553                 /* TODO Add error check if space will be sufficient */
554                 gather_comp =
555                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
556
557                 /*
558                  * Input Gather List
559                  */
560
561                 i = 0;
562
563                 /* Offset control word that includes iv */
564                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
565                                  ROC_SE_OFF_CTRL_LEN + iv_len);
566
567                 /* Add input data */
568                 size = inputlen - iv_len;
569                 if (likely(size)) {
570                         uint32_t aad_offset = aad_len ? passthrough_len : 0;
571
572                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
573                                 i = fill_sg_comp_from_buf_min(
574                                         gather_comp, i, fc_params->bufs, &size);
575                         } else {
576                                 i = fill_sg_comp_from_iov(
577                                         gather_comp, i, fc_params->src_iov, 0,
578                                         &size, aad_buf, aad_offset);
579                         }
580
581                         if (unlikely(size)) {
582                                 plt_dp_err("Insufficient buffer space,"
583                                            " size %d needed",
584                                            size);
585                                 return -1;
586                         }
587                 }
588                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
589                 g_size_bytes =
590                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
591
592                 /*
593                  * Output Scatter list
594                  */
595                 i = 0;
596                 scatter_comp =
597                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
598                                                       g_size_bytes);
599
600                 /* Add IV */
601                 if (likely(iv_len)) {
602                         i = fill_sg_comp(scatter_comp, i,
603                                          (uint64_t)offset_vaddr +
604                                                  ROC_SE_OFF_CTRL_LEN,
605                                          iv_len);
606                 }
607
608                 /* output data or output data + digest*/
609                 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
610                         size = outputlen - iv_len - mac_len;
611                         if (size) {
612                                 uint32_t aad_offset =
613                                         aad_len ? passthrough_len : 0;
614
615                                 if (unlikely(flags &
616                                              ROC_SE_SINGLE_BUF_INPLACE)) {
617                                         i = fill_sg_comp_from_buf_min(
618                                                 scatter_comp, i,
619                                                 fc_params->bufs, &size);
620                                 } else {
621                                         i = fill_sg_comp_from_iov(
622                                                 scatter_comp, i,
623                                                 fc_params->dst_iov, 0, &size,
624                                                 aad_buf, aad_offset);
625                                 }
626                                 if (unlikely(size)) {
627                                         plt_dp_err("Insufficient buffer"
628                                                    " space, size %d needed",
629                                                    size);
630                                         return -1;
631                                 }
632                         }
633                         /* mac_data */
634                         if (mac_len) {
635                                 i = fill_sg_comp_from_buf(scatter_comp, i,
636                                                           &fc_params->mac_buf);
637                         }
638                 } else {
639                         /* Output including mac */
640                         size = outputlen - iv_len;
641                         if (likely(size)) {
642                                 uint32_t aad_offset =
643                                         aad_len ? passthrough_len : 0;
644
645                                 if (unlikely(flags &
646                                              ROC_SE_SINGLE_BUF_INPLACE)) {
647                                         i = fill_sg_comp_from_buf_min(
648                                                 scatter_comp, i,
649                                                 fc_params->bufs, &size);
650                                 } else {
651                                         i = fill_sg_comp_from_iov(
652                                                 scatter_comp, i,
653                                                 fc_params->dst_iov, 0, &size,
654                                                 aad_buf, aad_offset);
655                                 }
656                                 if (unlikely(size)) {
657                                         plt_dp_err("Insufficient buffer"
658                                                    " space, size %d needed",
659                                                    size);
660                                         return -1;
661                                 }
662                         }
663                 }
664                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
665                 s_size_bytes =
666                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
667
668                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
669
670                 /* This is DPTR len in case of SG mode */
671                 cpt_inst_w4.s.dlen = size;
672
673                 inst->dptr = (uint64_t)in_buffer;
674         }
675
676         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
677                      (auth_offset >> 8))) {
678                 plt_dp_err("Offset not supported");
679                 plt_dp_err("enc_offset: %d", encr_offset);
680                 plt_dp_err("iv_offset : %d", iv_offset);
681                 plt_dp_err("auth_offset: %d", auth_offset);
682                 return -1;
683         }
684
685         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
686                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
687                 ((uint64_t)auth_offset));
688
689         inst->w4.u64 = cpt_inst_w4.u64;
690         return 0;
691 }
692
693 static __rte_always_inline int
694 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
695                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
696 {
697         uint32_t iv_offset = 0, size;
698         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
699         struct roc_se_ctx *se_ctx;
700         int32_t hash_type, mac_len;
701         uint8_t iv_len = 16;
702         struct roc_se_buf_ptr *aad_buf = NULL;
703         uint32_t encr_offset, auth_offset;
704         uint32_t encr_data_len, auth_data_len, aad_len = 0;
705         uint32_t passthrough_len = 0;
706         union cpt_inst_w4 cpt_inst_w4;
707         void *offset_vaddr;
708         uint8_t op_minor;
709
710         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
711         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
712         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
713         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
714
715         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
716                 /* We don't support both AAD and auth data separately */
717                 auth_data_len = 0;
718                 auth_offset = 0;
719                 aad_len = fc_params->aad_buf.size;
720                 aad_buf = &fc_params->aad_buf;
721         }
722
723         se_ctx = fc_params->ctx_buf.vaddr;
724         hash_type = se_ctx->hash_type;
725         mac_len = se_ctx->mac_len;
726         op_minor = se_ctx->template_w4.s.opcode_minor;
727
728         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
729                 iv_len = 0;
730                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
731         }
732
733         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
734                 /*
735                  * When AAD is given, data above encr_offset is pass through
736                  * Since AAD is given as separate pointer and not as offset,
737                  * this is a special case as we need to fragment input data
738                  * into passthrough + encr_data and then insert AAD in between.
739                  */
740                 if (hash_type != ROC_SE_GMAC_TYPE) {
741                         passthrough_len = encr_offset;
742                         auth_offset = passthrough_len + iv_len;
743                         encr_offset = passthrough_len + aad_len + iv_len;
744                         auth_data_len = aad_len + encr_data_len;
745                 } else {
746                         passthrough_len = 16 + aad_len;
747                         auth_offset = passthrough_len + iv_len;
748                         auth_data_len = aad_len;
749                 }
750         } else {
751                 encr_offset += iv_len;
752                 auth_offset += iv_len;
753         }
754
755         /* Decryption */
756         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
757         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
758         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
759
760         if (hash_type == ROC_SE_GMAC_TYPE) {
761                 encr_offset = 0;
762                 encr_data_len = 0;
763         }
764
765         enc_dlen = encr_offset + encr_data_len;
766         auth_dlen = auth_offset + auth_data_len;
767
768         if (auth_dlen > enc_dlen) {
769                 inputlen = auth_dlen + mac_len;
770                 outputlen = auth_dlen;
771         } else {
772                 inputlen = enc_dlen + mac_len;
773                 outputlen = enc_dlen;
774         }
775
776         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
777                 outputlen = inputlen = enc_dlen;
778
779         cpt_inst_w4.s.param1 = encr_data_len;
780         cpt_inst_w4.s.param2 = auth_data_len;
781
782         /*
783          * In cn9k, cn10k since we have a limitation of
784          * IV & Offset control word not part of instruction
785          * and need to be part of Data Buffer, we check if
786          * head room is there and then only do the Direct mode processing
787          */
788         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
789                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
790                 void *dm_vaddr = fc_params->bufs[0].vaddr;
791
792                 /* Use Direct mode */
793
794                 offset_vaddr =
795                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
796                 inst->dptr = (uint64_t)offset_vaddr;
797
798                 /* RPTR should just exclude offset control word */
799                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
800
801                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
802
803                 if (likely(iv_len)) {
804                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
805                                                       ROC_SE_OFF_CTRL_LEN);
806                         uint64_t *src = fc_params->iv_buf;
807                         dest[0] = src[0];
808                         dest[1] = src[1];
809                 }
810
811         } else {
812                 void *m_vaddr = fc_params->meta_buf.vaddr;
813                 uint32_t g_size_bytes, s_size_bytes;
814                 struct roc_se_sglist_comp *gather_comp;
815                 struct roc_se_sglist_comp *scatter_comp;
816                 uint8_t *in_buffer;
817                 uint8_t i = 0;
818
819                 /* This falls under strict SG mode */
820                 offset_vaddr = m_vaddr;
821                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
822
823                 m_vaddr = (uint8_t *)m_vaddr + size;
824
825                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
826
827                 if (likely(iv_len)) {
828                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
829                                                       ROC_SE_OFF_CTRL_LEN);
830                         uint64_t *src = fc_params->iv_buf;
831                         dest[0] = src[0];
832                         dest[1] = src[1];
833                 }
834
835                 /* DPTR has SG list */
836                 in_buffer = m_vaddr;
837
838                 ((uint16_t *)in_buffer)[0] = 0;
839                 ((uint16_t *)in_buffer)[1] = 0;
840
841                 /* TODO Add error check if space will be sufficient */
842                 gather_comp =
843                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
844
845                 /*
846                  * Input Gather List
847                  */
848                 i = 0;
849
850                 /* Offset control word that includes iv */
851                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
852                                  ROC_SE_OFF_CTRL_LEN + iv_len);
853
854                 /* Add input data */
855                 if (flags & ROC_SE_VALID_MAC_BUF) {
856                         size = inputlen - iv_len - mac_len;
857                         if (size) {
858                                 /* input data only */
859                                 if (unlikely(flags &
860                                              ROC_SE_SINGLE_BUF_INPLACE)) {
861                                         i = fill_sg_comp_from_buf_min(
862                                                 gather_comp, i, fc_params->bufs,
863                                                 &size);
864                                 } else {
865                                         uint32_t aad_offset =
866                                                 aad_len ? passthrough_len : 0;
867
868                                         i = fill_sg_comp_from_iov(
869                                                 gather_comp, i,
870                                                 fc_params->src_iov, 0, &size,
871                                                 aad_buf, aad_offset);
872                                 }
873                                 if (unlikely(size)) {
874                                         plt_dp_err("Insufficient buffer"
875                                                    " space, size %d needed",
876                                                    size);
877                                         return -1;
878                                 }
879                         }
880
881                         /* mac data */
882                         if (mac_len) {
883                                 i = fill_sg_comp_from_buf(gather_comp, i,
884                                                           &fc_params->mac_buf);
885                         }
886                 } else {
887                         /* input data + mac */
888                         size = inputlen - iv_len;
889                         if (size) {
890                                 if (unlikely(flags &
891                                              ROC_SE_SINGLE_BUF_INPLACE)) {
892                                         i = fill_sg_comp_from_buf_min(
893                                                 gather_comp, i, fc_params->bufs,
894                                                 &size);
895                                 } else {
896                                         uint32_t aad_offset =
897                                                 aad_len ? passthrough_len : 0;
898
899                                         if (unlikely(!fc_params->src_iov)) {
900                                                 plt_dp_err("Bad input args");
901                                                 return -1;
902                                         }
903
904                                         i = fill_sg_comp_from_iov(
905                                                 gather_comp, i,
906                                                 fc_params->src_iov, 0, &size,
907                                                 aad_buf, aad_offset);
908                                 }
909
910                                 if (unlikely(size)) {
911                                         plt_dp_err("Insufficient buffer"
912                                                    " space, size %d needed",
913                                                    size);
914                                         return -1;
915                                 }
916                         }
917                 }
918                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
919                 g_size_bytes =
920                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
921
922                 /*
923                  * Output Scatter List
924                  */
925
926                 i = 0;
927                 scatter_comp =
928                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
929                                                       g_size_bytes);
930
931                 /* Add iv */
932                 if (iv_len) {
933                         i = fill_sg_comp(scatter_comp, i,
934                                          (uint64_t)offset_vaddr +
935                                                  ROC_SE_OFF_CTRL_LEN,
936                                          iv_len);
937                 }
938
939                 /* Add output data */
940                 size = outputlen - iv_len;
941                 if (size) {
942                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
943                                 /* handle single buffer here */
944                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
945                                                               fc_params->bufs,
946                                                               &size);
947                         } else {
948                                 uint32_t aad_offset =
949                                         aad_len ? passthrough_len : 0;
950
951                                 if (unlikely(!fc_params->dst_iov)) {
952                                         plt_dp_err("Bad input args");
953                                         return -1;
954                                 }
955
956                                 i = fill_sg_comp_from_iov(
957                                         scatter_comp, i, fc_params->dst_iov, 0,
958                                         &size, aad_buf, aad_offset);
959                         }
960
961                         if (unlikely(size)) {
962                                 plt_dp_err("Insufficient buffer space,"
963                                            " size %d needed",
964                                            size);
965                                 return -1;
966                         }
967                 }
968
969                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
970                 s_size_bytes =
971                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
972
973                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
974
975                 /* This is DPTR len in case of SG mode */
976                 cpt_inst_w4.s.dlen = size;
977
978                 inst->dptr = (uint64_t)in_buffer;
979         }
980
981         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
982                      (auth_offset >> 8))) {
983                 plt_dp_err("Offset not supported");
984                 plt_dp_err("enc_offset: %d", encr_offset);
985                 plt_dp_err("iv_offset : %d", iv_offset);
986                 plt_dp_err("auth_offset: %d", auth_offset);
987                 return -1;
988         }
989
990         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
991                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
992                 ((uint64_t)auth_offset));
993
994         inst->w4.u64 = cpt_inst_w4.u64;
995         return 0;
996 }
997
/*
 * Prepare a CPT instruction for a PDCP cipher or auth operation
 * (SNOW3G / ZUC / AES-CTR algorithm types).
 *
 * @param req_flags  Request flags: ROC_SE_SINGLE_BUF_INPLACE +
 *                   ROC_SE_SINGLE_BUF_HEADROOM select direct mode,
 *                   ROC_SE_VALID_MAC_BUF selects a separate MAC buffer.
 * @param d_offs     Packed encr/auth offsets (see ROC_SE_*_OFFSET).
 * @param d_lens     Packed encr/auth data lengths, in bits.
 * @param params     Buffers, IVs and SE session context.
 * @param inst       CPT instruction to fill (dptr, rptr, w4).
 * @return 0 on success, -1 on unsupported offset or insufficient SG space.
 */
static __rte_always_inline int
cpt_pdcp_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t pdcp_alg_type;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags, iv_len;
	uint64_t offset_ctrl;
	uint64_t *offset_vaddr;
	uint8_t *iv_s;
	uint8_t pack_iv = 0;
	union cpt_inst_w4 cpt_inst_w4;

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;
	pdcp_alg_type = se_ctx->pdcp_alg_type;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_PDCP;
	cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;

	/* zsk_flags == 0x1 selects the authentication (MAC-generate) path;
	 * any other value selects the cipher path below.
	 */
	if (flags == 0x1) {
		iv_s = params->auth_iv_buf;

		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
		auth_offset = ROC_SE_AUTH_OFFSET(d_offs);

		if (se_ctx->pdcp_alg_type != ROC_SE_PDCP_ALG_TYPE_AES_CTR) {
			iv_len = params->auth_iv_len;

			/* A 25-byte IV is packed down by 2 bytes (see
			 * cpt_pack_iv); presumably the ZUC-256 IV layout -
			 * TODO confirm.
			 */
			if (iv_len == 25) {
				iv_len -= 2;
				pack_iv = 1;
			}

			/* Offset arrives in bits; convert to bytes */
			auth_offset = auth_offset / 8;

			/* consider iv len */
			auth_offset += iv_len;

			/* Data length is in bits; round up to whole bytes */
			inputlen =
				auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		} else {
			iv_len = 16;

			/* consider iv len */
			auth_offset += iv_len;

			inputlen = auth_offset + auth_data_len;
		}

		/* Auth path produces only the MAC */
		outputlen = mac_len;

		/* auth offset occupies the low byte of the control word */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

		encr_data_len = 0;
		encr_offset = 0;
	} else {
		/* Cipher path */
		iv_s = params->iv_buf;
		iv_len = params->cipher_iv_len;

		/* 25-byte IV packed down by 2 bytes (see cpt_pack_iv) */
		if (iv_len == 25) {
			iv_len -= 2;
			pack_iv = 1;
		}

		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

		/* Offset arrives in bits; convert to bytes */
		encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		/* Data length is in bits; round up to whole bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		auth_data_len = 0;
		auth_offset = 0;
	}

	/* Control word carries 16 bits of encr offset and 8 of auth offset */
	if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * In cn9k, cn10k since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;

		/* Use Direct mode */

		/* Control word + IV live in the headroom just before data */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    ROC_SE_OFF_CTRL_LEN - iv_len);

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;
		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

		*offset_vaddr = offset_ctrl;
	} else {
		/* Scatter-gather mode: build SG lists in the meta buffer */
		void *m_vaddr = params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint8_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN +
			  RTE_ALIGN_CEIL(iv_len, 8);

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		/* Clear SG header; component counts filled in below */
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov, 0, &size,
						  NULL, 0);
			/* Non-zero residual size => src_iov was too small */
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		/* Gather component count, big-endian as HW expects */
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */

		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		if (flags == 0x1) {
			/* IV in SLIST only for EEA3 & UEA2 */
			iv_len = 0;
		}

		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		if (req_flags & ROC_SE_VALID_MAC_BUF) {
			/* MAC goes to its own buffer; exclude it from data */
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}
		}
		/* Scatter component count, big-endian as HW expects */
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1266
/*
 * Prepare a CPT instruction for a KASUMI encrypt (F8) or auth (F9)
 * operation. Always builds the request in scatter-gather (DMA) mode.
 *
 * @param req_flags  Request flags; ROC_SE_VALID_MAC_BUF selects a
 *                   separate MAC buffer for the output.
 * @param d_offs     Packed encr/auth offsets, in bits.
 * @param d_lens     Packed encr/auth data lengths, in bits.
 * @param params     Buffers, IVs and SE session context.
 * @param inst       CPT instruction to fill (dptr, w4).
 * @return 0 on success, -1 on unsupported offset or insufficient SG space.
 */
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t i = 0;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	/* Offsets arrive in bits; microcode expects bytes */
	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;

	/* zsk_flags == 0x0 is the cipher path, otherwise auth path */
	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	/* Direction bit is taken from byte 8 of the IV */
	dir = iv_s[8] & 0x1;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/* consider iv len */
	if (flags == 0x0) {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	/* Clear SG header; component counts filled in below */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		/* Cipher: data length in bits, rounded up to bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		/* Control word carries only 16 bits of encr offset */
		if (unlikely((encr_offset >> 16))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("enc_offset: %d", encr_offset);
			return -1;
		}
	} else {
		/* Auth: output is only the MAC */
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
		/* Control word carries only 8 bits of auth offset */
		if (unlikely((auth_offset >> 8))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("auth_offset: %d", auth_offset);
			return -1;
		}
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);

		/* Non-zero residual size => src_iov was too small */
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	/* Gather component count, big-endian as HW expects */
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	if (flags == 0x1) {
		/* IV in SLIST only for F8 */
		iv_len = 0;
	}

	/* IV */
	if (iv_len) {
		i = fill_sg_comp(scatter_comp, i,
				 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & ROC_SE_VALID_MAC_BUF) {
		/* MAC goes to its own buffer; exclude it from data */
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
	}
	/* Scatter component count, big-endian as HW expects */
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1459
1460 static __rte_always_inline int
1461 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1462                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1463 {
1464         void *m_vaddr = params->meta_buf.vaddr;
1465         uint32_t size;
1466         int32_t inputlen = 0, outputlen;
1467         struct roc_se_ctx *se_ctx;
1468         uint8_t i = 0, iv_len = 8;
1469         uint32_t encr_offset;
1470         uint32_t encr_data_len;
1471         int flags;
1472         uint8_t dir = 0;
1473         uint64_t *offset_vaddr;
1474         union cpt_inst_w4 cpt_inst_w4;
1475         uint8_t *in_buffer;
1476         uint32_t g_size_bytes, s_size_bytes;
1477         struct roc_se_sglist_comp *gather_comp;
1478         struct roc_se_sglist_comp *scatter_comp;
1479
1480         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1481         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1482
1483         se_ctx = params->ctx_buf.vaddr;
1484         flags = se_ctx->zsk_flags;
1485
1486         cpt_inst_w4.u64 = 0;
1487         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1488
1489         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1490         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1491                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1492
1493         /*
1494          * GP op header, lengths are expected in bits.
1495          */
1496         cpt_inst_w4.s.param1 = encr_data_len;
1497
1498         /* consider iv len */
1499         encr_offset += iv_len;
1500
1501         inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1502         outputlen = inputlen;
1503
1504         /* save space for offset ctrl & iv */
1505         offset_vaddr = m_vaddr;
1506
1507         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1508
1509         /* DPTR has SG list */
1510         in_buffer = m_vaddr;
1511
1512         ((uint16_t *)in_buffer)[0] = 0;
1513         ((uint16_t *)in_buffer)[1] = 0;
1514
1515         /* TODO Add error check if space will be sufficient */
1516         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1517
1518         /*
1519          * Input Gather List
1520          */
1521         i = 0;
1522
1523         /* Offset control word followed by iv */
1524         *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1525         if (unlikely((encr_offset >> 16))) {
1526                 plt_dp_err("Offset not supported");
1527                 plt_dp_err("enc_offset: %d", encr_offset);
1528                 return -1;
1529         }
1530
1531         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1532                          ROC_SE_OFF_CTRL_LEN + iv_len);
1533
1534         /* IV */
1535         memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1536                iv_len);
1537
1538         /* Add input data */
1539         size = inputlen - iv_len;
1540         if (size) {
1541                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1542                                           &size, NULL, 0);
1543                 if (unlikely(size)) {
1544                         plt_dp_err("Insufficient buffer space,"
1545                                    " size %d needed",
1546                                    size);
1547                         return -1;
1548                 }
1549         }
1550         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1551         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1552
1553         /*
1554          * Output Scatter List
1555          */
1556
1557         i = 0;
1558         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1559                                                      g_size_bytes);
1560
1561         /* IV */
1562         i = fill_sg_comp(scatter_comp, i,
1563                          (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1564
1565         /* Add output data */
1566         size = outputlen - iv_len;
1567         if (size) {
1568                 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1569                                           &size, NULL, 0);
1570                 if (unlikely(size)) {
1571                         plt_dp_err("Insufficient buffer space,"
1572                                    " size %d needed",
1573                                    size);
1574                         return -1;
1575                 }
1576         }
1577         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1578         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1579
1580         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1581
1582         /* This is DPTR len in case of SG mode */
1583         cpt_inst_w4.s.dlen = size;
1584
1585         inst->dptr = (uint64_t)in_buffer;
1586         inst->w4.u64 = cpt_inst_w4.u64;
1587
1588         return 0;
1589 }
1590
1591 static __rte_always_inline int
1592 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1593                      struct roc_se_fc_params *fc_params,
1594                      struct cpt_inst_s *inst)
1595 {
1596         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1597         uint8_t fc_type;
1598         int ret = -1;
1599
1600         fc_type = ctx->fc_type;
1601
1602         if (likely(fc_type == ROC_SE_FC_GEN)) {
1603                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1604         } else if (fc_type == ROC_SE_PDCP) {
1605                 ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, fc_params, inst);
1606         } else if (fc_type == ROC_SE_KASUMI) {
1607                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1608         }
1609
1610         /*
1611          * For AUTH_ONLY case,
1612          * MC only supports digest generation and verification
1613          * should be done in software by memcmp()
1614          */
1615
1616         return ret;
1617 }
1618
1619 static __rte_always_inline int
1620 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1621                      struct roc_se_fc_params *fc_params,
1622                      struct cpt_inst_s *inst)
1623 {
1624         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1625         uint8_t fc_type;
1626         int ret = -1;
1627
1628         fc_type = ctx->fc_type;
1629
1630         if (likely(fc_type == ROC_SE_FC_GEN)) {
1631                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1632         } else if (fc_type == ROC_SE_PDCP) {
1633                 ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, fc_params, inst);
1634         } else if (fc_type == ROC_SE_KASUMI) {
1635                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1636                                           inst);
1637         } else if (fc_type == ROC_SE_HASH_HMAC) {
1638                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1639         }
1640
1641         return ret;
1642 }
1643
1644 static __rte_always_inline int
1645 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1646 {
1647         struct rte_crypto_aead_xform *aead_form;
1648         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1649         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1650         uint32_t cipher_key_len = 0;
1651         uint8_t aes_gcm = 0;
1652         aead_form = &xform->aead;
1653
1654         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1655                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1656                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1657         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1658                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1659                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1660         } else {
1661                 plt_dp_err("Unknown aead operation\n");
1662                 return -1;
1663         }
1664         switch (aead_form->algo) {
1665         case RTE_CRYPTO_AEAD_AES_GCM:
1666                 enc_type = ROC_SE_AES_GCM;
1667                 cipher_key_len = 16;
1668                 aes_gcm = 1;
1669                 break;
1670         case RTE_CRYPTO_AEAD_AES_CCM:
1671                 plt_dp_err("Crypto: Unsupported cipher algo %u",
1672                            aead_form->algo);
1673                 return -1;
1674         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1675                 enc_type = ROC_SE_CHACHA20;
1676                 auth_type = ROC_SE_POLY1305;
1677                 cipher_key_len = 32;
1678                 sess->chacha_poly = 1;
1679                 break;
1680         default:
1681                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1682                            aead_form->algo);
1683                 return -1;
1684         }
1685         if (aead_form->key.length < cipher_key_len) {
1686                 plt_dp_err("Invalid cipher params keylen %u",
1687                            aead_form->key.length);
1688                 return -1;
1689         }
1690         sess->zsk_flag = 0;
1691         sess->aes_gcm = aes_gcm;
1692         sess->mac_len = aead_form->digest_length;
1693         sess->iv_offset = aead_form->iv.offset;
1694         sess->iv_length = aead_form->iv.length;
1695         sess->aad_length = aead_form->aad_length;
1696
1697         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1698                                          aead_form->key.data,
1699                                          aead_form->key.length, NULL)))
1700                 return -1;
1701
1702         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1703                                          aead_form->digest_length)))
1704                 return -1;
1705
1706         return 0;
1707 }
1708
1709 static __rte_always_inline int
1710 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1711 {
1712         struct rte_crypto_cipher_xform *c_form;
1713         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1714         uint32_t cipher_key_len = 0;
1715         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1716
1717         c_form = &xform->cipher;
1718
1719         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1720                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1721         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1722                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1723                 if (xform->next != NULL &&
1724                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1725                         /* Perform decryption followed by auth verify */
1726                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1727                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1728                 }
1729         } else {
1730                 plt_dp_err("Unknown cipher operation\n");
1731                 return -1;
1732         }
1733
1734         switch (c_form->algo) {
1735         case RTE_CRYPTO_CIPHER_AES_CBC:
1736                 enc_type = ROC_SE_AES_CBC;
1737                 cipher_key_len = 16;
1738                 break;
1739         case RTE_CRYPTO_CIPHER_3DES_CBC:
1740                 enc_type = ROC_SE_DES3_CBC;
1741                 cipher_key_len = 24;
1742                 break;
1743         case RTE_CRYPTO_CIPHER_DES_CBC:
1744                 /* DES is implemented using 3DES in hardware */
1745                 enc_type = ROC_SE_DES3_CBC;
1746                 cipher_key_len = 8;
1747                 break;
1748         case RTE_CRYPTO_CIPHER_AES_CTR:
1749                 enc_type = ROC_SE_AES_CTR;
1750                 cipher_key_len = 16;
1751                 aes_ctr = 1;
1752                 break;
1753         case RTE_CRYPTO_CIPHER_NULL:
1754                 enc_type = 0;
1755                 is_null = 1;
1756                 break;
1757         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1758                 enc_type = ROC_SE_KASUMI_F8_ECB;
1759                 cipher_key_len = 16;
1760                 zsk_flag = ROC_SE_K_F8;
1761                 break;
1762         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1763                 enc_type = ROC_SE_SNOW3G_UEA2;
1764                 cipher_key_len = 16;
1765                 zsk_flag = ROC_SE_ZS_EA;
1766                 break;
1767         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1768                 enc_type = ROC_SE_ZUC_EEA3;
1769                 cipher_key_len = c_form->key.length;
1770                 zsk_flag = ROC_SE_ZS_EA;
1771                 break;
1772         case RTE_CRYPTO_CIPHER_AES_XTS:
1773                 enc_type = ROC_SE_AES_XTS;
1774                 cipher_key_len = 16;
1775                 break;
1776         case RTE_CRYPTO_CIPHER_3DES_ECB:
1777                 enc_type = ROC_SE_DES3_ECB;
1778                 cipher_key_len = 24;
1779                 break;
1780         case RTE_CRYPTO_CIPHER_AES_ECB:
1781                 enc_type = ROC_SE_AES_ECB;
1782                 cipher_key_len = 16;
1783                 break;
1784         case RTE_CRYPTO_CIPHER_3DES_CTR:
1785         case RTE_CRYPTO_CIPHER_AES_F8:
1786         case RTE_CRYPTO_CIPHER_ARC4:
1787                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1788                 return -1;
1789         default:
1790                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1791                            c_form->algo);
1792                 return -1;
1793         }
1794
1795         if (c_form->key.length < cipher_key_len) {
1796                 plt_dp_err("Invalid cipher params keylen %u",
1797                            c_form->key.length);
1798                 return -1;
1799         }
1800
1801         sess->zsk_flag = zsk_flag;
1802         sess->aes_gcm = 0;
1803         sess->aes_ctr = aes_ctr;
1804         sess->iv_offset = c_form->iv.offset;
1805         sess->iv_length = c_form->iv.length;
1806         sess->is_null = is_null;
1807
1808         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1809                                          c_form->key.data, c_form->key.length,
1810                                          NULL)))
1811                 return -1;
1812
1813         if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
1814                 roc_se_ctx_swap(&sess->roc_se_ctx);
1815         return 0;
1816 }
1817
/*
 * Populate an SE session from an auth transform.
 *
 * Maps the rte_crypto auth algorithm onto the ROC SE auth type, records
 * digest length and (for ZUC/SNOW/Kasumi) the auth IV parameters in the
 * session, and programs the auth key into the SE context. AES-GMAC is
 * delegated to fill_sess_gmac() since it is handled as a cipher-side op.
 *
 * Returns 0 on success, -1 on unsupported algorithm or key-set failure.
 */
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_auth_type auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	/* GMAC takes the cipher-style session-fill path */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
		return fill_sess_gmac(xform, sess);

	if (xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* Perform auth followed by encryption */
		sess->roc_se_ctx.template_w4.s.opcode_minor =
			ROC_SE_FC_MINOR_OP_HMAC_FIRST;
	}

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	/* Map the rte_crypto algo to the ROC SE auth type; HMAC and
	 * plain-hash variants of the same digest share one SE type.
	 */
	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = ROC_SE_SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = ROC_SE_SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = ROC_SE_SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = ROC_SE_GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = ROC_SE_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = ROC_SE_SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = ROC_SE_MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = ROC_SE_KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = ROC_SE_K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = ROC_SE_SNOW3G_UIA2;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ROC_SE_ZUC_EIA3;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		auth_type = ROC_SE_AES_CMAC_EIA2;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined Hash algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	/* Only the wireless (ZUC/SNOW/Kasumi) algorithms carry an auth IV */
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	/* ZUC/SNOW/CMAC EIA algorithms need the context byte-swapped */
	if ((auth_type >= ROC_SE_ZUC_EIA3) &&
	    (auth_type <= ROC_SE_AES_CMAC_EIA2))
		roc_se_ctx_swap(&sess->roc_se_ctx);

	return 0;
}
1930
1931 static __rte_always_inline int
1932 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1933 {
1934         struct rte_crypto_auth_xform *a_form;
1935         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1936         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1937
1938         a_form = &xform->auth;
1939
1940         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1941                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1942         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1943                 sess->cpt_op |= ROC_SE_OP_DECODE;
1944         else {
1945                 plt_dp_err("Unknown auth operation");
1946                 return -1;
1947         }
1948
1949         switch (a_form->algo) {
1950         case RTE_CRYPTO_AUTH_AES_GMAC:
1951                 enc_type = ROC_SE_AES_GCM;
1952                 auth_type = ROC_SE_GMAC_TYPE;
1953                 break;
1954         default:
1955                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1956                            a_form->algo);
1957                 return -1;
1958         }
1959
1960         sess->zsk_flag = 0;
1961         sess->aes_gcm = 0;
1962         sess->is_gmac = 1;
1963         sess->iv_offset = a_form->iv.offset;
1964         sess->iv_length = a_form->iv.length;
1965         sess->mac_len = a_form->digest_length;
1966
1967         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1968                                          a_form->key.data, a_form->key.length,
1969                                          NULL)))
1970                 return -1;
1971
1972         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1973                                          a_form->digest_length)))
1974                 return -1;
1975
1976         return 0;
1977 }
1978
1979 static __rte_always_inline void *
1980 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1981               struct rte_mempool *cpt_meta_pool,
1982               struct cpt_inflight_req *infl_req)
1983 {
1984         uint8_t *mdata;
1985
1986         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1987                 return NULL;
1988
1989         buf->vaddr = mdata;
1990         buf->size = len;
1991
1992         infl_req->mdata = mdata;
1993         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1994
1995         return mdata;
1996 }
1997
1998 static __rte_always_inline uint32_t
1999 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
2000                      uint32_t start_offset)
2001 {
2002         uint16_t index = 0;
2003         void *seg_data = NULL;
2004         int32_t seg_size = 0;
2005
2006         if (!pkt) {
2007                 iovec->buf_cnt = 0;
2008                 return 0;
2009         }
2010
2011         if (!start_offset) {
2012                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2013                 seg_size = pkt->data_len;
2014         } else {
2015                 while (start_offset >= pkt->data_len) {
2016                         start_offset -= pkt->data_len;
2017                         pkt = pkt->next;
2018                 }
2019
2020                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2021                 seg_size = pkt->data_len - start_offset;
2022                 if (!seg_size)
2023                         return 1;
2024         }
2025
2026         /* first seg */
2027         iovec->bufs[index].vaddr = seg_data;
2028         iovec->bufs[index].size = seg_size;
2029         index++;
2030         pkt = pkt->next;
2031
2032         while (unlikely(pkt != NULL)) {
2033                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2034                 seg_size = pkt->data_len;
2035                 if (!seg_size)
2036                         break;
2037
2038                 iovec->bufs[index].vaddr = seg_data;
2039                 iovec->bufs[index].size = seg_size;
2040
2041                 index++;
2042
2043                 pkt = pkt->next;
2044         }
2045
2046         iovec->buf_cnt = index;
2047         return 0;
2048 }
2049
2050 static __rte_always_inline void
2051 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2052                              struct roc_se_fc_params *param, uint32_t *flags)
2053 {
2054         uint16_t index = 0;
2055         void *seg_data = NULL;
2056         uint32_t seg_size = 0;
2057         struct roc_se_iov_ptr *iovec;
2058
2059         seg_data = rte_pktmbuf_mtod(pkt, void *);
2060         seg_size = pkt->data_len;
2061
2062         /* first seg */
2063         if (likely(!pkt->next)) {
2064                 uint32_t headroom;
2065
2066                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2067                 headroom = rte_pktmbuf_headroom(pkt);
2068                 if (likely(headroom >= 24))
2069                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2070
2071                 param->bufs[0].vaddr = seg_data;
2072                 param->bufs[0].size = seg_size;
2073                 return;
2074         }
2075         iovec = param->src_iov;
2076         iovec->bufs[index].vaddr = seg_data;
2077         iovec->bufs[index].size = seg_size;
2078         index++;
2079         pkt = pkt->next;
2080
2081         while (unlikely(pkt != NULL)) {
2082                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2083                 seg_size = pkt->data_len;
2084
2085                 if (!seg_size)
2086                         break;
2087
2088                 iovec->bufs[index].vaddr = seg_data;
2089                 iovec->bufs[index].size = seg_size;
2090
2091                 index++;
2092
2093                 pkt = pkt->next;
2094         }
2095
2096         iovec->buf_cnt = index;
2097         return;
2098 }
2099
2100 static __rte_always_inline int
2101 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2102                struct cpt_qp_meta_info *m_info,
2103                struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2104 {
2105         struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2106         uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2107         struct rte_crypto_sym_op *sym_op = cop->sym;
2108         void *mdata = NULL;
2109         uint32_t mc_hash_off;
2110         uint32_t flags = 0;
2111         uint64_t d_offs, d_lens;
2112         struct rte_mbuf *m_src, *m_dst;
2113         uint8_t cpt_op = sess->cpt_op;
2114 #ifdef CPT_ALWAYS_USE_SG_MODE
2115         uint8_t inplace = 0;
2116 #else
2117         uint8_t inplace = 1;
2118 #endif
2119         struct roc_se_fc_params fc_params;
2120         char src[SRC_IOV_SIZE];
2121         char dst[SRC_IOV_SIZE];
2122         uint32_t iv_buf[4];
2123         int ret;
2124
2125         fc_params.cipher_iv_len = sess->iv_length;
2126         fc_params.auth_iv_len = sess->auth_iv_length;
2127
2128         if (likely(sess->iv_length)) {
2129                 flags |= ROC_SE_VALID_IV_BUF;
2130                 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2131                                                              sess->iv_offset);
2132                 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2133                         memcpy((uint8_t *)iv_buf,
2134                                rte_crypto_op_ctod_offset(cop, uint8_t *,
2135                                                          sess->iv_offset),
2136                                12);
2137                         iv_buf[3] = rte_cpu_to_be_32(0x1);
2138                         fc_params.iv_buf = iv_buf;
2139                 }
2140         }
2141
2142         if (sess->zsk_flag) {
2143                 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2144                         cop, uint8_t *, sess->auth_iv_offset);
2145                 if (sess->zsk_flag != ROC_SE_ZS_EA)
2146                         inplace = 0;
2147         }
2148         m_src = sym_op->m_src;
2149         m_dst = sym_op->m_dst;
2150
2151         if (sess->aes_gcm || sess->chacha_poly) {
2152                 uint8_t *salt;
2153                 uint8_t *aad_data;
2154                 uint16_t aad_len;
2155
2156                 d_offs = sym_op->aead.data.offset;
2157                 d_lens = sym_op->aead.data.length;
2158                 mc_hash_off =
2159                         sym_op->aead.data.offset + sym_op->aead.data.length;
2160
2161                 aad_data = sym_op->aead.aad.data;
2162                 aad_len = sess->aad_length;
2163                 if (likely((aad_data + aad_len) ==
2164                            rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2165                                                    sym_op->aead.data.offset))) {
2166                         d_offs = (d_offs - aad_len) | (d_offs << 16);
2167                         d_lens = (d_lens + aad_len) | (d_lens << 32);
2168                 } else {
2169                         fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2170                         fc_params.aad_buf.size = aad_len;
2171                         flags |= ROC_SE_VALID_AAD_BUF;
2172                         inplace = 0;
2173                         d_offs = d_offs << 16;
2174                         d_lens = d_lens << 32;
2175                 }
2176
2177                 salt = fc_params.iv_buf;
2178                 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2179                         cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2180                         sess->salt = *(uint32_t *)salt;
2181                 }
2182                 fc_params.iv_buf = salt + 4;
2183                 if (likely(sess->mac_len)) {
2184                         struct rte_mbuf *m =
2185                                 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2186
2187                         if (!m)
2188                                 m = m_src;
2189
2190                         /* hmac immediately following data is best case */
2191                         if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2192                                              mc_hash_off !=
2193                                      (uint8_t *)sym_op->aead.digest.data)) {
2194                                 flags |= ROC_SE_VALID_MAC_BUF;
2195                                 fc_params.mac_buf.size = sess->mac_len;
2196                                 fc_params.mac_buf.vaddr =
2197                                         sym_op->aead.digest.data;
2198                                 inplace = 0;
2199                         }
2200                 }
2201         } else {
2202                 d_offs = sym_op->cipher.data.offset;
2203                 d_lens = sym_op->cipher.data.length;
2204                 mc_hash_off =
2205                         sym_op->cipher.data.offset + sym_op->cipher.data.length;
2206                 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2207                 d_lens = (d_lens << 32) | sym_op->auth.data.length;
2208
2209                 if (mc_hash_off <
2210                     (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2211                         mc_hash_off = (sym_op->auth.data.offset +
2212                                        sym_op->auth.data.length);
2213                 }
2214                 /* for gmac, salt should be updated like in gcm */
2215                 if (unlikely(sess->is_gmac)) {
2216                         uint8_t *salt;
2217                         salt = fc_params.iv_buf;
2218                         if (unlikely(*(uint32_t *)salt != sess->salt)) {
2219                                 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2220                                 sess->salt = *(uint32_t *)salt;
2221                         }
2222                         fc_params.iv_buf = salt + 4;
2223                 }
2224                 if (likely(sess->mac_len)) {
2225                         struct rte_mbuf *m;
2226
2227                         m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2228                         if (!m)
2229                                 m = m_src;
2230
2231                         /* hmac immediately following data is best case */
2232                         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2233                             (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2234                                               mc_hash_off !=
2235                                       (uint8_t *)sym_op->auth.digest.data))) {
2236                                 flags |= ROC_SE_VALID_MAC_BUF;
2237                                 fc_params.mac_buf.size = sess->mac_len;
2238                                 fc_params.mac_buf.vaddr =
2239                                         sym_op->auth.digest.data;
2240                                 inplace = 0;
2241                         }
2242                 }
2243         }
2244         fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2245
2246         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2247             unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2248                 inplace = 0;
2249
2250         if (likely(!m_dst && inplace)) {
2251                 /* Case of single buffer without AAD buf or
2252                  * separate mac buf in place and
2253                  * not air crypto
2254                  */
2255                 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2256
2257                 prepare_iov_from_pkt_inplace(m_src, &fc_params, &flags);
2258
2259         } else {
2260                 /* Out of place processing */
2261                 fc_params.src_iov = (void *)src;
2262                 fc_params.dst_iov = (void *)dst;
2263
2264                 /* Store SG I/O in the api for reuse */
2265                 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2266                         plt_dp_err("Prepare src iov failed");
2267                         ret = -EINVAL;
2268                         goto err_exit;
2269                 }
2270
2271                 if (unlikely(m_dst != NULL)) {
2272                         uint32_t pkt_len;
2273
2274                         /* Try to make room as much as src has */
2275                         pkt_len = rte_pktmbuf_pkt_len(m_dst);
2276
2277                         if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2278                                 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2279                                 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2280                                         plt_dp_err("Not enough space in "
2281                                                    "m_dst %p, need %u"
2282                                                    " more",
2283                                                    m_dst, pkt_len);
2284                                         ret = -EINVAL;
2285                                         goto err_exit;
2286                                 }
2287                         }
2288
2289                         if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2290                                 plt_dp_err("Prepare dst iov failed for "
2291                                            "m_dst %p",
2292                                            m_dst);
2293                                 ret = -EINVAL;
2294                                 goto err_exit;
2295                         }
2296                 } else {
2297                         fc_params.dst_iov = (void *)src;
2298                 }
2299         }
2300
2301         if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2302                        (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2303                        ((ctx->fc_type == ROC_SE_FC_GEN) ||
2304                         (ctx->fc_type == ROC_SE_PDCP))))) {
2305                 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2306                                       m_info->pool, infl_req);
2307                 if (mdata == NULL) {
2308                         plt_dp_err("Error allocating meta buffer for request");
2309                         return -ENOMEM;
2310                 }
2311         }
2312
2313         /* Finally prepare the instruction */
2314         if (cpt_op & ROC_SE_OP_ENCODE)
2315                 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2316                                            inst);
2317         else
2318                 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2319                                            inst);
2320
2321         if (unlikely(ret)) {
2322                 plt_dp_err("Preparing request failed due to bad input arg");
2323                 goto free_mdata_and_exit;
2324         }
2325
2326         return 0;
2327
2328 free_mdata_and_exit:
2329         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2330                 rte_mempool_put(m_info->pool, infl_req->mdata);
2331 err_exit:
2332         return ret;
2333 }
2334
2335 static __rte_always_inline void
2336 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2337 {
2338         uint8_t *mac;
2339         struct rte_crypto_sym_op *sym_op = op->sym;
2340
2341         if (sym_op->auth.digest.data)
2342                 mac = sym_op->auth.digest.data;
2343         else
2344                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2345                                               sym_op->auth.data.length +
2346                                                       sym_op->auth.data.offset);
2347         if (!mac) {
2348                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2349                 return;
2350         }
2351
2352         if (memcmp(mac, gen_mac, mac_len))
2353                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2354         else
2355                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2356 }
2357
/*
 * Recover the KASUMI F9 direction bit and true message length (in bits)
 * from a padded message buffer.
 *
 * Scans backwards from the end of @src, skipping zero padding bytes.  The
 * least-significant set bit of the first non-zero byte found is taken as the
 * F9 stop ('1') bit; the bit immediately preceding it is the direction bit
 * (per 3GPP TS 35.201 padding: ... || DIRECTION || 1 || 0...0).
 *
 * src                  - padded message buffer
 * counter_num_bytes    - number of bytes in @src to scan
 * addr_length_in_bits  - out: message length in bits, excluding the
 *                        direction and stop/padding bits
 * addr_direction       - out: the direction bit (0 or 1)
 *
 * NOTE(review): if the stop bit is the MSB of the very first byte
 * (pos == 7 with counter_num_bytes == 0), both outputs are left
 * unmodified — callers pre-initialize direction; confirm this edge is
 * acceptable upstream.
 */
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;
	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		/* Skip trailing zero padding bytes */
		if (src[counter_num_bytes] == 0x00)
			continue;
		/* Index of the least-significant set bit = stop bit */
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			/* Stop bit is the MSB of this byte, so the
			 * direction bit is the LSB of the previous byte.
			 */
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits =
					counter_num_bytes * 8 - 1;
			}
		} else {
			/* Direction bit sits just above the stop bit
			 * within the same byte.
			 */
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits =
				counter_num_bytes * 8 + (8 - (pos + 2));
		}
		found = 1;
	}
}
2387
2388 /*
2389  * This handles all auth only except AES_GMAC
2390  */
2391 static __rte_always_inline int
2392 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2393                    struct cpt_qp_meta_info *m_info,
2394                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2395 {
2396         uint32_t space = 0;
2397         struct rte_crypto_sym_op *sym_op = cop->sym;
2398         void *mdata;
2399         uint32_t auth_range_off;
2400         uint32_t flags = 0;
2401         uint64_t d_offs = 0, d_lens;
2402         struct rte_mbuf *m_src, *m_dst;
2403         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2404         uint16_t mac_len = sess->mac_len;
2405         struct roc_se_fc_params params;
2406         char src[SRC_IOV_SIZE];
2407         uint8_t iv_buf[16];
2408         int ret;
2409
2410         memset(&params, 0, sizeof(struct roc_se_fc_params));
2411
2412         m_src = sym_op->m_src;
2413
2414         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2415                               infl_req);
2416         if (mdata == NULL) {
2417                 ret = -ENOMEM;
2418                 goto err_exit;
2419         }
2420
2421         auth_range_off = sym_op->auth.data.offset;
2422
2423         flags = ROC_SE_VALID_MAC_BUF;
2424         params.src_iov = (void *)src;
2425         if (unlikely(sess->zsk_flag)) {
2426                 /*
2427                  * Since for Zuc, Kasumi, Snow3g offsets are in bits
2428                  * we will send pass through even for auth only case,
2429                  * let MC handle it
2430                  */
2431                 d_offs = auth_range_off;
2432                 auth_range_off = 0;
2433                 params.auth_iv_len = sess->auth_iv_length;
2434                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2435                         cop, uint8_t *, sess->auth_iv_offset);
2436                 if (sess->zsk_flag == ROC_SE_K_F9) {
2437                         uint32_t length_in_bits, num_bytes;
2438                         uint8_t *src, direction = 0;
2439
2440                         memcpy(iv_buf,
2441                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2442                         /*
2443                          * This is kasumi f9, take direction from
2444                          * source buffer
2445                          */
2446                         length_in_bits = cop->sym->auth.data.length;
2447                         num_bytes = (length_in_bits >> 3);
2448                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2449                         find_kasumif9_direction_and_length(
2450                                 src, num_bytes, &length_in_bits, &direction);
2451                         length_in_bits -= 64;
2452                         cop->sym->auth.data.offset += 64;
2453                         d_offs = cop->sym->auth.data.offset;
2454                         auth_range_off = d_offs / 8;
2455                         cop->sym->auth.data.length = length_in_bits;
2456
2457                         /* Store it at end of auth iv */
2458                         iv_buf[8] = direction;
2459                         params.auth_iv_buf = iv_buf;
2460                 }
2461         }
2462
2463         d_lens = sym_op->auth.data.length;
2464
2465         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2466
2467         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2468                 if (sym_op->auth.digest.data) {
2469                         /*
2470                          * Digest to be generated
2471                          * in separate buffer
2472                          */
2473                         params.mac_buf.size = sess->mac_len;
2474                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2475                 } else {
2476                         uint32_t off = sym_op->auth.data.offset +
2477                                        sym_op->auth.data.length;
2478                         int32_t dlen, space;
2479
2480                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2481                         dlen = rte_pktmbuf_pkt_len(m_dst);
2482
2483                         space = off + mac_len - dlen;
2484                         if (space > 0)
2485                                 if (!rte_pktmbuf_append(m_dst, space)) {
2486                                         plt_dp_err("Failed to extend "
2487                                                    "mbuf by %uB",
2488                                                    space);
2489                                         ret = -EINVAL;
2490                                         goto free_mdata_and_exit;
2491                                 }
2492
2493                         params.mac_buf.vaddr =
2494                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2495                         params.mac_buf.size = mac_len;
2496                 }
2497         } else {
2498                 uint64_t *op = mdata;
2499
2500                 /* Need space for storing generated mac */
2501                 space += 2 * sizeof(uint64_t);
2502
2503                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2504                 params.mac_buf.size = mac_len;
2505                 space += RTE_ALIGN_CEIL(mac_len, 8);
2506                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2507                 op[1] = mac_len;
2508                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2509         }
2510
2511         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2512         params.meta_buf.size -= space;
2513
2514         /* Out of place processing */
2515         params.src_iov = (void *)src;
2516
2517         /*Store SG I/O in the api for reuse */
2518         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2519                 plt_dp_err("Prepare src iov failed");
2520                 ret = -EINVAL;
2521                 goto free_mdata_and_exit;
2522         }
2523
2524         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2525         if (ret)
2526                 goto free_mdata_and_exit;
2527
2528         return 0;
2529
2530 free_mdata_and_exit:
2531         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2532                 rte_mempool_put(m_info->pool, infl_req->mdata);
2533 err_exit:
2534         return ret;
2535 }
2536 #endif /*_CNXK_SE_H_ */