crypto/cnxk: enable allocated queues only
[dpdk.git] / drivers / crypto / cnxk / cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
/* Bytes needed for an IOV descriptor sized for the maximum SG entry count. */
#define SRC_IOV_SIZE                                                           \
        (sizeof(struct roc_se_iov_ptr) +                                       \
         (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE                                                           \
        (sizeof(struct roc_se_iov_ptr) +                                       \
         (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
/* Per-session private data for the SE (symmetric engine) PMD. */
struct cnxk_se_sess {
        uint16_t cpt_op : 4;       /* CPT operation type (cipher/auth dir) */
        uint16_t zsk_flag : 4;     /* ZUC/SNOW/Kasumi specific flag */
        uint16_t aes_gcm : 1;      /* Set for AES-GCM sessions */
        uint16_t aes_ctr : 1;      /* Set for AES-CTR sessions */
        uint16_t chacha_poly : 1;  /* Set for ChaCha20-Poly1305 sessions */
        uint16_t is_null : 1;      /* Set for NULL cipher/auth */
        uint16_t is_gmac : 1;      /* Set for AES-GMAC (auth-only) */
        uint16_t rsvd1 : 3;        /* Unused bits */
        uint16_t aad_length;       /* AAD length in bytes */
        uint8_t mac_len;           /* Digest (MAC) length in bytes */
        uint8_t iv_length;         /* Cipher IV length in bytes */
        uint8_t auth_iv_length;    /* Auth IV length in bytes */
        uint16_t iv_offset;        /* presumably IV offset in the crypto op - confirm against caller */
        uint16_t auth_iv_offset;   /* presumably auth IV offset in the crypto op - confirm */
        uint32_t salt;             /* Salt value (see cpt_fc_salt_update) */
        uint64_t cpt_inst_w7;      /* Precomputed CPT instruction word 7 */
        struct roc_se_ctx roc_se_ctx; /* ROC layer SE context */
} __rte_cache_aligned;
38
/*
 * Compress a 25-byte IV tail in place of a 23-byte one: byte 16 is kept
 * as-is, and the last 8 bytes (17..24) are packed into 6 bytes (17..22)
 * by discarding the two MSBs of every source byte.
 */
static inline void
cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst)
{
	int d = 17;
	int s;

	iv_dst[16] = iv_src[16];

	/* Two groups of 4 source bytes, each packed into 3 dest bytes */
	for (s = 17; s <= 21; s += 4) {
		iv_dst[d++] = (uint8_t)(((iv_src[s] & 0x3f) << 2) |
					((iv_src[s + 1] >> 4) & 0x3));
		iv_dst[d++] = (uint8_t)(((iv_src[s + 1] & 0xf) << 4) |
					((iv_src[s + 2] >> 2) & 0xf));
		iv_dst[d++] = (uint8_t)(((iv_src[s + 2] & 0x3) << 6) |
					(iv_src[s + 3] & 0x3f));
	}
}
54
55 static inline void
56 pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type,
57              uint8_t pack_iv)
58 {
59         uint32_t *iv_s_temp, iv_temp[4];
60         int j;
61
62         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
63                 /*
64                  * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
65                  * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
66                  */
67
68                 iv_s_temp = (uint32_t *)iv_s;
69
70                 for (j = 0; j < 4; j++)
71                         iv_temp[j] = iv_s_temp[3 - j];
72                 memcpy(iv_d, iv_temp, 16);
73         } else {
74                 /* ZUC doesn't need a swap */
75                 memcpy(iv_d, iv_s, 16);
76                 if (pack_iv)
77                         cpt_pack_iv(iv_s, iv_d);
78         }
79 }
80
81 static __rte_always_inline int
82 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
83 {
84         uint16_t mac_len = auth->digest_length;
85         int ret;
86
87         switch (auth->algo) {
88         case RTE_CRYPTO_AUTH_MD5:
89         case RTE_CRYPTO_AUTH_MD5_HMAC:
90                 ret = (mac_len == 16) ? 0 : -1;
91                 break;
92         case RTE_CRYPTO_AUTH_SHA1:
93         case RTE_CRYPTO_AUTH_SHA1_HMAC:
94                 ret = (mac_len == 20) ? 0 : -1;
95                 break;
96         case RTE_CRYPTO_AUTH_SHA224:
97         case RTE_CRYPTO_AUTH_SHA224_HMAC:
98                 ret = (mac_len == 28) ? 0 : -1;
99                 break;
100         case RTE_CRYPTO_AUTH_SHA256:
101         case RTE_CRYPTO_AUTH_SHA256_HMAC:
102                 ret = (mac_len == 32) ? 0 : -1;
103                 break;
104         case RTE_CRYPTO_AUTH_SHA384:
105         case RTE_CRYPTO_AUTH_SHA384_HMAC:
106                 ret = (mac_len == 48) ? 0 : -1;
107                 break;
108         case RTE_CRYPTO_AUTH_SHA512:
109         case RTE_CRYPTO_AUTH_SHA512_HMAC:
110                 ret = (mac_len == 64) ? 0 : -1;
111                 break;
112         case RTE_CRYPTO_AUTH_NULL:
113                 ret = 0;
114                 break;
115         default:
116                 ret = -1;
117         }
118
119         return ret;
120 }
121
/* Refresh the 4-byte salt stored in the leading bytes of the flexi-crypto
 * context's encryption IV field.
 */
static __rte_always_inline void
cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
{
        struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
        memcpy(fctx->enc.encr_iv, salt, 4);
}
128
129 static __rte_always_inline uint32_t
130 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
131              uint32_t size)
132 {
133         struct roc_se_sglist_comp *to = &list[i >> 2];
134
135         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
136         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
137         i++;
138         return i;
139 }
140
141 static __rte_always_inline uint32_t
142 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
143                       struct roc_se_buf_ptr *from)
144 {
145         struct roc_se_sglist_comp *to = &list[i >> 2];
146
147         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
148         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
149         i++;
150         return i;
151 }
152
153 static __rte_always_inline uint32_t
154 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
155                           struct roc_se_buf_ptr *from, uint32_t *psize)
156 {
157         struct roc_se_sglist_comp *to = &list[i >> 2];
158         uint32_t size = *psize;
159         uint32_t e_len;
160
161         e_len = (size > from->size) ? from->size : size;
162         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
163         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
164         *psize -= e_len;
165         i++;
166         return i;
167 }
168
/*
 * This fills the MC expected SGIO list
 * from IOV given by user.
 *
 * Walks the user IOV starting at from_offset, emitting up to *psize bytes
 * as SG entries beginning at index i. When extra_buf is given, its data is
 * spliced into the stream at extra_offset (callers use this to insert AAD).
 * On return, *psize holds the bytes that could not be placed (0 on success);
 * the next free SG entry index is returned.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
                      struct roc_se_iov_ptr *from, uint32_t from_offset,
                      uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
                      uint32_t extra_offset)
{
        int32_t j;
        uint32_t extra_len = extra_buf ? extra_buf->size : 0;
        uint32_t size = *psize;
        struct roc_se_buf_ptr *bufs;

        bufs = from->bufs;
        for (j = 0; (j < from->buf_cnt) && size; j++) {
                uint64_t e_vaddr;
                uint32_t e_len;
                struct roc_se_sglist_comp *to = &list[i >> 2];

                /* Skip (or partially skip) buffers until from_offset is consumed */
                if (unlikely(from_offset)) {
                        if (from_offset >= bufs[j].size) {
                                from_offset -= bufs[j].size;
                                continue;
                        }
                        e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
                        e_len = (size > (bufs[j].size - from_offset)) ?
                                        (bufs[j].size - from_offset) :
                                        size;
                        from_offset = 0;
                } else {
                        e_vaddr = (uint64_t)bufs[j].vaddr;
                        e_len = (size > bufs[j].size) ? bufs[j].size : size;
                }

                to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
                to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);

                /* Splice extra_buf into the stream once extra_offset falls
                 * inside this entry.
                 */
                if (extra_len && (e_len >= extra_offset)) {
                        /* Break the data at given offset */
                        uint32_t next_len = e_len - extra_offset;
                        uint64_t next_vaddr = e_vaddr + extra_offset;

                        if (!extra_offset) {
                                /* Extra data goes first: reuse this slot */
                                i--;
                        } else {
                                /* Truncate this entry to the split point */
                                e_len = extra_offset;
                                size -= e_len;
                                to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
                        }

                        extra_len = RTE_MIN(extra_len, size);
                        /* Insert extra data ptr */
                        if (extra_len) {
                                i++;
                                to = &list[i >> 2];
                                to->u.s.len[i % 4] =
                                        rte_cpu_to_be_16(extra_len);
                                to->ptr[i % 4] = rte_cpu_to_be_64(
                                        (uint64_t)extra_buf->vaddr);
                                size -= extra_len;
                        }

                        next_len = RTE_MIN(next_len, size);
                        /* insert the rest of the data */
                        if (next_len) {
                                i++;
                                to = &list[i >> 2];
                                to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
                                to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
                                size -= next_len;
                        }
                        /* Extra data inserted exactly once */
                        extra_len = 0;

                } else {
                        size -= e_len;
                }
                if (extra_offset)
                        /* NOTE(review): subtracts the REMAINING budget
                         * 'size' rather than the consumed 'e_len' — looks
                         * suspicious; confirm intended semantics.
                         */
                        extra_offset -= size;
                i++;
        }

        *psize = size;
        return (uint32_t)i;
}
255
256 static __rte_always_inline int
257 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
258                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
259 {
260         void *m_vaddr = params->meta_buf.vaddr;
261         uint32_t size, i;
262         uint16_t data_len, mac_len, key_len;
263         roc_se_auth_type hash_type;
264         struct roc_se_ctx *ctx;
265         struct roc_se_sglist_comp *gather_comp;
266         struct roc_se_sglist_comp *scatter_comp;
267         uint8_t *in_buffer;
268         uint32_t g_size_bytes, s_size_bytes;
269         union cpt_inst_w4 cpt_inst_w4;
270
271         ctx = params->ctx_buf.vaddr;
272
273         hash_type = ctx->hash_type;
274         mac_len = ctx->mac_len;
275         key_len = ctx->auth_key_len;
276         data_len = ROC_SE_AUTH_DLEN(d_lens);
277
278         /*GP op header */
279         cpt_inst_w4.s.opcode_minor = 0;
280         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
281         if (ctx->hmac) {
282                 cpt_inst_w4.s.opcode_major =
283                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
284                 cpt_inst_w4.s.param1 = key_len;
285                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
286         } else {
287                 cpt_inst_w4.s.opcode_major =
288                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
289                 cpt_inst_w4.s.param1 = 0;
290                 cpt_inst_w4.s.dlen = data_len;
291         }
292
293         /* Null auth only case enters the if */
294         if (unlikely(!hash_type && !ctx->enc_cipher)) {
295                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
296                 /* Minor op is passthrough */
297                 cpt_inst_w4.s.opcode_minor = 0x03;
298                 /* Send out completion code only */
299                 cpt_inst_w4.s.param2 = 0x1;
300         }
301
302         /* DPTR has SG list */
303         in_buffer = m_vaddr;
304
305         ((uint16_t *)in_buffer)[0] = 0;
306         ((uint16_t *)in_buffer)[1] = 0;
307
308         /* TODO Add error check if space will be sufficient */
309         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
310
311         /*
312          * Input gather list
313          */
314
315         i = 0;
316
317         if (ctx->hmac) {
318                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
319                 /* Key */
320                 i = fill_sg_comp(gather_comp, i, k_vaddr,
321                                  RTE_ALIGN_CEIL(key_len, 8));
322         }
323
324         /* input data */
325         size = data_len;
326         if (size) {
327                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
328                                           &size, NULL, 0);
329                 if (unlikely(size)) {
330                         plt_dp_err("Insufficient dst IOV size, short by %dB",
331                                    size);
332                         return -1;
333                 }
334         } else {
335                 /*
336                  * Looks like we need to support zero data
337                  * gather ptr in case of hash & hmac
338                  */
339                 i++;
340         }
341         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
342         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
343
344         /*
345          * Output Gather list
346          */
347
348         i = 0;
349         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
350                                                      g_size_bytes);
351
352         if (flags & ROC_SE_VALID_MAC_BUF) {
353                 if (unlikely(params->mac_buf.size < mac_len)) {
354                         plt_dp_err("Insufficient MAC size");
355                         return -1;
356                 }
357
358                 size = mac_len;
359                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
360                                               &size);
361         } else {
362                 size = mac_len;
363                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
364                                           data_len, &size, NULL, 0);
365                 if (unlikely(size)) {
366                         plt_dp_err("Insufficient dst IOV size, short by %dB",
367                                    size);
368                         return -1;
369                 }
370         }
371
372         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
373         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
374
375         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
376
377         /* This is DPTR len in case of SG mode */
378         cpt_inst_w4.s.dlen = size;
379
380         inst->dptr = (uint64_t)in_buffer;
381         inst->w4.u64 = cpt_inst_w4.u64;
382
383         return 0;
384 }
385
/*
 * Prepare a CPT instruction for a flexi-crypto ENCRYPT (+ auth) operation.
 *
 * d_offs packs the encr/auth offsets and d_lens the encr/auth lengths
 * (extracted via the ROC_SE_*_OFFSET/DLEN accessors). Depending on flags,
 * either Direct mode is used (single in-place buffer with headroom for
 * the offset control word + IV) or strict SG mode (gather/scatter lists
 * built in the meta buffer).
 *
 * Returns 0 on success, -1 on unsupported offsets or insufficient space.
 */
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
                  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
        uint32_t iv_offset = 0;
        int32_t inputlen, outputlen, enc_dlen, auth_dlen;
        struct roc_se_ctx *se_ctx;
        uint32_t cipher_type, hash_type;
        uint32_t mac_len, size;
        uint8_t iv_len = 16;
        struct roc_se_buf_ptr *aad_buf = NULL;
        uint32_t encr_offset, auth_offset;
        uint32_t encr_data_len, auth_data_len, aad_len = 0;
        uint32_t passthrough_len = 0;
        union cpt_inst_w4 cpt_inst_w4;
        void *offset_vaddr;
        uint8_t op_minor;

        encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
        auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
        encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
        auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /* We don't support both AAD and auth data separately */
                auth_data_len = 0;
                auth_offset = 0;
                aad_len = fc_params->aad_buf.size;
                aad_buf = &fc_params->aad_buf;
        }
        se_ctx = fc_params->ctx_buf.vaddr;
        cipher_type = se_ctx->enc_cipher;
        hash_type = se_ctx->hash_type;
        mac_len = se_ctx->mac_len;
        op_minor = se_ctx->template_w4.s.opcode_minor;

        if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
                /* No IV buffer: IV position is carried in the offset word */
                iv_len = 0;
                iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
        }

        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /*
                 * When AAD is given, data above encr_offset is pass through
                 * Since AAD is given as separate pointer and not as offset,
                 * this is a special case as we need to fragment input data
                 * into passthrough + encr_data and then insert AAD in between.
                 */
                if (hash_type != ROC_SE_GMAC_TYPE) {
                        passthrough_len = encr_offset;
                        auth_offset = passthrough_len + iv_len;
                        encr_offset = passthrough_len + aad_len + iv_len;
                        auth_data_len = aad_len + encr_data_len;
                } else {
                        passthrough_len = 16 + aad_len;
                        auth_offset = passthrough_len + iv_len;
                        auth_data_len = aad_len;
                }
        } else {
                encr_offset += iv_len;
                auth_offset += iv_len;
        }

        /* Encryption */
        cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
        cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
        cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

        if (hash_type == ROC_SE_GMAC_TYPE) {
                /* GMAC: auth only, no cipher data */
                encr_offset = 0;
                encr_data_len = 0;
        }

        auth_dlen = auth_offset + auth_data_len;
        enc_dlen = encr_data_len + encr_offset;
        if (unlikely(encr_data_len & 0xf)) {
                if ((cipher_type == ROC_SE_DES3_CBC) ||
                    (cipher_type == ROC_SE_DES3_ECB))
                        enc_dlen =
                                RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
                else if (likely((cipher_type == ROC_SE_AES_CBC) ||
                                (cipher_type == ROC_SE_AES_ECB)))
                        /* NOTE(review): AES path aligns to 8, not the 16B
                         * AES block size — confirm against microcode
                         * expectations.
                         */
                        enc_dlen =
                                RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
        }

        /* Whole input covers the larger of the cipher and auth regions */
        if (unlikely(auth_dlen > enc_dlen)) {
                inputlen = auth_dlen;
                outputlen = auth_dlen + mac_len;
        } else {
                inputlen = enc_dlen;
                outputlen = enc_dlen + mac_len;
        }

        if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
                outputlen = enc_dlen;

        /* GP op header */
        cpt_inst_w4.s.param1 = encr_data_len;
        cpt_inst_w4.s.param2 = auth_data_len;

        /*
         * In cn9k, cn10k since we have a limitation of
         * IV & Offset control word not part of instruction
         * and need to be part of Data Buffer, we check if
         * head room is there and then only do the Direct mode processing
         */
        if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
                   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
                void *dm_vaddr = fc_params->bufs[0].vaddr;

                /* Use Direct mode */

                /* Offset control word + IV live in the headroom */
                offset_vaddr =
                        (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;

                /* DPTR */
                inst->dptr = (uint64_t)offset_vaddr;

                /* RPTR should just exclude offset control word */
                inst->rptr = (uint64_t)dm_vaddr - iv_len;

                cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

                if (likely(iv_len)) {
                        /* Copy the 16B IV right after the control word */
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }

        } else {
                void *m_vaddr = fc_params->meta_buf.vaddr;
                uint32_t i, g_size_bytes, s_size_bytes;
                struct roc_se_sglist_comp *gather_comp;
                struct roc_se_sglist_comp *scatter_comp;
                uint8_t *in_buffer;

                /* This falls under strict SG mode */
                offset_vaddr = m_vaddr;
                size = ROC_SE_OFF_CTRL_LEN + iv_len;

                m_vaddr = (uint8_t *)m_vaddr + size;

                cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

                if (likely(iv_len)) {
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }

                /* DPTR has SG list */
                in_buffer = m_vaddr;

                /* SG header: entry counts filled in below */
                ((uint16_t *)in_buffer)[0] = 0;
                ((uint16_t *)in_buffer)[1] = 0;

                /* TODO Add error check if space will be sufficient */
                gather_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

                /*
                 * Input Gather List
                 */

                i = 0;

                /* Offset control word that includes iv */
                i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
                                 ROC_SE_OFF_CTRL_LEN + iv_len);

                /* Add input data */
                size = inputlen - iv_len;
                if (likely(size)) {
                        uint32_t aad_offset = aad_len ? passthrough_len : 0;

                        if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
                                i = fill_sg_comp_from_buf_min(
                                        gather_comp, i, fc_params->bufs, &size);
                        } else {
                                i = fill_sg_comp_from_iov(
                                        gather_comp, i, fc_params->src_iov, 0,
                                        &size, aad_buf, aad_offset);
                        }

                        if (unlikely(size)) {
                                plt_dp_err("Insufficient buffer space,"
                                           " size %d needed",
                                           size);
                                return -1;
                        }
                }
                ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
                g_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                /*
                 * Output Scatter list
                 */
                i = 0;
                scatter_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                      g_size_bytes);

                /* Add IV */
                if (likely(iv_len)) {
                        i = fill_sg_comp(scatter_comp, i,
                                         (uint64_t)offset_vaddr +
                                                 ROC_SE_OFF_CTRL_LEN,
                                         iv_len);
                }

                /* output data or output data + digest*/
                if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
                        size = outputlen - iv_len - mac_len;
                        if (size) {
                                uint32_t aad_offset =
                                        aad_len ? passthrough_len : 0;

                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                scatter_comp, i,
                                                fc_params->bufs, &size);
                                } else {
                                        i = fill_sg_comp_from_iov(
                                                scatter_comp, i,
                                                fc_params->dst_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer"
                                                   " space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                        /* mac_data */
                        if (mac_len) {
                                i = fill_sg_comp_from_buf(scatter_comp, i,
                                                          &fc_params->mac_buf);
                        }
                } else {
                        /* Output including mac */
                        size = outputlen - iv_len;
                        if (likely(size)) {
                                uint32_t aad_offset =
                                        aad_len ? passthrough_len : 0;

                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                scatter_comp, i,
                                                fc_params->bufs, &size);
                                } else {
                                        i = fill_sg_comp_from_iov(
                                                scatter_comp, i,
                                                fc_params->dst_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer"
                                                   " space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                }
                ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
                s_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

                /* This is DPTR len in case of SG mode */
                cpt_inst_w4.s.dlen = size;

                inst->dptr = (uint64_t)in_buffer;
        }

        /* Offsets must fit their fields in the 8B offset control word */
        if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
                     (auth_offset >> 8))) {
                plt_dp_err("Offset not supported");
                plt_dp_err("enc_offset: %d", encr_offset);
                plt_dp_err("iv_offset : %d", iv_offset);
                plt_dp_err("auth_offset: %d", auth_offset);
                return -1;
        }

        *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
                ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));

        inst->w4.u64 = cpt_inst_w4.u64;
        return 0;
}
685
686 static __rte_always_inline int
687 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
688                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
689 {
690         uint32_t iv_offset = 0, size;
691         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
692         struct roc_se_ctx *se_ctx;
693         int32_t hash_type, mac_len;
694         uint8_t iv_len = 16;
695         struct roc_se_buf_ptr *aad_buf = NULL;
696         uint32_t encr_offset, auth_offset;
697         uint32_t encr_data_len, auth_data_len, aad_len = 0;
698         uint32_t passthrough_len = 0;
699         union cpt_inst_w4 cpt_inst_w4;
700         void *offset_vaddr;
701         uint8_t op_minor;
702
703         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
704         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
705         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
706         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
707
708         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
709                 /* We don't support both AAD and auth data separately */
710                 auth_data_len = 0;
711                 auth_offset = 0;
712                 aad_len = fc_params->aad_buf.size;
713                 aad_buf = &fc_params->aad_buf;
714         }
715
716         se_ctx = fc_params->ctx_buf.vaddr;
717         hash_type = se_ctx->hash_type;
718         mac_len = se_ctx->mac_len;
719         op_minor = se_ctx->template_w4.s.opcode_minor;
720
721         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
722                 iv_len = 0;
723                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
724         }
725
726         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
727                 /*
728                  * When AAD is given, data above encr_offset is pass through
729                  * Since AAD is given as separate pointer and not as offset,
730                  * this is a special case as we need to fragment input data
731                  * into passthrough + encr_data and then insert AAD in between.
732                  */
733                 if (hash_type != ROC_SE_GMAC_TYPE) {
734                         passthrough_len = encr_offset;
735                         auth_offset = passthrough_len + iv_len;
736                         encr_offset = passthrough_len + aad_len + iv_len;
737                         auth_data_len = aad_len + encr_data_len;
738                 } else {
739                         passthrough_len = 16 + aad_len;
740                         auth_offset = passthrough_len + iv_len;
741                         auth_data_len = aad_len;
742                 }
743         } else {
744                 encr_offset += iv_len;
745                 auth_offset += iv_len;
746         }
747
748         /* Decryption */
749         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
750         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
751         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
752
753         if (hash_type == ROC_SE_GMAC_TYPE) {
754                 encr_offset = 0;
755                 encr_data_len = 0;
756         }
757
758         enc_dlen = encr_offset + encr_data_len;
759         auth_dlen = auth_offset + auth_data_len;
760
761         if (auth_dlen > enc_dlen) {
762                 inputlen = auth_dlen + mac_len;
763                 outputlen = auth_dlen;
764         } else {
765                 inputlen = enc_dlen + mac_len;
766                 outputlen = enc_dlen;
767         }
768
769         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
770                 outputlen = inputlen = enc_dlen;
771
772         cpt_inst_w4.s.param1 = encr_data_len;
773         cpt_inst_w4.s.param2 = auth_data_len;
774
775         /*
776          * In cn9k, cn10k since we have a limitation of
777          * IV & Offset control word not part of instruction
778          * and need to be part of Data Buffer, we check if
779          * head room is there and then only do the Direct mode processing
780          */
781         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
782                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
783                 void *dm_vaddr = fc_params->bufs[0].vaddr;
784
785                 /* Use Direct mode */
786
787                 offset_vaddr =
788                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
789                 inst->dptr = (uint64_t)offset_vaddr;
790
791                 /* RPTR should just exclude offset control word */
792                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
793
794                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
795
796                 if (likely(iv_len)) {
797                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
798                                                       ROC_SE_OFF_CTRL_LEN);
799                         uint64_t *src = fc_params->iv_buf;
800                         dest[0] = src[0];
801                         dest[1] = src[1];
802                 }
803
804         } else {
805                 void *m_vaddr = fc_params->meta_buf.vaddr;
806                 uint32_t g_size_bytes, s_size_bytes;
807                 struct roc_se_sglist_comp *gather_comp;
808                 struct roc_se_sglist_comp *scatter_comp;
809                 uint8_t *in_buffer;
810                 uint8_t i = 0;
811
812                 /* This falls under strict SG mode */
813                 offset_vaddr = m_vaddr;
814                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
815
816                 m_vaddr = (uint8_t *)m_vaddr + size;
817
818                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
819
820                 if (likely(iv_len)) {
821                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
822                                                       ROC_SE_OFF_CTRL_LEN);
823                         uint64_t *src = fc_params->iv_buf;
824                         dest[0] = src[0];
825                         dest[1] = src[1];
826                 }
827
828                 /* DPTR has SG list */
829                 in_buffer = m_vaddr;
830
831                 ((uint16_t *)in_buffer)[0] = 0;
832                 ((uint16_t *)in_buffer)[1] = 0;
833
834                 /* TODO Add error check if space will be sufficient */
835                 gather_comp =
836                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
837
838                 /*
839                  * Input Gather List
840                  */
841                 i = 0;
842
843                 /* Offset control word that includes iv */
844                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
845                                  ROC_SE_OFF_CTRL_LEN + iv_len);
846
847                 /* Add input data */
848                 if (flags & ROC_SE_VALID_MAC_BUF) {
849                         size = inputlen - iv_len - mac_len;
850                         if (size) {
851                                 /* input data only */
852                                 if (unlikely(flags &
853                                              ROC_SE_SINGLE_BUF_INPLACE)) {
854                                         i = fill_sg_comp_from_buf_min(
855                                                 gather_comp, i, fc_params->bufs,
856                                                 &size);
857                                 } else {
858                                         uint32_t aad_offset =
859                                                 aad_len ? passthrough_len : 0;
860
861                                         i = fill_sg_comp_from_iov(
862                                                 gather_comp, i,
863                                                 fc_params->src_iov, 0, &size,
864                                                 aad_buf, aad_offset);
865                                 }
866                                 if (unlikely(size)) {
867                                         plt_dp_err("Insufficient buffer"
868                                                    " space, size %d needed",
869                                                    size);
870                                         return -1;
871                                 }
872                         }
873
874                         /* mac data */
875                         if (mac_len) {
876                                 i = fill_sg_comp_from_buf(gather_comp, i,
877                                                           &fc_params->mac_buf);
878                         }
879                 } else {
880                         /* input data + mac */
881                         size = inputlen - iv_len;
882                         if (size) {
883                                 if (unlikely(flags &
884                                              ROC_SE_SINGLE_BUF_INPLACE)) {
885                                         i = fill_sg_comp_from_buf_min(
886                                                 gather_comp, i, fc_params->bufs,
887                                                 &size);
888                                 } else {
889                                         uint32_t aad_offset =
890                                                 aad_len ? passthrough_len : 0;
891
892                                         if (unlikely(!fc_params->src_iov)) {
893                                                 plt_dp_err("Bad input args");
894                                                 return -1;
895                                         }
896
897                                         i = fill_sg_comp_from_iov(
898                                                 gather_comp, i,
899                                                 fc_params->src_iov, 0, &size,
900                                                 aad_buf, aad_offset);
901                                 }
902
903                                 if (unlikely(size)) {
904                                         plt_dp_err("Insufficient buffer"
905                                                    " space, size %d needed",
906                                                    size);
907                                         return -1;
908                                 }
909                         }
910                 }
911                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
912                 g_size_bytes =
913                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
914
915                 /*
916                  * Output Scatter List
917                  */
918
919                 i = 0;
920                 scatter_comp =
921                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
922                                                       g_size_bytes);
923
924                 /* Add iv */
925                 if (iv_len) {
926                         i = fill_sg_comp(scatter_comp, i,
927                                          (uint64_t)offset_vaddr +
928                                                  ROC_SE_OFF_CTRL_LEN,
929                                          iv_len);
930                 }
931
932                 /* Add output data */
933                 size = outputlen - iv_len;
934                 if (size) {
935                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
936                                 /* handle single buffer here */
937                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
938                                                               fc_params->bufs,
939                                                               &size);
940                         } else {
941                                 uint32_t aad_offset =
942                                         aad_len ? passthrough_len : 0;
943
944                                 if (unlikely(!fc_params->dst_iov)) {
945                                         plt_dp_err("Bad input args");
946                                         return -1;
947                                 }
948
949                                 i = fill_sg_comp_from_iov(
950                                         scatter_comp, i, fc_params->dst_iov, 0,
951                                         &size, aad_buf, aad_offset);
952                         }
953
954                         if (unlikely(size)) {
955                                 plt_dp_err("Insufficient buffer space,"
956                                            " size %d needed",
957                                            size);
958                                 return -1;
959                         }
960                 }
961
962                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
963                 s_size_bytes =
964                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
965
966                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
967
968                 /* This is DPTR len in case of SG mode */
969                 cpt_inst_w4.s.dlen = size;
970
971                 inst->dptr = (uint64_t)in_buffer;
972         }
973
974         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
975                      (auth_offset >> 8))) {
976                 plt_dp_err("Offset not supported");
977                 plt_dp_err("enc_offset: %d", encr_offset);
978                 plt_dp_err("iv_offset : %d", iv_offset);
979                 plt_dp_err("auth_offset: %d", auth_offset);
980                 return -1;
981         }
982
983         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
984                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
985                 ((uint64_t)auth_offset));
986
987         inst->w4.u64 = cpt_inst_w4.u64;
988         return 0;
989 }
990
/*
 * Prepare a CPT instruction for a ZUC/SNOW3G (PDCP) operation.
 *
 * Fills the opcode word (w4), writes the offset-control word plus IV in
 * front of the payload, and programs either Direct mode (DPTR/RPTR point
 * straight at the in-place buffer) or strict scatter-gather mode (DPTR
 * points at an SG list built in the meta buffer), depending on req_flags.
 *
 * req_flags: ROC_SE_* request flags (single-buffer in-place, headroom,
 *            separate MAC buffer).
 * d_offs:    packed encr/auth offsets; for this opcode they are in bits
 *            and converted to bytes below.
 * d_lens:    packed encr/auth data lengths, passed to microcode in bits.
 * params:    session ctx, IV buffers, src/dst IOVs, MAC and meta buffers.
 * inst:      CPT instruction being built (dptr/rptr/w4 are written).
 *
 * Returns 0 on success, -1 on unsupported offsets or insufficient SG
 * buffer space.
 */
static __rte_always_inline int
cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t pdcp_alg_type;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags, iv_len;
	uint64_t offset_ctrl;
	uint64_t *offset_vaddr;
	uint8_t *iv_s;
	uint8_t pack_iv = 0;
	union cpt_inst_w4 cpt_inst_w4;

	se_ctx = params->ctx_buf.vaddr;
	/* zsk_flags == 0x1 selects the hash (EIA3/UIA2) path below */
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;
	pdcp_alg_type = se_ctx->pdcp_alg_type;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;

	cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;

	if (flags == 0x1) {
		/* Authentication-only: IV comes from the auth IV buffer */
		iv_s = params->auth_iv_buf;
		iv_len = params->auth_iv_len;

		/* 25-byte IV is packed down by 2 bytes for the microcode */
		if (iv_len == 25) {
			iv_len -= 2;
			pack_iv = 1;
		}

		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

		/* EIA3 or UIA2 */
		auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;

		/* consider iv len */
		auth_offset += iv_len;

		/* Auth data length is in bits; round up to whole bytes */
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;

		/* Offset control word carries only the auth offset here */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

		encr_data_len = 0;
		encr_offset = 0;
	} else {
		/* Cipher: IV comes from the cipher IV buffer */
		iv_s = params->iv_buf;
		iv_len = params->cipher_iv_len;

		/* 25-byte IV is packed down by 2 bytes for the microcode */
		if (iv_len == 25) {
			iv_len -= 2;
			pack_iv = 1;
		}

		/* EEA3 or UEA2 */
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

		encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		/* Encr data length is in bits; round up to whole bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		auth_data_len = 0;
		auth_offset = 0;
	}

	/* HW offset fields: encr offset is 16 bits, auth offset is 8 bits */
	if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * In cn9k, cn10k since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;

		/* Use Direct mode */

		/* Offset ctrl word + IV live in the headroom before data */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    ROC_SE_OFF_CTRL_LEN - iv_len);

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;
		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

		*offset_vaddr = offset_ctrl;
	} else {
		void *m_vaddr = params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint8_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN +
			  RTE_ALIGN_CEIL(iv_len, 8);

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		/* SG list header: component counts filled in later */
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov, 0, &size,
						  NULL, 0);
			/* Non-zero residual means src_iov was too small */
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		/* Components are stored four to a 64-byte SG entry */
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */

		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		if (flags == 0x1) {
			/* IV in SLIST only for EEA3 & UEA2 */
			iv_len = 0;
		}

		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		if (req_flags & ROC_SE_VALID_MAC_BUF) {
			/* MAC goes to its own buffer; exclude it here */
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1250
/*
 * Prepare a CPT instruction for a KASUMI F8 (cipher) or F9 (auth)
 * "encrypt"-direction operation.
 *
 * Always uses scatter-gather (DMA) mode: the offset-control word and the
 * 8-byte IV are placed at the start of the meta buffer and referenced from
 * the gather list, followed by the input data; the scatter list receives
 * the IV (F8 only) and the output data and/or MAC.
 *
 * req_flags: ROC_SE_* request flags (separate MAC buffer).
 * d_offs:    packed encr/auth offsets in bits; converted to bytes below.
 * d_lens:    packed encr/auth data lengths, passed to microcode in bits.
 * params:    session ctx, IV buffers, src/dst IOVs, MAC and meta buffers.
 * inst:      CPT instruction being built (dptr/w4 are written).
 *
 * Returns 0 on success, -1 on unsupported offsets or insufficient SG
 * buffer space.
 */
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t i = 0;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	/* Offsets arrive in bits; microcode expects bytes */
	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	/* zsk_flags == 0x0 selects the F8 (cipher) path below */
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;

	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	/* Direction bit is carried in bit 0 of IV byte 8 */
	dir = iv_s[8] & 0x1;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/* consider iv len */
	if (flags == 0x0) {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	/* SG list header: component counts filled in later */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		/* F8: data lengths are in bits, round up to whole bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		/* HW encr offset field is 16 bits wide */
		if (unlikely((encr_offset >> 16))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("enc_offset: %d", encr_offset);
			return -1;
		}
	} else {
		/* F9: output is only the MAC */
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
		/* HW auth offset field is 8 bits wide */
		if (unlikely((auth_offset >> 8))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("auth_offset: %d", auth_offset);
			return -1;
		}
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);

		/* Non-zero residual means src_iov was too small */
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	/* Components are stored four to a 64-byte SG entry */
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	if (flags == 0x1) {
		/* IV in SLIST only for F8 */
		iv_len = 0;
	}

	/* IV */
	if (iv_len) {
		i = fill_sg_comp(scatter_comp, i,
				 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & ROC_SE_VALID_MAC_BUF) {
		/* MAC goes to its own buffer; exclude it here */
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1443
1444 static __rte_always_inline int
1445 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1446                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1447 {
1448         void *m_vaddr = params->meta_buf.vaddr;
1449         uint32_t size;
1450         int32_t inputlen = 0, outputlen;
1451         struct roc_se_ctx *se_ctx;
1452         uint8_t i = 0, iv_len = 8;
1453         uint32_t encr_offset;
1454         uint32_t encr_data_len;
1455         int flags;
1456         uint8_t dir = 0;
1457         uint64_t *offset_vaddr;
1458         union cpt_inst_w4 cpt_inst_w4;
1459         uint8_t *in_buffer;
1460         uint32_t g_size_bytes, s_size_bytes;
1461         struct roc_se_sglist_comp *gather_comp;
1462         struct roc_se_sglist_comp *scatter_comp;
1463
1464         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1465         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1466
1467         se_ctx = params->ctx_buf.vaddr;
1468         flags = se_ctx->zsk_flags;
1469
1470         cpt_inst_w4.u64 = 0;
1471         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1472
1473         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1474         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1475                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1476
1477         /*
1478          * GP op header, lengths are expected in bits.
1479          */
1480         cpt_inst_w4.s.param1 = encr_data_len;
1481
1482         /* consider iv len */
1483         encr_offset += iv_len;
1484
1485         inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1486         outputlen = inputlen;
1487
1488         /* save space for offset ctrl & iv */
1489         offset_vaddr = m_vaddr;
1490
1491         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1492
1493         /* DPTR has SG list */
1494         in_buffer = m_vaddr;
1495
1496         ((uint16_t *)in_buffer)[0] = 0;
1497         ((uint16_t *)in_buffer)[1] = 0;
1498
1499         /* TODO Add error check if space will be sufficient */
1500         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1501
1502         /*
1503          * Input Gather List
1504          */
1505         i = 0;
1506
1507         /* Offset control word followed by iv */
1508         *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1509         if (unlikely((encr_offset >> 16))) {
1510                 plt_dp_err("Offset not supported");
1511                 plt_dp_err("enc_offset: %d", encr_offset);
1512                 return -1;
1513         }
1514
1515         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1516                          ROC_SE_OFF_CTRL_LEN + iv_len);
1517
1518         /* IV */
1519         memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1520                iv_len);
1521
1522         /* Add input data */
1523         size = inputlen - iv_len;
1524         if (size) {
1525                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1526                                           &size, NULL, 0);
1527                 if (unlikely(size)) {
1528                         plt_dp_err("Insufficient buffer space,"
1529                                    " size %d needed",
1530                                    size);
1531                         return -1;
1532                 }
1533         }
1534         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1535         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1536
1537         /*
1538          * Output Scatter List
1539          */
1540
1541         i = 0;
1542         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1543                                                      g_size_bytes);
1544
1545         /* IV */
1546         i = fill_sg_comp(scatter_comp, i,
1547                          (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1548
1549         /* Add output data */
1550         size = outputlen - iv_len;
1551         if (size) {
1552                 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1553                                           &size, NULL, 0);
1554                 if (unlikely(size)) {
1555                         plt_dp_err("Insufficient buffer space,"
1556                                    " size %d needed",
1557                                    size);
1558                         return -1;
1559                 }
1560         }
1561         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1562         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1563
1564         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1565
1566         /* This is DPTR len in case of SG mode */
1567         cpt_inst_w4.s.dlen = size;
1568
1569         inst->dptr = (uint64_t)in_buffer;
1570         inst->w4.u64 = cpt_inst_w4.u64;
1571
1572         return 0;
1573 }
1574
1575 static __rte_always_inline int
1576 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1577                      struct roc_se_fc_params *fc_params,
1578                      struct cpt_inst_s *inst)
1579 {
1580         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1581         uint8_t fc_type;
1582         int ret = -1;
1583
1584         fc_type = ctx->fc_type;
1585
1586         if (likely(fc_type == ROC_SE_FC_GEN)) {
1587                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1588         } else if (fc_type == ROC_SE_PDCP) {
1589                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1590                                           inst);
1591         } else if (fc_type == ROC_SE_KASUMI) {
1592                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1593         }
1594
1595         /*
1596          * For AUTH_ONLY case,
1597          * MC only supports digest generation and verification
1598          * should be done in software by memcmp()
1599          */
1600
1601         return ret;
1602 }
1603
1604 static __rte_always_inline int
1605 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1606                      struct roc_se_fc_params *fc_params,
1607                      struct cpt_inst_s *inst)
1608 {
1609         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1610         uint8_t fc_type;
1611         int ret = -1;
1612
1613         fc_type = ctx->fc_type;
1614
1615         if (likely(fc_type == ROC_SE_FC_GEN)) {
1616                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1617         } else if (fc_type == ROC_SE_PDCP) {
1618                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1619                                           inst);
1620         } else if (fc_type == ROC_SE_KASUMI) {
1621                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1622                                           inst);
1623         } else if (fc_type == ROC_SE_HASH_HMAC) {
1624                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1625         }
1626
1627         return ret;
1628 }
1629
1630 static __rte_always_inline int
1631 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1632 {
1633         struct rte_crypto_aead_xform *aead_form;
1634         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1635         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1636         uint32_t cipher_key_len = 0;
1637         uint8_t aes_gcm = 0;
1638         aead_form = &xform->aead;
1639
1640         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1641                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1642                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1643         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1644                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1645                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1646         } else {
1647                 plt_dp_err("Unknown aead operation\n");
1648                 return -1;
1649         }
1650         switch (aead_form->algo) {
1651         case RTE_CRYPTO_AEAD_AES_GCM:
1652                 enc_type = ROC_SE_AES_GCM;
1653                 cipher_key_len = 16;
1654                 aes_gcm = 1;
1655                 break;
1656         case RTE_CRYPTO_AEAD_AES_CCM:
1657                 plt_dp_err("Crypto: Unsupported cipher algo %u",
1658                            aead_form->algo);
1659                 return -1;
1660         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1661                 enc_type = ROC_SE_CHACHA20;
1662                 auth_type = ROC_SE_POLY1305;
1663                 cipher_key_len = 32;
1664                 sess->chacha_poly = 1;
1665                 break;
1666         default:
1667                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1668                            aead_form->algo);
1669                 return -1;
1670         }
1671         if (aead_form->key.length < cipher_key_len) {
1672                 plt_dp_err("Invalid cipher params keylen %u",
1673                            aead_form->key.length);
1674                 return -1;
1675         }
1676         sess->zsk_flag = 0;
1677         sess->aes_gcm = aes_gcm;
1678         sess->mac_len = aead_form->digest_length;
1679         sess->iv_offset = aead_form->iv.offset;
1680         sess->iv_length = aead_form->iv.length;
1681         sess->aad_length = aead_form->aad_length;
1682
1683         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1684                                          aead_form->key.data,
1685                                          aead_form->key.length, NULL)))
1686                 return -1;
1687
1688         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1689                                          aead_form->digest_length)))
1690                 return -1;
1691
1692         return 0;
1693 }
1694
1695 static __rte_always_inline int
1696 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1697 {
1698         struct rte_crypto_cipher_xform *c_form;
1699         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1700         uint32_t cipher_key_len = 0;
1701         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1702
1703         c_form = &xform->cipher;
1704
1705         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1706                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1707         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1708                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1709                 if (xform->next != NULL &&
1710                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1711                         /* Perform decryption followed by auth verify */
1712                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1713                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1714                 }
1715         } else {
1716                 plt_dp_err("Unknown cipher operation\n");
1717                 return -1;
1718         }
1719
1720         switch (c_form->algo) {
1721         case RTE_CRYPTO_CIPHER_AES_CBC:
1722                 enc_type = ROC_SE_AES_CBC;
1723                 cipher_key_len = 16;
1724                 break;
1725         case RTE_CRYPTO_CIPHER_3DES_CBC:
1726                 enc_type = ROC_SE_DES3_CBC;
1727                 cipher_key_len = 24;
1728                 break;
1729         case RTE_CRYPTO_CIPHER_DES_CBC:
1730                 /* DES is implemented using 3DES in hardware */
1731                 enc_type = ROC_SE_DES3_CBC;
1732                 cipher_key_len = 8;
1733                 break;
1734         case RTE_CRYPTO_CIPHER_AES_CTR:
1735                 enc_type = ROC_SE_AES_CTR;
1736                 cipher_key_len = 16;
1737                 aes_ctr = 1;
1738                 break;
1739         case RTE_CRYPTO_CIPHER_NULL:
1740                 enc_type = 0;
1741                 is_null = 1;
1742                 break;
1743         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1744                 enc_type = ROC_SE_KASUMI_F8_ECB;
1745                 cipher_key_len = 16;
1746                 zsk_flag = ROC_SE_K_F8;
1747                 break;
1748         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1749                 enc_type = ROC_SE_SNOW3G_UEA2;
1750                 cipher_key_len = 16;
1751                 zsk_flag = ROC_SE_ZS_EA;
1752                 break;
1753         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1754                 enc_type = ROC_SE_ZUC_EEA3;
1755                 cipher_key_len = c_form->key.length;
1756                 zsk_flag = ROC_SE_ZS_EA;
1757                 break;
1758         case RTE_CRYPTO_CIPHER_AES_XTS:
1759                 enc_type = ROC_SE_AES_XTS;
1760                 cipher_key_len = 16;
1761                 break;
1762         case RTE_CRYPTO_CIPHER_3DES_ECB:
1763                 enc_type = ROC_SE_DES3_ECB;
1764                 cipher_key_len = 24;
1765                 break;
1766         case RTE_CRYPTO_CIPHER_AES_ECB:
1767                 enc_type = ROC_SE_AES_ECB;
1768                 cipher_key_len = 16;
1769                 break;
1770         case RTE_CRYPTO_CIPHER_3DES_CTR:
1771         case RTE_CRYPTO_CIPHER_AES_F8:
1772         case RTE_CRYPTO_CIPHER_ARC4:
1773                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1774                 return -1;
1775         default:
1776                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1777                            c_form->algo);
1778                 return -1;
1779         }
1780
1781         if (c_form->key.length < cipher_key_len) {
1782                 plt_dp_err("Invalid cipher params keylen %u",
1783                            c_form->key.length);
1784                 return -1;
1785         }
1786
1787         sess->zsk_flag = zsk_flag;
1788         sess->aes_gcm = 0;
1789         sess->aes_ctr = aes_ctr;
1790         sess->iv_offset = c_form->iv.offset;
1791         sess->iv_length = c_form->iv.length;
1792         sess->is_null = is_null;
1793
1794         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1795                                          c_form->key.data, c_form->key.length,
1796                                          NULL)))
1797                 return -1;
1798
1799         if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
1800                 roc_se_ctx_swap(&sess->roc_se_ctx);
1801         return 0;
1802 }
1803
1804 static __rte_always_inline int
1805 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1806 {
1807         struct rte_crypto_auth_xform *a_form;
1808         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1809         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1810
1811         if (xform->next != NULL &&
1812             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1813             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1814                 /* Perform auth followed by encryption */
1815                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1816                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1817         }
1818
1819         a_form = &xform->auth;
1820
1821         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1822                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1823         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1824                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1825         else {
1826                 plt_dp_err("Unknown auth operation");
1827                 return -1;
1828         }
1829
1830         switch (a_form->algo) {
1831         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1832                 /* Fall through */
1833         case RTE_CRYPTO_AUTH_SHA1:
1834                 auth_type = ROC_SE_SHA1_TYPE;
1835                 break;
1836         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1837         case RTE_CRYPTO_AUTH_SHA256:
1838                 auth_type = ROC_SE_SHA2_SHA256;
1839                 break;
1840         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1841         case RTE_CRYPTO_AUTH_SHA512:
1842                 auth_type = ROC_SE_SHA2_SHA512;
1843                 break;
1844         case RTE_CRYPTO_AUTH_AES_GMAC:
1845                 auth_type = ROC_SE_GMAC_TYPE;
1846                 aes_gcm = 1;
1847                 break;
1848         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1849         case RTE_CRYPTO_AUTH_SHA224:
1850                 auth_type = ROC_SE_SHA2_SHA224;
1851                 break;
1852         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1853         case RTE_CRYPTO_AUTH_SHA384:
1854                 auth_type = ROC_SE_SHA2_SHA384;
1855                 break;
1856         case RTE_CRYPTO_AUTH_MD5_HMAC:
1857         case RTE_CRYPTO_AUTH_MD5:
1858                 auth_type = ROC_SE_MD5_TYPE;
1859                 break;
1860         case RTE_CRYPTO_AUTH_KASUMI_F9:
1861                 auth_type = ROC_SE_KASUMI_F9_ECB;
1862                 /*
1863                  * Indicate that direction needs to be taken out
1864                  * from end of src
1865                  */
1866                 zsk_flag = ROC_SE_K_F9;
1867                 break;
1868         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1869                 auth_type = ROC_SE_SNOW3G_UIA2;
1870                 zsk_flag = ROC_SE_ZS_IA;
1871                 break;
1872         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1873                 auth_type = ROC_SE_ZUC_EIA3;
1874                 zsk_flag = ROC_SE_ZS_IA;
1875                 break;
1876         case RTE_CRYPTO_AUTH_NULL:
1877                 auth_type = 0;
1878                 is_null = 1;
1879                 break;
1880         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1881         case RTE_CRYPTO_AUTH_AES_CMAC:
1882         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1883                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
1884                 return -1;
1885         default:
1886                 plt_dp_err("Crypto: Undefined Hash algo %u specified",
1887                            a_form->algo);
1888                 return -1;
1889         }
1890
1891         sess->zsk_flag = zsk_flag;
1892         sess->aes_gcm = aes_gcm;
1893         sess->mac_len = a_form->digest_length;
1894         sess->is_null = is_null;
1895         if (zsk_flag) {
1896                 sess->auth_iv_offset = a_form->iv.offset;
1897                 sess->auth_iv_length = a_form->iv.length;
1898         }
1899         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
1900                                          a_form->key.data, a_form->key.length,
1901                                          a_form->digest_length)))
1902                 return -1;
1903
1904         if ((auth_type >= ROC_SE_ZUC_EIA3) &&
1905             (auth_type <= ROC_SE_AES_CMAC_EIA2))
1906                 roc_se_ctx_swap(&sess->roc_se_ctx);
1907
1908         return 0;
1909 }
1910
1911 static __rte_always_inline int
1912 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1913 {
1914         struct rte_crypto_auth_xform *a_form;
1915         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1916         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1917
1918         a_form = &xform->auth;
1919
1920         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1921                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1922         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1923                 sess->cpt_op |= ROC_SE_OP_DECODE;
1924         else {
1925                 plt_dp_err("Unknown auth operation");
1926                 return -1;
1927         }
1928
1929         switch (a_form->algo) {
1930         case RTE_CRYPTO_AUTH_AES_GMAC:
1931                 enc_type = ROC_SE_AES_GCM;
1932                 auth_type = ROC_SE_GMAC_TYPE;
1933                 break;
1934         default:
1935                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1936                            a_form->algo);
1937                 return -1;
1938         }
1939
1940         sess->zsk_flag = 0;
1941         sess->aes_gcm = 0;
1942         sess->is_gmac = 1;
1943         sess->iv_offset = a_form->iv.offset;
1944         sess->iv_length = a_form->iv.length;
1945         sess->mac_len = a_form->digest_length;
1946
1947         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1948                                          a_form->key.data, a_form->key.length,
1949                                          NULL)))
1950                 return -1;
1951
1952         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1953                                          a_form->digest_length)))
1954                 return -1;
1955
1956         return 0;
1957 }
1958
1959 static __rte_always_inline void *
1960 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1961               struct rte_mempool *cpt_meta_pool,
1962               struct cpt_inflight_req *infl_req)
1963 {
1964         uint8_t *mdata;
1965
1966         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1967                 return NULL;
1968
1969         buf->vaddr = mdata;
1970         buf->size = len;
1971
1972         infl_req->mdata = mdata;
1973         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1974
1975         return mdata;
1976 }
1977
1978 static __rte_always_inline uint32_t
1979 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
1980                      uint32_t start_offset)
1981 {
1982         uint16_t index = 0;
1983         void *seg_data = NULL;
1984         int32_t seg_size = 0;
1985
1986         if (!pkt) {
1987                 iovec->buf_cnt = 0;
1988                 return 0;
1989         }
1990
1991         if (!start_offset) {
1992                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1993                 seg_size = pkt->data_len;
1994         } else {
1995                 while (start_offset >= pkt->data_len) {
1996                         start_offset -= pkt->data_len;
1997                         pkt = pkt->next;
1998                 }
1999
2000                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2001                 seg_size = pkt->data_len - start_offset;
2002                 if (!seg_size)
2003                         return 1;
2004         }
2005
2006         /* first seg */
2007         iovec->bufs[index].vaddr = seg_data;
2008         iovec->bufs[index].size = seg_size;
2009         index++;
2010         pkt = pkt->next;
2011
2012         while (unlikely(pkt != NULL)) {
2013                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2014                 seg_size = pkt->data_len;
2015                 if (!seg_size)
2016                         break;
2017
2018                 iovec->bufs[index].vaddr = seg_data;
2019                 iovec->bufs[index].size = seg_size;
2020
2021                 index++;
2022
2023                 pkt = pkt->next;
2024         }
2025
2026         iovec->buf_cnt = index;
2027         return 0;
2028 }
2029
2030 static __rte_always_inline uint32_t
2031 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2032                              struct roc_se_fc_params *param, uint32_t *flags)
2033 {
2034         uint16_t index = 0;
2035         void *seg_data = NULL;
2036         uint32_t seg_size = 0;
2037         struct roc_se_iov_ptr *iovec;
2038
2039         seg_data = rte_pktmbuf_mtod(pkt, void *);
2040         seg_size = pkt->data_len;
2041
2042         /* first seg */
2043         if (likely(!pkt->next)) {
2044                 uint32_t headroom;
2045
2046                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2047                 headroom = rte_pktmbuf_headroom(pkt);
2048                 if (likely(headroom >= 24))
2049                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2050
2051                 param->bufs[0].vaddr = seg_data;
2052                 param->bufs[0].size = seg_size;
2053                 return 0;
2054         }
2055         iovec = param->src_iov;
2056         iovec->bufs[index].vaddr = seg_data;
2057         iovec->bufs[index].size = seg_size;
2058         index++;
2059         pkt = pkt->next;
2060
2061         while (unlikely(pkt != NULL)) {
2062                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2063                 seg_size = pkt->data_len;
2064
2065                 if (!seg_size)
2066                         break;
2067
2068                 iovec->bufs[index].vaddr = seg_data;
2069                 iovec->bufs[index].size = seg_size;
2070
2071                 index++;
2072
2073                 pkt = pkt->next;
2074         }
2075
2076         iovec->buf_cnt = index;
2077         return 0;
2078 }
2079
/*
 * Build a CPT instruction for a flexi-crypto symmetric operation.
 *
 * Derives data offsets/lengths and IOV lists from the crypto op and
 * session, decides between in-place and out-of-place processing,
 * allocates a meta buffer when SG mode is required, and finally calls
 * the encode/decode prep routine to fill @inst.
 *
 * Returns 0 on success, -EINVAL on bad buffer layout, -ENOMEM when the
 * meta pool is exhausted.
 *
 * NOTE(review): fc_params is not zero-initialized; only the fields the
 * taken branches write are valid. Verify the prep routines never read
 * fields (e.g. mac_buf/aad_buf) whose VALID_* flag is not set.
 */
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
	       struct cpt_qp_meta_info *m_info,
	       struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = &sess->roc_se_ctx;
	uint8_t op_minor = ctx->template_w4.s.opcode_minor;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	struct roc_se_fc_params fc_params;
	/* Stack space for the SG IOV headers + ROC_SE_MAX_SG_CNT entries */
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	fc_params.cipher_iv_len = sess->iv_length;
	fc_params.auth_iv_len = sess->auth_iv_length;

	if (likely(sess->iv_length)) {
		flags |= ROC_SE_VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
							     sess->iv_offset);
		/* AES-CTR with a short (12B) IV: append a BE32 counter of 1
		 * to form the full 16B counter block.
		 */
		if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop, uint8_t *,
							 sess->iv_offset),
			       12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
			cop, uint8_t *, sess->auth_iv_offset);
		/* Only ZUC/SNOW cipher (ZS_EA) can run in place */
		if (sess->zsk_flag != ROC_SE_ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess->aes_gcm || sess->chacha_poly) {
		/* AEAD path: offsets/lengths come from the aead op fields */
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off =
			sym_op->aead.data.offset + sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess->aad_length;
		/* If AAD is contiguous with the data, fold it into the
		 * cipher region via the packed offset/length words;
		 * otherwise pass it as a separate AAD buffer (forces SG).
		 */
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src, uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.size = aad_len;
			flags |= ROC_SE_VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		/* First 4 IV bytes are the salt; re-program the context
		 * only when it changed since the last op on this session.
		 */
		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess->salt)) {
			cpt_fc_salt_update(&sess->roc_se_ctx, salt);
			sess->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m =
				(cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				inplace = 0;
			}
		}
	} else {
		/* Cipher/auth path: pack cipher offs/len in the high part
		 * and auth offs/len in the low part of the 64-bit words.
		 */
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off =
			sym_op->cipher.data.offset + sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		/* Digest goes after whichever region ends last */
		if (mc_hash_off <
		    (sym_op->auth.data.offset + sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess->is_gmac)) {
			uint8_t *salt;
			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess->salt)) {
				cpt_fc_salt_update(&sess->roc_se_ctx, salt);
				sess->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					      mc_hash_off !=
				      (uint8_t *)sym_op->auth.digest.data))) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;

	/* NULL cipher or decode cannot run in place (unless HMAC-first) */
	if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
	    unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
							  &flags))) {
			plt_dp_err("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			plt_dp_err("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					plt_dp_err("Not enough space in "
						   "m_dst %p, need %u"
						   " more",
						   m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				plt_dp_err("Prepare dst iov failed for "
					   "m_dst %p",
					   m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			/* In-place SG: reuse the source IOV as destination */
			fc_params.dst_iov = (void *)src;
		}
	}

	/* Meta buffer is only needed when the single-buffer-with-headroom
	 * fast path cannot be used (SG mode, or non FC/PDCP types).
	 */
	if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		       (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
		       ((ctx->fc_type == ROC_SE_FC_GEN) ||
			(ctx->fc_type == ROC_SE_PDCP))))) {
		mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
				      m_info->pool, infl_req);
		if (mdata == NULL) {
			plt_dp_err("Error allocating meta buffer for request");
			return -ENOMEM;
		}
	}

	/* Finally prepare the instruction */
	if (cpt_op & ROC_SE_OP_ENCODE)
		ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);
	else
		ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);

	if (unlikely(ret)) {
		plt_dp_err("Preparing request failed due to bad input arg");
		goto free_mdata_and_exit;
	}

	return 0;

free_mdata_and_exit:
	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
		rte_mempool_put(m_info->pool, infl_req->mdata)
err_exit:
	return ret;
}
2319
2320 static __rte_always_inline void
2321 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2322 {
2323         uint8_t *mac;
2324         struct rte_crypto_sym_op *sym_op = op->sym;
2325
2326         if (sym_op->auth.digest.data)
2327                 mac = sym_op->auth.digest.data;
2328         else
2329                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2330                                               sym_op->auth.data.length +
2331                                                       sym_op->auth.data.offset);
2332         if (!mac) {
2333                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2334                 return;
2335         }
2336
2337         if (memcmp(mac, gen_mac, mac_len))
2338                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2339         else
2340                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2341 }
2342
2343 static __rte_always_inline void
2344 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2345                                    uint32_t *addr_length_in_bits,
2346                                    uint8_t *addr_direction)
2347 {
2348         uint8_t found = 0;
2349         uint32_t pos;
2350         uint8_t last_byte;
2351         while (!found && counter_num_bytes > 0) {
2352                 counter_num_bytes--;
2353                 if (src[counter_num_bytes] == 0x00)
2354                         continue;
2355                 pos = rte_bsf32(src[counter_num_bytes]);
2356                 if (pos == 7) {
2357                         if (likely(counter_num_bytes > 0)) {
2358                                 last_byte = src[counter_num_bytes - 1];
2359                                 *addr_direction = last_byte & 0x1;
2360                                 *addr_length_in_bits =
2361                                         counter_num_bytes * 8 - 1;
2362                         }
2363                 } else {
2364                         last_byte = src[counter_num_bytes];
2365                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2366                         *addr_length_in_bits =
2367                                 counter_num_bytes * 8 + (8 - (pos + 2));
2368                 }
2369                 found = 1;
2370         }
2371 }
2372
2373 /*
2374  * This handles all auth only except AES_GMAC
2375  */
2376 static __rte_always_inline int
2377 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2378                    struct cpt_qp_meta_info *m_info,
2379                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2380 {
2381         uint32_t space = 0;
2382         struct rte_crypto_sym_op *sym_op = cop->sym;
2383         void *mdata;
2384         uint32_t auth_range_off;
2385         uint32_t flags = 0;
2386         uint64_t d_offs = 0, d_lens;
2387         struct rte_mbuf *m_src, *m_dst;
2388         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2389         uint16_t mac_len = sess->mac_len;
2390         struct roc_se_fc_params params;
2391         char src[SRC_IOV_SIZE];
2392         uint8_t iv_buf[16];
2393         int ret;
2394
2395         memset(&params, 0, sizeof(struct roc_se_fc_params));
2396
2397         m_src = sym_op->m_src;
2398
2399         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2400                               infl_req);
2401         if (mdata == NULL) {
2402                 ret = -ENOMEM;
2403                 goto err_exit;
2404         }
2405
2406         auth_range_off = sym_op->auth.data.offset;
2407
2408         flags = ROC_SE_VALID_MAC_BUF;
2409         params.src_iov = (void *)src;
2410         if (unlikely(sess->zsk_flag)) {
2411                 /*
2412                  * Since for Zuc, Kasumi, Snow3g offsets are in bits
2413                  * we will send pass through even for auth only case,
2414                  * let MC handle it
2415                  */
2416                 d_offs = auth_range_off;
2417                 auth_range_off = 0;
2418                 params.auth_iv_len = sess->auth_iv_length;
2419                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2420                         cop, uint8_t *, sess->auth_iv_offset);
2421                 if (sess->zsk_flag == ROC_SE_K_F9) {
2422                         uint32_t length_in_bits, num_bytes;
2423                         uint8_t *src, direction = 0;
2424
2425                         memcpy(iv_buf,
2426                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2427                         /*
2428                          * This is kasumi f9, take direction from
2429                          * source buffer
2430                          */
2431                         length_in_bits = cop->sym->auth.data.length;
2432                         num_bytes = (length_in_bits >> 3);
2433                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2434                         find_kasumif9_direction_and_length(
2435                                 src, num_bytes, &length_in_bits, &direction);
2436                         length_in_bits -= 64;
2437                         cop->sym->auth.data.offset += 64;
2438                         d_offs = cop->sym->auth.data.offset;
2439                         auth_range_off = d_offs / 8;
2440                         cop->sym->auth.data.length = length_in_bits;
2441
2442                         /* Store it at end of auth iv */
2443                         iv_buf[8] = direction;
2444                         params.auth_iv_buf = iv_buf;
2445                 }
2446         }
2447
2448         d_lens = sym_op->auth.data.length;
2449
2450         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2451
2452         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2453                 if (sym_op->auth.digest.data) {
2454                         /*
2455                          * Digest to be generated
2456                          * in separate buffer
2457                          */
2458                         params.mac_buf.size = sess->mac_len;
2459                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2460                 } else {
2461                         uint32_t off = sym_op->auth.data.offset +
2462                                        sym_op->auth.data.length;
2463                         int32_t dlen, space;
2464
2465                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2466                         dlen = rte_pktmbuf_pkt_len(m_dst);
2467
2468                         space = off + mac_len - dlen;
2469                         if (space > 0)
2470                                 if (!rte_pktmbuf_append(m_dst, space)) {
2471                                         plt_dp_err("Failed to extend "
2472                                                    "mbuf by %uB",
2473                                                    space);
2474                                         ret = -EINVAL;
2475                                         goto free_mdata_and_exit;
2476                                 }
2477
2478                         params.mac_buf.vaddr =
2479                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2480                         params.mac_buf.size = mac_len;
2481                 }
2482         } else {
2483                 uint64_t *op = mdata;
2484
2485                 /* Need space for storing generated mac */
2486                 space += 2 * sizeof(uint64_t);
2487
2488                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2489                 params.mac_buf.size = mac_len;
2490                 space += RTE_ALIGN_CEIL(mac_len, 8);
2491                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2492                 op[1] = mac_len;
2493                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2494         }
2495
2496         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2497         params.meta_buf.size -= space;
2498
2499         /* Out of place processing */
2500         params.src_iov = (void *)src;
2501
2502         /*Store SG I/O in the api for reuse */
2503         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2504                 plt_dp_err("Prepare src iov failed");
2505                 ret = -EINVAL;
2506                 goto free_mdata_and_exit;
2507         }
2508
2509         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2510         if (ret)
2511                 goto free_mdata_and_exit;
2512
2513         return 0;
2514
2515 free_mdata_and_exit:
2516         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2517                 rte_mempool_put(m_info->pool, infl_req->mdata);
2518 err_exit:
2519         return ret;
2520 }
2521 #endif /*_CNXK_SE_H_ */