7959c4c7af8d3aa3ef3c4280195148c0c4c262ec
[dpdk.git] / drivers / crypto / cnxk / cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
/*
 * Bytes needed to hold one IOV descriptor: the roc_se_iov_ptr header
 * followed by up to ROC_SE_MAX_SG_CNT buffer pointers.  SRC and DST
 * descriptors have the same layout.
 */
#define SRC_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
/*
 * Per-session state for the cnxk symmetric-crypto engine (SE) PMD.
 * Cache-line aligned; the leading bitfields pack operation and algorithm
 * flags into a single 16-bit word.
 */
struct cnxk_se_sess {
	uint16_t cpt_op : 4;		/* CPT operation type */
	uint16_t zsk_flag : 4;		/* ZUC/SNOW3G/Kasumi flags — TODO confirm encoding */
	uint16_t aes_gcm : 1;		/* Session uses AES-GCM */
	uint16_t aes_ctr : 1;		/* Session uses AES-CTR */
	uint16_t chacha_poly : 1;	/* Session uses ChaCha20-Poly1305 */
	uint16_t is_null : 1;		/* NULL cipher/auth session */
	uint16_t is_gmac : 1;		/* AES-GMAC (auth-only) session */
	uint16_t rsvd1 : 3;		/* Reserved/padding bits */
	uint16_t aad_length;		/* AAD length in bytes */
	uint8_t mac_len;		/* Digest (MAC) length in bytes */
	uint8_t iv_length;		/* Cipher IV length in bytes */
	uint8_t auth_iv_length;		/* Auth IV length in bytes */
	uint16_t iv_offset;		/* Cipher IV offset — presumably into the crypto op; verify against caller */
	uint16_t auth_iv_offset;	/* Auth IV offset — presumably into the crypto op; verify against caller */
	uint32_t salt;			/* Salt value — presumably for counter/AEAD modes; confirm */
	uint64_t cpt_inst_w7;		/* Precomputed CPT instruction word 7 */
	struct roc_se_ctx roc_se_ctx;	/* ROC SE hardware context */
} __rte_cache_aligned;
38
39 static inline void
40 pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type)
41 {
42         uint32_t *iv_s_temp, iv_temp[4];
43         int j;
44
45         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
46                 /*
47                  * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
48                  * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
49                  */
50
51                 iv_s_temp = (uint32_t *)iv_s;
52
53                 for (j = 0; j < 4; j++)
54                         iv_temp[j] = iv_s_temp[3 - j];
55                 memcpy(iv_d, iv_temp, 16);
56         } else {
57                 /* ZUC doesn't need a swap */
58                 memcpy(iv_d, iv_s, 16);
59         }
60 }
61
62 static __rte_always_inline int
63 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
64 {
65         uint16_t mac_len = auth->digest_length;
66         int ret;
67
68         switch (auth->algo) {
69         case RTE_CRYPTO_AUTH_MD5:
70         case RTE_CRYPTO_AUTH_MD5_HMAC:
71                 ret = (mac_len == 16) ? 0 : -1;
72                 break;
73         case RTE_CRYPTO_AUTH_SHA1:
74         case RTE_CRYPTO_AUTH_SHA1_HMAC:
75                 ret = (mac_len == 20) ? 0 : -1;
76                 break;
77         case RTE_CRYPTO_AUTH_SHA224:
78         case RTE_CRYPTO_AUTH_SHA224_HMAC:
79                 ret = (mac_len == 28) ? 0 : -1;
80                 break;
81         case RTE_CRYPTO_AUTH_SHA256:
82         case RTE_CRYPTO_AUTH_SHA256_HMAC:
83                 ret = (mac_len == 32) ? 0 : -1;
84                 break;
85         case RTE_CRYPTO_AUTH_SHA384:
86         case RTE_CRYPTO_AUTH_SHA384_HMAC:
87                 ret = (mac_len == 48) ? 0 : -1;
88                 break;
89         case RTE_CRYPTO_AUTH_SHA512:
90         case RTE_CRYPTO_AUTH_SHA512_HMAC:
91                 ret = (mac_len == 64) ? 0 : -1;
92                 break;
93         case RTE_CRYPTO_AUTH_NULL:
94                 ret = 0;
95                 break;
96         default:
97                 ret = -1;
98         }
99
100         return ret;
101 }
102
103 static __rte_always_inline void
104 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
105 {
106         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
107         memcpy(fctx->enc.encr_iv, salt, 4);
108 }
109
110 static __rte_always_inline uint32_t
111 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
112              uint32_t size)
113 {
114         struct roc_se_sglist_comp *to = &list[i >> 2];
115
116         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
117         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
118         i++;
119         return i;
120 }
121
122 static __rte_always_inline uint32_t
123 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
124                       struct roc_se_buf_ptr *from)
125 {
126         struct roc_se_sglist_comp *to = &list[i >> 2];
127
128         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
129         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
130         i++;
131         return i;
132 }
133
134 static __rte_always_inline uint32_t
135 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
136                           struct roc_se_buf_ptr *from, uint32_t *psize)
137 {
138         struct roc_se_sglist_comp *to = &list[i >> 2];
139         uint32_t size = *psize;
140         uint32_t e_len;
141
142         e_len = (size > from->size) ? from->size : size;
143         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
144         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
145         *psize -= e_len;
146         i++;
147         return i;
148 }
149
/*
 * Fill the microcode-expected SGIO list from an IOV given by the user,
 * consuming up to *psize bytes.
 *
 * @param from_offset   bytes to skip at the start of the IOV before mapping.
 * @param extra_buf     optional buffer (e.g. AAD) spliced into the stream
 *                      extra_offset bytes into the first qualifying segment.
 * @param psize         in: bytes wanted; out: bytes that could not be mapped
 *                      (non-zero on return means the IOV was too small).
 * @return the next free SG entry index.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_iov_ptr *from, uint32_t from_offset,
		      uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
		      uint32_t extra_offset)
{
	int32_t j;
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;
	struct roc_se_buf_ptr *bufs;

	bufs = from->bufs;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		uint64_t e_vaddr;
		uint32_t e_len;
		struct roc_se_sglist_comp *to = &list[i >> 2];

		/* Consume from_offset, skipping whole buffers if needed */
		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
				continue;
			}
			e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
					(bufs[j].size - from_offset) :
					size;
			from_offset = 0;
		} else {
			e_vaddr = (uint64_t)bufs[j].vaddr;
			e_len = (size > bufs[j].size) ? bufs[j].size : size;
		}

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			uint64_t next_vaddr = e_vaddr + extra_offset;

			if (!extra_offset) {
				/* Extra buf comes first: reuse the slot just written */
				i--;
			} else {
				/* Truncate this entry to the bytes before the splice */
				e_len = extra_offset;
				size -= e_len;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			}

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(
					(uint64_t)extra_buf->vaddr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
				size -= next_len;
			}
			extra_len = 0;

		} else {
			size -= e_len;
		}
		/*
		 * NOTE(review): this subtracts the *remaining* size rather
		 * than the bytes consumed from this segment; it matches the
		 * in-tree implementation but looks suspicious — confirm
		 * intent before changing.
		 */
		if (extra_offset)
			extra_offset -= size;
		i++;
	}

	*psize = size;
	return (uint32_t)i;
}
236
237 static __rte_always_inline int
238 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
239                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
240 {
241         void *m_vaddr = params->meta_buf.vaddr;
242         uint32_t size, i;
243         uint16_t data_len, mac_len, key_len;
244         roc_se_auth_type hash_type;
245         struct roc_se_ctx *ctx;
246         struct roc_se_sglist_comp *gather_comp;
247         struct roc_se_sglist_comp *scatter_comp;
248         uint8_t *in_buffer;
249         uint32_t g_size_bytes, s_size_bytes;
250         union cpt_inst_w4 cpt_inst_w4;
251
252         ctx = params->ctx_buf.vaddr;
253
254         hash_type = ctx->hash_type;
255         mac_len = ctx->mac_len;
256         key_len = ctx->auth_key_len;
257         data_len = ROC_SE_AUTH_DLEN(d_lens);
258
259         /*GP op header */
260         cpt_inst_w4.s.opcode_minor = 0;
261         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
262         if (ctx->hmac) {
263                 cpt_inst_w4.s.opcode_major =
264                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
265                 cpt_inst_w4.s.param1 = key_len;
266                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
267         } else {
268                 cpt_inst_w4.s.opcode_major =
269                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
270                 cpt_inst_w4.s.param1 = 0;
271                 cpt_inst_w4.s.dlen = data_len;
272         }
273
274         /* Null auth only case enters the if */
275         if (unlikely(!hash_type && !ctx->enc_cipher)) {
276                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
277                 /* Minor op is passthrough */
278                 cpt_inst_w4.s.opcode_minor = 0x03;
279                 /* Send out completion code only */
280                 cpt_inst_w4.s.param2 = 0x1;
281         }
282
283         /* DPTR has SG list */
284         in_buffer = m_vaddr;
285
286         ((uint16_t *)in_buffer)[0] = 0;
287         ((uint16_t *)in_buffer)[1] = 0;
288
289         /* TODO Add error check if space will be sufficient */
290         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
291
292         /*
293          * Input gather list
294          */
295
296         i = 0;
297
298         if (ctx->hmac) {
299                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
300                 /* Key */
301                 i = fill_sg_comp(gather_comp, i, k_vaddr,
302                                  RTE_ALIGN_CEIL(key_len, 8));
303         }
304
305         /* input data */
306         size = data_len;
307         if (size) {
308                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
309                                           &size, NULL, 0);
310                 if (unlikely(size)) {
311                         plt_dp_err("Insufficient dst IOV size, short by %dB",
312                                    size);
313                         return -1;
314                 }
315         } else {
316                 /*
317                  * Looks like we need to support zero data
318                  * gather ptr in case of hash & hmac
319                  */
320                 i++;
321         }
322         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
323         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
324
325         /*
326          * Output Gather list
327          */
328
329         i = 0;
330         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
331                                                      g_size_bytes);
332
333         if (flags & ROC_SE_VALID_MAC_BUF) {
334                 if (unlikely(params->mac_buf.size < mac_len)) {
335                         plt_dp_err("Insufficient MAC size");
336                         return -1;
337                 }
338
339                 size = mac_len;
340                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
341                                               &size);
342         } else {
343                 size = mac_len;
344                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
345                                           data_len, &size, NULL, 0);
346                 if (unlikely(size)) {
347                         plt_dp_err("Insufficient dst IOV size, short by %dB",
348                                    size);
349                         return -1;
350                 }
351         }
352
353         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
354         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
355
356         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
357
358         /* This is DPTR len in case of SG mode */
359         cpt_inst_w4.s.dlen = size;
360
361         inst->dptr = (uint64_t)in_buffer;
362         inst->w4.u64 = cpt_inst_w4.u64;
363
364         return 0;
365 }
366
/*
 * Prepare a CPT FC (flexi-crypto) instruction for an encrypt (+ optional
 * auth/HMAC) operation.
 *
 * d_offs packs the encrypt/auth offsets and d_lens the corresponding data
 * lengths (extracted with the ROC_SE_* accessor macros).  Depending on the
 * buffer-layout flags the request is built either in direct mode (single
 * in-place buffer with headroom for the offset control word + IV) or in
 * scatter-gather mode using the meta buffer.
 *
 * @return 0 on success, -1 on SG-space shortage or unrepresentable offsets.
 */
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct roc_se_ctx *se_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	uint8_t iv_len = 16;
	struct roc_se_buf_ptr *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	union cpt_inst_w4 cpt_inst_w4;
	void *offset_vaddr;
	uint8_t op_minor;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/* We don't support both AAD and auth data separately */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}
	se_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = se_ctx->enc_cipher;
	hash_type = se_ctx->hash_type;
	mac_len = se_ctx->mac_len;
	op_minor = se_ctx->template_w4.s.opcode_minor;

	/* No IV buffer supplied: the IV offset comes packed in d_offs */
	if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != ROC_SE_GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Encryption */
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
	cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
	cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

	if (hash_type == ROC_SE_GMAC_TYPE) {
		/* GMAC is auth-only: no cipher data */
		encr_offset = 0;
		encr_data_len = 0;
	}

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		/*
		 * Pad partial blocks up to a block boundary.
		 * NOTE(review): both the DES3 and AES paths align to 8 bytes
		 * although the AES block size is 16 — matches the in-tree
		 * code; confirm against SE microcode requirements.
		 */
		if ((cipher_type == ROC_SE_DES3_CBC) ||
		    (cipher_type == ROC_SE_DES3_ECB))
			enc_dlen =
				RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
		else if (likely((cipher_type == ROC_SE_AES_CBC) ||
				(cipher_type == ROC_SE_AES_ECB)))
			enc_dlen =
				RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
	}

	/* Totals: output additionally carries the MAC */
	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
	} else {
		inputlen = enc_dlen;
		outputlen = enc_dlen + mac_len;
	}

	if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
		outputlen = enc_dlen;

	/* GP op header */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * In cn9k, cn10k since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;

		/* Use Direct mode */

		/* Control word + IV are placed in the buffer headroom */
		offset_vaddr =
			(uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			/* Copy the 16B IV right after the control word */
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

	} else {
		void *m_vaddr = fc_params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = ROC_SE_OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */

		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* Add input data */
		size = inputlen - iv_len;
		if (likely(size)) {
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
					gather_comp, i, fc_params->bufs, &size);
			} else {
				i = fill_sg_comp_from_iov(
					gather_comp, i, fc_params->src_iov, 0,
					&size, aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter list
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* Add IV */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* output data or output data + digest*/
		if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
			/* Digest goes to the dedicated MAC buffer */
			size = outputlen - iv_len - mac_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
			/* mac_data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (likely(size)) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	/* Control word fields are 16/8/8 bits wide — reject larger offsets */
	if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("iv_offset : %d", iv_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	/* Big-endian offset control word: encr(16b) | iv(8b) | auth(8b) */
	*(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
		((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
		((uint64_t)auth_offset));

	inst->w4.u64 = cpt_inst_w4.u64;
	return 0;
}
666
667 static __rte_always_inline int
668 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
669                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
670 {
671         uint32_t iv_offset = 0, size;
672         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
673         struct roc_se_ctx *se_ctx;
674         int32_t hash_type, mac_len;
675         uint8_t iv_len = 16;
676         struct roc_se_buf_ptr *aad_buf = NULL;
677         uint32_t encr_offset, auth_offset;
678         uint32_t encr_data_len, auth_data_len, aad_len = 0;
679         uint32_t passthrough_len = 0;
680         union cpt_inst_w4 cpt_inst_w4;
681         void *offset_vaddr;
682         uint8_t op_minor;
683
684         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
685         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
686         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
687         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
688
689         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
690                 /* We don't support both AAD and auth data separately */
691                 auth_data_len = 0;
692                 auth_offset = 0;
693                 aad_len = fc_params->aad_buf.size;
694                 aad_buf = &fc_params->aad_buf;
695         }
696
697         se_ctx = fc_params->ctx_buf.vaddr;
698         hash_type = se_ctx->hash_type;
699         mac_len = se_ctx->mac_len;
700         op_minor = se_ctx->template_w4.s.opcode_minor;
701
702         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
703                 iv_len = 0;
704                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
705         }
706
707         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
708                 /*
709                  * When AAD is given, data above encr_offset is pass through
710                  * Since AAD is given as separate pointer and not as offset,
711                  * this is a special case as we need to fragment input data
712                  * into passthrough + encr_data and then insert AAD in between.
713                  */
714                 if (hash_type != ROC_SE_GMAC_TYPE) {
715                         passthrough_len = encr_offset;
716                         auth_offset = passthrough_len + iv_len;
717                         encr_offset = passthrough_len + aad_len + iv_len;
718                         auth_data_len = aad_len + encr_data_len;
719                 } else {
720                         passthrough_len = 16 + aad_len;
721                         auth_offset = passthrough_len + iv_len;
722                         auth_data_len = aad_len;
723                 }
724         } else {
725                 encr_offset += iv_len;
726                 auth_offset += iv_len;
727         }
728
729         /* Decryption */
730         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
731         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
732         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
733
734         if (hash_type == ROC_SE_GMAC_TYPE) {
735                 encr_offset = 0;
736                 encr_data_len = 0;
737         }
738
739         enc_dlen = encr_offset + encr_data_len;
740         auth_dlen = auth_offset + auth_data_len;
741
742         if (auth_dlen > enc_dlen) {
743                 inputlen = auth_dlen + mac_len;
744                 outputlen = auth_dlen;
745         } else {
746                 inputlen = enc_dlen + mac_len;
747                 outputlen = enc_dlen;
748         }
749
750         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
751                 outputlen = inputlen = enc_dlen;
752
753         cpt_inst_w4.s.param1 = encr_data_len;
754         cpt_inst_w4.s.param2 = auth_data_len;
755
756         /*
757          * In cn9k, cn10k since we have a limitation of
758          * IV & Offset control word not part of instruction
759          * and need to be part of Data Buffer, we check if
760          * head room is there and then only do the Direct mode processing
761          */
762         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
763                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
764                 void *dm_vaddr = fc_params->bufs[0].vaddr;
765
766                 /* Use Direct mode */
767
768                 offset_vaddr =
769                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
770                 inst->dptr = (uint64_t)offset_vaddr;
771
772                 /* RPTR should just exclude offset control word */
773                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
774
775                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
776
777                 if (likely(iv_len)) {
778                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
779                                                       ROC_SE_OFF_CTRL_LEN);
780                         uint64_t *src = fc_params->iv_buf;
781                         dest[0] = src[0];
782                         dest[1] = src[1];
783                 }
784
785         } else {
786                 void *m_vaddr = fc_params->meta_buf.vaddr;
787                 uint32_t g_size_bytes, s_size_bytes;
788                 struct roc_se_sglist_comp *gather_comp;
789                 struct roc_se_sglist_comp *scatter_comp;
790                 uint8_t *in_buffer;
791                 uint8_t i = 0;
792
793                 /* This falls under strict SG mode */
794                 offset_vaddr = m_vaddr;
795                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
796
797                 m_vaddr = (uint8_t *)m_vaddr + size;
798
799                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
800
801                 if (likely(iv_len)) {
802                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
803                                                       ROC_SE_OFF_CTRL_LEN);
804                         uint64_t *src = fc_params->iv_buf;
805                         dest[0] = src[0];
806                         dest[1] = src[1];
807                 }
808
809                 /* DPTR has SG list */
810                 in_buffer = m_vaddr;
811
812                 ((uint16_t *)in_buffer)[0] = 0;
813                 ((uint16_t *)in_buffer)[1] = 0;
814
815                 /* TODO Add error check if space will be sufficient */
816                 gather_comp =
817                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
818
819                 /*
820                  * Input Gather List
821                  */
822                 i = 0;
823
824                 /* Offset control word that includes iv */
825                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
826                                  ROC_SE_OFF_CTRL_LEN + iv_len);
827
828                 /* Add input data */
829                 if (flags & ROC_SE_VALID_MAC_BUF) {
830                         size = inputlen - iv_len - mac_len;
831                         if (size) {
832                                 /* input data only */
833                                 if (unlikely(flags &
834                                              ROC_SE_SINGLE_BUF_INPLACE)) {
835                                         i = fill_sg_comp_from_buf_min(
836                                                 gather_comp, i, fc_params->bufs,
837                                                 &size);
838                                 } else {
839                                         uint32_t aad_offset =
840                                                 aad_len ? passthrough_len : 0;
841
842                                         i = fill_sg_comp_from_iov(
843                                                 gather_comp, i,
844                                                 fc_params->src_iov, 0, &size,
845                                                 aad_buf, aad_offset);
846                                 }
847                                 if (unlikely(size)) {
848                                         plt_dp_err("Insufficient buffer"
849                                                    " space, size %d needed",
850                                                    size);
851                                         return -1;
852                                 }
853                         }
854
855                         /* mac data */
856                         if (mac_len) {
857                                 i = fill_sg_comp_from_buf(gather_comp, i,
858                                                           &fc_params->mac_buf);
859                         }
860                 } else {
861                         /* input data + mac */
862                         size = inputlen - iv_len;
863                         if (size) {
864                                 if (unlikely(flags &
865                                              ROC_SE_SINGLE_BUF_INPLACE)) {
866                                         i = fill_sg_comp_from_buf_min(
867                                                 gather_comp, i, fc_params->bufs,
868                                                 &size);
869                                 } else {
870                                         uint32_t aad_offset =
871                                                 aad_len ? passthrough_len : 0;
872
873                                         if (unlikely(!fc_params->src_iov)) {
874                                                 plt_dp_err("Bad input args");
875                                                 return -1;
876                                         }
877
878                                         i = fill_sg_comp_from_iov(
879                                                 gather_comp, i,
880                                                 fc_params->src_iov, 0, &size,
881                                                 aad_buf, aad_offset);
882                                 }
883
884                                 if (unlikely(size)) {
885                                         plt_dp_err("Insufficient buffer"
886                                                    " space, size %d needed",
887                                                    size);
888                                         return -1;
889                                 }
890                         }
891                 }
892                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
893                 g_size_bytes =
894                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
895
896                 /*
897                  * Output Scatter List
898                  */
899
900                 i = 0;
901                 scatter_comp =
902                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
903                                                       g_size_bytes);
904
905                 /* Add iv */
906                 if (iv_len) {
907                         i = fill_sg_comp(scatter_comp, i,
908                                          (uint64_t)offset_vaddr +
909                                                  ROC_SE_OFF_CTRL_LEN,
910                                          iv_len);
911                 }
912
913                 /* Add output data */
914                 size = outputlen - iv_len;
915                 if (size) {
916                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
917                                 /* handle single buffer here */
918                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
919                                                               fc_params->bufs,
920                                                               &size);
921                         } else {
922                                 uint32_t aad_offset =
923                                         aad_len ? passthrough_len : 0;
924
925                                 if (unlikely(!fc_params->dst_iov)) {
926                                         plt_dp_err("Bad input args");
927                                         return -1;
928                                 }
929
930                                 i = fill_sg_comp_from_iov(
931                                         scatter_comp, i, fc_params->dst_iov, 0,
932                                         &size, aad_buf, aad_offset);
933                         }
934
935                         if (unlikely(size)) {
936                                 plt_dp_err("Insufficient buffer space,"
937                                            " size %d needed",
938                                            size);
939                                 return -1;
940                         }
941                 }
942
943                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
944                 s_size_bytes =
945                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
946
947                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
948
949                 /* This is DPTR len in case of SG mode */
950                 cpt_inst_w4.s.dlen = size;
951
952                 inst->dptr = (uint64_t)in_buffer;
953         }
954
955         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
956                      (auth_offset >> 8))) {
957                 plt_dp_err("Offset not supported");
958                 plt_dp_err("enc_offset: %d", encr_offset);
959                 plt_dp_err("iv_offset : %d", iv_offset);
960                 plt_dp_err("auth_offset: %d", auth_offset);
961                 return -1;
962         }
963
964         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
965                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
966                 ((uint64_t)auth_offset));
967
968         inst->w4.u64 = cpt_inst_w4.u64;
969         return 0;
970 }
971
/*
 * Prepare a CPT instruction for a ZUC/SNOW3G (PDCP) operation.
 *
 * Depending on the session's zsk_flags this builds either an
 * authentication-only request (zsk_flags == 0x1, EIA3/UIA2) or a
 * cipher-only request (EEA3/UEA2).  The offset control word and IV
 * must live in the data buffer, so the function uses Direct mode when
 * the caller provides a single in-place buffer with headroom, and
 * falls back to scatter-gather mode otherwise.
 *
 * Returns 0 on success, -1 when an offset exceeds the hardware field
 * width or the destination/source vectors are too small.
 */
static __rte_always_inline int
cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t pdcp_alg_type;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags, iv_len;
	uint64_t offset_ctrl;
	uint64_t *offset_vaddr;
	uint8_t *iv_s;
	union cpt_inst_w4 cpt_inst_w4;

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;
	pdcp_alg_type = se_ctx->pdcp_alg_type;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;

	/* Minor opcode (cipher/auth selection) comes from the session template */
	cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;

	/* zsk_flags == 0x1 selects the authentication-only (hash) path */
	if (flags == 0x1) {
		iv_s = params->auth_iv_buf;
		iv_len = params->auth_iv_len;

		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

		/* EIA3 or UIA2: offset arrives in bits, convert to bytes */
		auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;

		/* consider iv len */
		auth_offset += iv_len;

		/* Auth data length is in bits; round up to whole bytes */
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;

		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

		encr_data_len = 0;
		encr_offset = 0;
	} else {
		iv_s = params->iv_buf;
		iv_len = params->cipher_iv_len;

		/* EEA3 or UEA2 */
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

		/* Offset arrives in bits, convert to bytes */
		encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		/* Cipher data length is in bits; round up to whole bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		auth_data_len = 0;
		auth_offset = 0;
	}

	/* Control word fields are 16 bits (encr) and 8 bits (auth) wide */
	if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * In cn9k, cn10k since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;

		/* Use Direct mode */

		/* Control word + IV are placed in the buffer headroom */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    ROC_SE_OFF_CTRL_LEN - iv_len);

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;
		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		/* IV follows the offset control word; byte order fixed up
		 * per-algorithm by pdcp_iv_copy()
		 */
		uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type);

		*offset_vaddr = offset_ctrl;
	} else {
		void *m_vaddr = params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint8_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		/* SG list header: first two 16-bit words are reserved/zero */
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		/* Gather component count, big-endian as hardware expects */
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */

		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		if (flags == 0x1) {
			/* Auth-only path: IV is placed in the scatter list
			 * only for cipher ops (EEA3 & UEA2), so drop it here
			 */
			iv_len = 0;
		}

		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		if (req_flags & ROC_SE_VALID_MAC_BUF) {
			/* MAC goes to its own buffer; exclude it here */
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}
		}
		/* Scatter component count, big-endian as hardware expects */
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1219
/*
 * Prepare a CPT instruction for a KASUMI encrypt (F8) or MAC
 * generation (F9) operation.
 *
 * zsk_flags == 0x0 selects the cipher (F8) path, otherwise the auth
 * (F9) path is taken.  KASUMI always uses scatter-gather mode; the
 * offset control word and 8-byte IV are staged in the meta buffer
 * ahead of the SG list.
 *
 * Returns 0 on success, -1 when an offset exceeds the hardware field
 * width or the source/destination vectors are too small.
 */
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t i = 0;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	/* Offsets arrive in bits; microcode wants bytes */
	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;

	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	/* Direction bit is carried in IV byte 8 (just past the 8-byte IV) */
	dir = iv_s[8] & 0x1;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/* consider iv len */
	if (flags == 0x0) {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	/* SG list header: first two 16-bit words are reserved/zero */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		/* F8: data length is in bits, round up to whole bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0; encr offset field is 16 bits wide */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		if (unlikely((encr_offset >> 16))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("enc_offset: %d", encr_offset);
			return -1;
		}
	} else {
		/* F9: output is only the MAC; auth offset field is 8 bits */
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
		if (unlikely((auth_offset >> 8))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("auth_offset: %d", auth_offset);
			return -1;
		}
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);

		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	/* Gather component count, big-endian as hardware expects */
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	if (flags == 0x1) {
		/* F9 path: IV appears in the scatter list only for F8 */
		iv_len = 0;
	}

	/* IV */
	if (iv_len) {
		i = fill_sg_comp(scatter_comp, i,
				 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & ROC_SE_VALID_MAC_BUF) {
		/* MAC goes to its own buffer; exclude it here */
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
	}
	/* Scatter component count, big-endian as hardware expects */
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1412
/*
 * Prepare a CPT instruction for a KASUMI decrypt (F8) operation.
 *
 * Decrypt is cipher-only: no auth lengths are programmed and the
 * direction bit stays 0.  Always uses scatter-gather mode with the
 * offset control word and 8-byte IV staged in the meta buffer.
 *
 * Returns 0 on success, -1 when the encrypt offset exceeds its 16-bit
 * field or the source/destination vectors are too small.
 */
static __rte_always_inline int
cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct roc_se_ctx *se_ctx;
	uint8_t i = 0, iv_len = 8;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	/* Offset arrives in bits; microcode wants bytes */
	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;

	/* Clear w4 so unset fields (e.g. param2) are zero */
	cpt_inst_w4.u64 = 0;
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;

	/* consider iv len */
	encr_offset += iv_len;

	/* NOTE(review): enc path computes inputlen from encr_offset; here it
	 * uses iv_len, which assumes the cipher offset is 0 — confirm against
	 * callers before relying on non-zero offsets
	 */
	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	/* SG list header: first two 16-bit words are reserved/zero */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv; encr offset field is 16 bits */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	if (unlikely((encr_offset >> 16))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		return -1;
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
	       iv_len);

	/* Add input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	/* Gather component count, big-endian as hardware expects */
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	/* IV */
	i = fill_sg_comp(scatter_comp, i,
			 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);

	/* Add output data */
	size = outputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	/* Scatter component count, big-endian as hardware expects */
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1543
1544 static __rte_always_inline int
1545 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1546                      struct roc_se_fc_params *fc_params,
1547                      struct cpt_inst_s *inst)
1548 {
1549         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1550         uint8_t fc_type;
1551         int ret = -1;
1552
1553         fc_type = ctx->fc_type;
1554
1555         if (likely(fc_type == ROC_SE_FC_GEN)) {
1556                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1557         } else if (fc_type == ROC_SE_PDCP) {
1558                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1559                                           inst);
1560         } else if (fc_type == ROC_SE_KASUMI) {
1561                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1562         }
1563
1564         /*
1565          * For AUTH_ONLY case,
1566          * MC only supports digest generation and verification
1567          * should be done in software by memcmp()
1568          */
1569
1570         return ret;
1571 }
1572
1573 static __rte_always_inline int
1574 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1575                      struct roc_se_fc_params *fc_params,
1576                      struct cpt_inst_s *inst)
1577 {
1578         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1579         uint8_t fc_type;
1580         int ret = -1;
1581
1582         fc_type = ctx->fc_type;
1583
1584         if (likely(fc_type == ROC_SE_FC_GEN)) {
1585                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1586         } else if (fc_type == ROC_SE_PDCP) {
1587                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1588                                           inst);
1589         } else if (fc_type == ROC_SE_KASUMI) {
1590                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1591                                           inst);
1592         } else if (fc_type == ROC_SE_HASH_HMAC) {
1593                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1594         }
1595
1596         return ret;
1597 }
1598
1599 static __rte_always_inline int
1600 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1601 {
1602         struct rte_crypto_aead_xform *aead_form;
1603         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1604         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1605         uint32_t cipher_key_len = 0;
1606         uint8_t aes_gcm = 0;
1607         aead_form = &xform->aead;
1608
1609         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1610                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1611                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1612         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1613                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1614                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1615         } else {
1616                 plt_dp_err("Unknown aead operation\n");
1617                 return -1;
1618         }
1619         switch (aead_form->algo) {
1620         case RTE_CRYPTO_AEAD_AES_GCM:
1621                 enc_type = ROC_SE_AES_GCM;
1622                 cipher_key_len = 16;
1623                 aes_gcm = 1;
1624                 break;
1625         case RTE_CRYPTO_AEAD_AES_CCM:
1626                 plt_dp_err("Crypto: Unsupported cipher algo %u",
1627                            aead_form->algo);
1628                 return -1;
1629         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1630                 enc_type = ROC_SE_CHACHA20;
1631                 auth_type = ROC_SE_POLY1305;
1632                 cipher_key_len = 32;
1633                 sess->chacha_poly = 1;
1634                 break;
1635         default:
1636                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1637                            aead_form->algo);
1638                 return -1;
1639         }
1640         if (aead_form->key.length < cipher_key_len) {
1641                 plt_dp_err("Invalid cipher params keylen %u",
1642                            aead_form->key.length);
1643                 return -1;
1644         }
1645         sess->zsk_flag = 0;
1646         sess->aes_gcm = aes_gcm;
1647         sess->mac_len = aead_form->digest_length;
1648         sess->iv_offset = aead_form->iv.offset;
1649         sess->iv_length = aead_form->iv.length;
1650         sess->aad_length = aead_form->aad_length;
1651
1652         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1653                                          aead_form->key.data,
1654                                          aead_form->key.length, NULL)))
1655                 return -1;
1656
1657         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1658                                          aead_form->digest_length)))
1659                 return -1;
1660
1661         return 0;
1662 }
1663
1664 static __rte_always_inline int
1665 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1666 {
1667         struct rte_crypto_cipher_xform *c_form;
1668         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1669         uint32_t cipher_key_len = 0;
1670         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1671
1672         c_form = &xform->cipher;
1673
1674         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1675                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1676         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1677                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1678                 if (xform->next != NULL &&
1679                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1680                         /* Perform decryption followed by auth verify */
1681                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1682                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1683                 }
1684         } else {
1685                 plt_dp_err("Unknown cipher operation\n");
1686                 return -1;
1687         }
1688
1689         switch (c_form->algo) {
1690         case RTE_CRYPTO_CIPHER_AES_CBC:
1691                 enc_type = ROC_SE_AES_CBC;
1692                 cipher_key_len = 16;
1693                 break;
1694         case RTE_CRYPTO_CIPHER_3DES_CBC:
1695                 enc_type = ROC_SE_DES3_CBC;
1696                 cipher_key_len = 24;
1697                 break;
1698         case RTE_CRYPTO_CIPHER_DES_CBC:
1699                 /* DES is implemented using 3DES in hardware */
1700                 enc_type = ROC_SE_DES3_CBC;
1701                 cipher_key_len = 8;
1702                 break;
1703         case RTE_CRYPTO_CIPHER_AES_CTR:
1704                 enc_type = ROC_SE_AES_CTR;
1705                 cipher_key_len = 16;
1706                 aes_ctr = 1;
1707                 break;
1708         case RTE_CRYPTO_CIPHER_NULL:
1709                 enc_type = 0;
1710                 is_null = 1;
1711                 break;
1712         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1713                 enc_type = ROC_SE_KASUMI_F8_ECB;
1714                 cipher_key_len = 16;
1715                 zsk_flag = ROC_SE_K_F8;
1716                 break;
1717         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1718                 enc_type = ROC_SE_SNOW3G_UEA2;
1719                 cipher_key_len = 16;
1720                 zsk_flag = ROC_SE_ZS_EA;
1721                 break;
1722         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1723                 enc_type = ROC_SE_ZUC_EEA3;
1724                 cipher_key_len = c_form->key.length;
1725                 zsk_flag = ROC_SE_ZS_EA;
1726                 break;
1727         case RTE_CRYPTO_CIPHER_AES_XTS:
1728                 enc_type = ROC_SE_AES_XTS;
1729                 cipher_key_len = 16;
1730                 break;
1731         case RTE_CRYPTO_CIPHER_3DES_ECB:
1732                 enc_type = ROC_SE_DES3_ECB;
1733                 cipher_key_len = 24;
1734                 break;
1735         case RTE_CRYPTO_CIPHER_AES_ECB:
1736                 enc_type = ROC_SE_AES_ECB;
1737                 cipher_key_len = 16;
1738                 break;
1739         case RTE_CRYPTO_CIPHER_3DES_CTR:
1740         case RTE_CRYPTO_CIPHER_AES_F8:
1741         case RTE_CRYPTO_CIPHER_ARC4:
1742                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1743                 return -1;
1744         default:
1745                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1746                            c_form->algo);
1747                 return -1;
1748         }
1749
1750         if (c_form->key.length < cipher_key_len) {
1751                 plt_dp_err("Invalid cipher params keylen %u",
1752                            c_form->key.length);
1753                 return -1;
1754         }
1755
1756         sess->zsk_flag = zsk_flag;
1757         sess->aes_gcm = 0;
1758         sess->aes_ctr = aes_ctr;
1759         sess->iv_offset = c_form->iv.offset;
1760         sess->iv_length = c_form->iv.length;
1761         sess->is_null = is_null;
1762
1763         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1764                                          c_form->key.data, c_form->key.length,
1765                                          NULL)))
1766                 return -1;
1767
1768         if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
1769                 roc_se_ctx_swap(&sess->roc_se_ctx);
1770         return 0;
1771 }
1772
/*
 * Populate an SE session from an auth transform. Maps the DPDK hash algo
 * to the ROC SE auth type, records the ZUC/SNOW/KASUMI flag and auth-IV
 * layout where applicable, and programs the auth key.
 *
 * Returns 0 on success, -1 on unknown op, unsupported algo, or
 * key-setup failure.
 */
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_auth_type auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* Perform auth followed by encryption */
		sess->roc_se_ctx.template_w4.s.opcode_minor =
			ROC_SE_FC_MINOR_OP_HMAC_FIRST;
	}

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = ROC_SE_SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = ROC_SE_SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = ROC_SE_SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = ROC_SE_GMAC_TYPE;
		/* GMAC reuses the GCM salt/IV handling in the data path */
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = ROC_SE_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = ROC_SE_SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = ROC_SE_MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = ROC_SE_KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = ROC_SE_K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = ROC_SE_SNOW3G_UIA2;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ROC_SE_ZUC_EIA3;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined Hash algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	/* Only wireless (ZUC/SNOW/KASUMI) algos carry a separate auth IV */
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	/* EIA-family auth types need the context byte-swapped for the MC. */
	if ((auth_type >= ROC_SE_ZUC_EIA3) &&
	    (auth_type <= ROC_SE_AES_CMAC_EIA2))
		roc_se_ctx_swap(&sess->roc_se_ctx);

	return 0;
}
1879
1880 static __rte_always_inline int
1881 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1882 {
1883         struct rte_crypto_auth_xform *a_form;
1884         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1885         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1886
1887         a_form = &xform->auth;
1888
1889         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1890                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1891         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1892                 sess->cpt_op |= ROC_SE_OP_DECODE;
1893         else {
1894                 plt_dp_err("Unknown auth operation");
1895                 return -1;
1896         }
1897
1898         switch (a_form->algo) {
1899         case RTE_CRYPTO_AUTH_AES_GMAC:
1900                 enc_type = ROC_SE_AES_GCM;
1901                 auth_type = ROC_SE_GMAC_TYPE;
1902                 break;
1903         default:
1904                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1905                            a_form->algo);
1906                 return -1;
1907         }
1908
1909         sess->zsk_flag = 0;
1910         sess->aes_gcm = 0;
1911         sess->is_gmac = 1;
1912         sess->iv_offset = a_form->iv.offset;
1913         sess->iv_length = a_form->iv.length;
1914         sess->mac_len = a_form->digest_length;
1915
1916         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1917                                          a_form->key.data, a_form->key.length,
1918                                          NULL)))
1919                 return -1;
1920
1921         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1922                                          a_form->digest_length)))
1923                 return -1;
1924
1925         return 0;
1926 }
1927
1928 static __rte_always_inline void *
1929 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1930               struct rte_mempool *cpt_meta_pool,
1931               struct cpt_inflight_req *infl_req)
1932 {
1933         uint8_t *mdata;
1934
1935         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1936                 return NULL;
1937
1938         buf->vaddr = mdata;
1939         buf->size = len;
1940
1941         infl_req->mdata = mdata;
1942         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1943
1944         return mdata;
1945 }
1946
/*
 * Flatten an mbuf chain into an SE IOV list, optionally skipping
 * start_offset bytes from the head of the chain.
 *
 * Returns 0 on success (including a NULL pkt, which yields an empty
 * list), 1 if start_offset lands exactly at the end of a segment
 * (treated as an error by callers).
 *
 * NOTE(review): the offset walk assumes start_offset lies within the
 * chain — pkt->next is not NULL-checked; confirm callers guarantee
 * this.
 */
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
		     uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;
	} else {
		/* Skip whole segments until the one containing the offset */
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	/* Remaining segments; stop at the first zero-length one */
	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
1998
/*
 * Build src/dst description for in-place processing. A single-segment
 * mbuf is described directly in param->bufs[0] and flagged
 * ROC_SE_SINGLE_BUF_INPLACE (plus ROC_SE_SINGLE_BUF_HEADROOM when there
 * is enough headroom); a chained mbuf is flattened into param->src_iov.
 * Always returns 0.
 */
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     struct roc_se_fc_params *param, uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	uint32_t seg_size = 0;
	struct roc_se_iov_ptr *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom;

		*flags |= ROC_SE_SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		/* NOTE(review): 24 presumably covers the offset control word
		 * plus IV placed before the data — confirm against the SG
		 * prep routines.
		 */
		if (likely(headroom >= 24))
			*flags |= ROC_SE_SINGLE_BUF_HEADROOM;

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].size = seg_size;
		return 0;
	}
	/* Multi-segment: flatten the chain into the src IOV list */
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	/* Stop at the first zero-length segment */
	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
2048
/*
 * Translate a symmetric crypto op into a CPT instruction via the
 * flow-control (FC) prep routines.
 *
 * Packs cipher/auth (or AEAD) offsets and lengths into d_offs/d_lens,
 * decides between in-place single-buffer and scatter-gather processing,
 * allocates a meta buffer when SG is needed, and calls the enc/dec prep
 * routine. Returns 0 on success, -EINVAL on bad buffers, -ENOMEM when
 * the meta pool is empty.
 */
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
	       struct cpt_qp_meta_info *m_info,
	       struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = &sess->roc_se_ctx;
	uint8_t op_minor = ctx->template_w4.s.opcode_minor;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	struct roc_se_fc_params fc_params;
	/* Stack space for the gather/scatter IOV descriptors */
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	fc_params.cipher_iv_len = sess->iv_length;
	fc_params.auth_iv_len = sess->auth_iv_length;

	if (likely(sess->iv_length)) {
		flags |= ROC_SE_VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
							     sess->iv_offset);
		/* AES-CTR with a 12-byte nonce: append a counter of 1 to
		 * form the full 16-byte counter block.
		 */
		if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop, uint8_t *,
							 sess->iv_offset),
			       12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
			cop, uint8_t *, sess->auth_iv_offset);
		/* Non-EA wireless algos cannot run in place */
		if (sess->zsk_flag != ROC_SE_ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess->aes_gcm || sess->chacha_poly) {
		/* AEAD path: offsets/lengths come from sym_op->aead */
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off =
			sym_op->aead.data.offset + sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess->aad_length;
		/* If AAD immediately precedes the data in the mbuf, fold it
		 * into the cipher region; otherwise pass it as a separate
		 * buffer (which forces SG mode).
		 */
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src, uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.size = aad_len;
			flags |= ROC_SE_VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		/* First 4 IV bytes are the salt; re-program the context only
		 * when it changes.
		 */
		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess->salt)) {
			cpt_fc_salt_update(&sess->roc_se_ctx, salt);
			sess->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m =
				(cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				inplace = 0;
			}
		}
	} else {
		/* Cipher/auth path: pack cipher offs/len in the high bits,
		 * auth offs/len in the low bits.
		 */
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off =
			sym_op->cipher.data.offset + sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off <
		    (sym_op->auth.data.offset + sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess->is_gmac)) {
			uint8_t *salt;
			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess->salt)) {
				cpt_fc_salt_update(&sess->roc_se_ctx, salt);
				sess->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					      mc_hash_off !=
				      (uint8_t *)sym_op->auth.digest.data))) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;

	/* NULL cipher and plain decode cannot run in place (unless HMAC-first
	 * chaining is active).
	 */
	if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
	    unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
							  &flags))) {
			plt_dp_err("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			plt_dp_err("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					plt_dp_err("Not enough space in "
						   "m_dst %p, need %u"
						   " more",
						   m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				plt_dp_err("Prepare dst iov failed for "
					   "m_dst %p",
					   m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	/* Meta buffer is only needed when the request cannot be served by
	 * the single-buffer in-place fast path.
	 */
	if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		       (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
		       ((ctx->fc_type == ROC_SE_FC_GEN) ||
			(ctx->fc_type == ROC_SE_PDCP))))) {
		mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
				      m_info->pool, infl_req);
		if (mdata == NULL) {
			plt_dp_err("Error allocating meta buffer for request");
			return -ENOMEM;
		}
	}

	/* Finally prepare the instruction */
	if (cpt_op & ROC_SE_OP_ENCODE)
		ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);
	else
		ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);

	if (unlikely(ret)) {
		plt_dp_err("Preparing request failed due to bad input arg");
		goto free_mdata_and_exit;
	}

	return 0;

free_mdata_and_exit:
	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
		rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
	return ret;
}
2288
2289 static __rte_always_inline void
2290 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2291 {
2292         uint8_t *mac;
2293         struct rte_crypto_sym_op *sym_op = op->sym;
2294
2295         if (sym_op->auth.digest.data)
2296                 mac = sym_op->auth.digest.data;
2297         else
2298                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2299                                               sym_op->auth.data.length +
2300                                                       sym_op->auth.data.offset);
2301         if (!mac) {
2302                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2303                 return;
2304         }
2305
2306         if (memcmp(mac, gen_mac, mac_len))
2307                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2308         else
2309                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2310 }
2311
2312 static __rte_always_inline void
2313 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2314                                    uint32_t *addr_length_in_bits,
2315                                    uint8_t *addr_direction)
2316 {
2317         uint8_t found = 0;
2318         uint32_t pos;
2319         uint8_t last_byte;
2320         while (!found && counter_num_bytes > 0) {
2321                 counter_num_bytes--;
2322                 if (src[counter_num_bytes] == 0x00)
2323                         continue;
2324                 pos = rte_bsf32(src[counter_num_bytes]);
2325                 if (pos == 7) {
2326                         if (likely(counter_num_bytes > 0)) {
2327                                 last_byte = src[counter_num_bytes - 1];
2328                                 *addr_direction = last_byte & 0x1;
2329                                 *addr_length_in_bits =
2330                                         counter_num_bytes * 8 - 1;
2331                         }
2332                 } else {
2333                         last_byte = src[counter_num_bytes];
2334                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2335                         *addr_length_in_bits =
2336                                 counter_num_bytes * 8 + (8 - (pos + 2));
2337                 }
2338                 found = 1;
2339         }
2340 }
2341
2342 /*
2343  * This handles all auth only except AES_GMAC
2344  */
2345 static __rte_always_inline int
2346 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2347                    struct cpt_qp_meta_info *m_info,
2348                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2349 {
2350         uint32_t space = 0;
2351         struct rte_crypto_sym_op *sym_op = cop->sym;
2352         void *mdata;
2353         uint32_t auth_range_off;
2354         uint32_t flags = 0;
2355         uint64_t d_offs = 0, d_lens;
2356         struct rte_mbuf *m_src, *m_dst;
2357         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2358         uint16_t mac_len = sess->mac_len;
2359         struct roc_se_fc_params params;
2360         char src[SRC_IOV_SIZE];
2361         uint8_t iv_buf[16];
2362         int ret;
2363
2364         memset(&params, 0, sizeof(struct roc_se_fc_params));
2365
2366         m_src = sym_op->m_src;
2367
2368         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2369                               infl_req);
2370         if (mdata == NULL) {
2371                 ret = -ENOMEM;
2372                 goto err_exit;
2373         }
2374
2375         auth_range_off = sym_op->auth.data.offset;
2376
2377         flags = ROC_SE_VALID_MAC_BUF;
2378         params.src_iov = (void *)src;
2379         if (unlikely(sess->zsk_flag)) {
2380                 /*
2381                  * Since for Zuc, Kasumi, Snow3g offsets are in bits
2382                  * we will send pass through even for auth only case,
2383                  * let MC handle it
2384                  */
2385                 d_offs = auth_range_off;
2386                 auth_range_off = 0;
2387                 params.auth_iv_len = sess->auth_iv_length;
2388                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2389                         cop, uint8_t *, sess->auth_iv_offset);
2390                 if (sess->zsk_flag == ROC_SE_K_F9) {
2391                         uint32_t length_in_bits, num_bytes;
2392                         uint8_t *src, direction = 0;
2393
2394                         memcpy(iv_buf,
2395                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2396                         /*
2397                          * This is kasumi f9, take direction from
2398                          * source buffer
2399                          */
2400                         length_in_bits = cop->sym->auth.data.length;
2401                         num_bytes = (length_in_bits >> 3);
2402                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2403                         find_kasumif9_direction_and_length(
2404                                 src, num_bytes, &length_in_bits, &direction);
2405                         length_in_bits -= 64;
2406                         cop->sym->auth.data.offset += 64;
2407                         d_offs = cop->sym->auth.data.offset;
2408                         auth_range_off = d_offs / 8;
2409                         cop->sym->auth.data.length = length_in_bits;
2410
2411                         /* Store it at end of auth iv */
2412                         iv_buf[8] = direction;
2413                         params.auth_iv_buf = iv_buf;
2414                 }
2415         }
2416
2417         d_lens = sym_op->auth.data.length;
2418
2419         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2420
2421         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2422                 if (sym_op->auth.digest.data) {
2423                         /*
2424                          * Digest to be generated
2425                          * in separate buffer
2426                          */
2427                         params.mac_buf.size = sess->mac_len;
2428                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2429                 } else {
2430                         uint32_t off = sym_op->auth.data.offset +
2431                                        sym_op->auth.data.length;
2432                         int32_t dlen, space;
2433
2434                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2435                         dlen = rte_pktmbuf_pkt_len(m_dst);
2436
2437                         space = off + mac_len - dlen;
2438                         if (space > 0)
2439                                 if (!rte_pktmbuf_append(m_dst, space)) {
2440                                         plt_dp_err("Failed to extend "
2441                                                    "mbuf by %uB",
2442                                                    space);
2443                                         ret = -EINVAL;
2444                                         goto free_mdata_and_exit;
2445                                 }
2446
2447                         params.mac_buf.vaddr =
2448                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2449                         params.mac_buf.size = mac_len;
2450                 }
2451         } else {
2452                 uint64_t *op = mdata;
2453
2454                 /* Need space for storing generated mac */
2455                 space += 2 * sizeof(uint64_t);
2456
2457                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2458                 params.mac_buf.size = mac_len;
2459                 space += RTE_ALIGN_CEIL(mac_len, 8);
2460                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2461                 op[1] = mac_len;
2462                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2463         }
2464
2465         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2466         params.meta_buf.size -= space;
2467
2468         /* Out of place processing */
2469         params.src_iov = (void *)src;
2470
2471         /*Store SG I/O in the api for reuse */
2472         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2473                 plt_dp_err("Prepare src iov failed");
2474                 ret = -EINVAL;
2475                 goto free_mdata_and_exit;
2476         }
2477
2478         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2479         if (ret)
2480                 goto free_mdata_and_exit;
2481
2482         return 0;
2483
2484 free_mdata_and_exit:
2485         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2486                 rte_mempool_put(m_info->pool, infl_req->mdata);
2487 err_exit:
2488         return ret;
2489 }
2490 #endif /*_CNXK_SE_H_ */