net/cnxk: add TM shaper and node operations
[dpdk.git] / drivers / crypto / cnxk / cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
/* Bytes needed for one IOV descriptor: header plus space for the maximum
 * number of scatter-gather buffer pointers (ROC_SE_MAX_SG_CNT).
 */
#define SRC_IOV_SIZE                                                           \
        (sizeof(struct roc_se_iov_ptr) +                                       \
         (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE                                                           \
        (sizeof(struct roc_se_iov_ptr) +                                       \
         (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
/* Per-session state for the CNXK symmetric-crypto (SE) engine. */
struct cnxk_se_sess {
        uint16_t cpt_op : 4;       /* CPT operation type */
        uint16_t zsk_flag : 4;     /* presumably ZUC/SNOW3G/Kasumi selector — TODO confirm */
        uint16_t aes_gcm : 1;      /* set for AES-GCM sessions */
        uint16_t aes_ctr : 1;      /* set for AES-CTR sessions */
        uint16_t chacha_poly : 1;  /* set for ChaCha20-Poly1305 sessions */
        uint16_t is_null : 1;      /* set for NULL cipher/auth sessions */
        uint16_t is_gmac : 1;      /* set for AES-GMAC (auth-only) sessions */
        uint16_t rsvd1 : 3;        /* reserved, keep zero */
        uint16_t aad_length;       /* AAD length in bytes (AEAD) */
        uint8_t mac_len;           /* digest (MAC) length in bytes */
        uint8_t iv_length;         /* cipher IV length in bytes */
        uint8_t auth_iv_length;    /* auth IV length in bytes */
        uint16_t iv_offset;        /* presumably cipher IV offset within the op — verify against caller */
        uint16_t auth_iv_offset;   /* presumably auth IV offset within the op — verify against caller */
        uint32_t salt;             /* salt material (see cpt_fc_salt_update()) */
        uint64_t cpt_inst_w7;      /* precomputed CPT instruction word 7 */
        struct roc_se_ctx roc_se_ctx; /* ROC-layer hardware context */
} __rte_cache_aligned;
38
39 static inline void
40 pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type)
41 {
42         uint32_t *iv_s_temp, iv_temp[4];
43         int j;
44
45         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
46                 /*
47                  * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
48                  * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
49                  */
50
51                 iv_s_temp = (uint32_t *)iv_s;
52
53                 for (j = 0; j < 4; j++)
54                         iv_temp[j] = iv_s_temp[3 - j];
55                 memcpy(iv_d, iv_temp, 16);
56         } else {
57                 /* ZUC doesn't need a swap */
58                 memcpy(iv_d, iv_s, 16);
59         }
60 }
61
62 static __rte_always_inline int
63 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
64 {
65         uint16_t mac_len = auth->digest_length;
66         int ret;
67
68         switch (auth->algo) {
69         case RTE_CRYPTO_AUTH_MD5:
70         case RTE_CRYPTO_AUTH_MD5_HMAC:
71                 ret = (mac_len == 16) ? 0 : -1;
72                 break;
73         case RTE_CRYPTO_AUTH_SHA1:
74         case RTE_CRYPTO_AUTH_SHA1_HMAC:
75                 ret = (mac_len == 20) ? 0 : -1;
76                 break;
77         case RTE_CRYPTO_AUTH_SHA224:
78         case RTE_CRYPTO_AUTH_SHA224_HMAC:
79                 ret = (mac_len == 28) ? 0 : -1;
80                 break;
81         case RTE_CRYPTO_AUTH_SHA256:
82         case RTE_CRYPTO_AUTH_SHA256_HMAC:
83                 ret = (mac_len == 32) ? 0 : -1;
84                 break;
85         case RTE_CRYPTO_AUTH_SHA384:
86         case RTE_CRYPTO_AUTH_SHA384_HMAC:
87                 ret = (mac_len == 48) ? 0 : -1;
88                 break;
89         case RTE_CRYPTO_AUTH_SHA512:
90         case RTE_CRYPTO_AUTH_SHA512_HMAC:
91                 ret = (mac_len == 64) ? 0 : -1;
92                 break;
93         case RTE_CRYPTO_AUTH_NULL:
94                 ret = 0;
95                 break;
96         default:
97                 ret = -1;
98         }
99
100         return ret;
101 }
102
/* Refresh the 4-byte salt at the head of the FC context encryption IV
 * (used per-op when the salt differs from the session's cached one).
 */
static __rte_always_inline void
cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
{
        struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
        memcpy(fctx->enc.encr_iv, salt, 4);
}
109
110 static __rte_always_inline uint32_t
111 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
112              uint32_t size)
113 {
114         struct roc_se_sglist_comp *to = &list[i >> 2];
115
116         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
117         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
118         i++;
119         return i;
120 }
121
122 static __rte_always_inline uint32_t
123 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
124                       struct roc_se_buf_ptr *from)
125 {
126         struct roc_se_sglist_comp *to = &list[i >> 2];
127
128         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
129         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
130         i++;
131         return i;
132 }
133
134 static __rte_always_inline uint32_t
135 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
136                           struct roc_se_buf_ptr *from, uint32_t *psize)
137 {
138         struct roc_se_sglist_comp *to = &list[i >> 2];
139         uint32_t size = *psize;
140         uint32_t e_len;
141
142         e_len = (size > from->size) ? from->size : size;
143         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
144         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
145         *psize -= e_len;
146         i++;
147         return i;
148 }
149
150 /*
151  * This fills the MC expected SGIO list
152  * from IOV given by user.
153  */
154 static __rte_always_inline uint32_t
155 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
156                       struct roc_se_iov_ptr *from, uint32_t from_offset,
157                       uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
158                       uint32_t extra_offset)
159 {
160         int32_t j;
161         uint32_t extra_len = extra_buf ? extra_buf->size : 0;
162         uint32_t size = *psize;
163         struct roc_se_buf_ptr *bufs;
164
165         bufs = from->bufs;
166         for (j = 0; (j < from->buf_cnt) && size; j++) {
167                 uint64_t e_vaddr;
168                 uint32_t e_len;
169                 struct roc_se_sglist_comp *to = &list[i >> 2];
170
171                 if (unlikely(from_offset)) {
172                         if (from_offset >= bufs[j].size) {
173                                 from_offset -= bufs[j].size;
174                                 continue;
175                         }
176                         e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
177                         e_len = (size > (bufs[j].size - from_offset)) ?
178                                         (bufs[j].size - from_offset) :
179                                         size;
180                         from_offset = 0;
181                 } else {
182                         e_vaddr = (uint64_t)bufs[j].vaddr;
183                         e_len = (size > bufs[j].size) ? bufs[j].size : size;
184                 }
185
186                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
187                 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
188
189                 if (extra_len && (e_len >= extra_offset)) {
190                         /* Break the data at given offset */
191                         uint32_t next_len = e_len - extra_offset;
192                         uint64_t next_vaddr = e_vaddr + extra_offset;
193
194                         if (!extra_offset) {
195                                 i--;
196                         } else {
197                                 e_len = extra_offset;
198                                 size -= e_len;
199                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
200                         }
201
202                         extra_len = RTE_MIN(extra_len, size);
203                         /* Insert extra data ptr */
204                         if (extra_len) {
205                                 i++;
206                                 to = &list[i >> 2];
207                                 to->u.s.len[i % 4] =
208                                         rte_cpu_to_be_16(extra_len);
209                                 to->ptr[i % 4] = rte_cpu_to_be_64(
210                                         (uint64_t)extra_buf->vaddr);
211                                 size -= extra_len;
212                         }
213
214                         next_len = RTE_MIN(next_len, size);
215                         /* insert the rest of the data */
216                         if (next_len) {
217                                 i++;
218                                 to = &list[i >> 2];
219                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
220                                 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
221                                 size -= next_len;
222                         }
223                         extra_len = 0;
224
225                 } else {
226                         size -= e_len;
227                 }
228                 if (extra_offset)
229                         extra_offset -= size;
230                 i++;
231         }
232
233         *psize = size;
234         return (uint32_t)i;
235 }
236
237 static __rte_always_inline int
238 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
239                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
240 {
241         void *m_vaddr = params->meta_buf.vaddr;
242         uint32_t size, i;
243         uint16_t data_len, mac_len, key_len;
244         roc_se_auth_type hash_type;
245         struct roc_se_ctx *ctx;
246         struct roc_se_sglist_comp *gather_comp;
247         struct roc_se_sglist_comp *scatter_comp;
248         uint8_t *in_buffer;
249         uint32_t g_size_bytes, s_size_bytes;
250         union cpt_inst_w4 cpt_inst_w4;
251
252         ctx = params->ctx_buf.vaddr;
253
254         hash_type = ctx->hash_type;
255         mac_len = ctx->mac_len;
256         key_len = ctx->auth_key_len;
257         data_len = ROC_SE_AUTH_DLEN(d_lens);
258
259         /*GP op header */
260         cpt_inst_w4.s.opcode_minor = 0;
261         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
262         if (ctx->hmac) {
263                 cpt_inst_w4.s.opcode_major =
264                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
265                 cpt_inst_w4.s.param1 = key_len;
266                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
267         } else {
268                 cpt_inst_w4.s.opcode_major =
269                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
270                 cpt_inst_w4.s.param1 = 0;
271                 cpt_inst_w4.s.dlen = data_len;
272         }
273
274         /* Null auth only case enters the if */
275         if (unlikely(!hash_type && !ctx->enc_cipher)) {
276                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
277                 /* Minor op is passthrough */
278                 cpt_inst_w4.s.opcode_minor = 0x03;
279                 /* Send out completion code only */
280                 cpt_inst_w4.s.param2 = 0x1;
281         }
282
283         /* DPTR has SG list */
284         in_buffer = m_vaddr;
285
286         ((uint16_t *)in_buffer)[0] = 0;
287         ((uint16_t *)in_buffer)[1] = 0;
288
289         /* TODO Add error check if space will be sufficient */
290         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
291
292         /*
293          * Input gather list
294          */
295
296         i = 0;
297
298         if (ctx->hmac) {
299                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
300                 /* Key */
301                 i = fill_sg_comp(gather_comp, i, k_vaddr,
302                                  RTE_ALIGN_CEIL(key_len, 8));
303         }
304
305         /* input data */
306         size = data_len;
307         if (size) {
308                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
309                                           &size, NULL, 0);
310                 if (unlikely(size)) {
311                         plt_dp_err("Insufficient dst IOV size, short by %dB",
312                                    size);
313                         return -1;
314                 }
315         } else {
316                 /*
317                  * Looks like we need to support zero data
318                  * gather ptr in case of hash & hmac
319                  */
320                 i++;
321         }
322         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
323         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
324
325         /*
326          * Output Gather list
327          */
328
329         i = 0;
330         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
331                                                      g_size_bytes);
332
333         if (flags & ROC_SE_VALID_MAC_BUF) {
334                 if (unlikely(params->mac_buf.size < mac_len)) {
335                         plt_dp_err("Insufficient MAC size");
336                         return -1;
337                 }
338
339                 size = mac_len;
340                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
341                                               &size);
342         } else {
343                 size = mac_len;
344                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
345                                           data_len, &size, NULL, 0);
346                 if (unlikely(size)) {
347                         plt_dp_err("Insufficient dst IOV size, short by %dB",
348                                    size);
349                         return -1;
350                 }
351         }
352
353         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
354         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
355
356         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
357
358         /* This is DPTR len in case of SG mode */
359         cpt_inst_w4.s.dlen = size;
360
361         inst->dptr = (uint64_t)in_buffer;
362         inst->w4.u64 = cpt_inst_w4.u64;
363
364         return 0;
365 }
366
/*
 * Prepare a CPT flexi-crypto (FC) encrypt(+HMAC) instruction.
 *
 * Computes the encrypt/auth offsets and lengths from the packed @d_offs /
 * @d_lens words, handles the AAD-as-separate-buffer and GMAC special
 * cases, then emits either a direct-mode instruction (single in-place
 * buffer with headroom for the offset control word + IV) or a full SG
 * gather/scatter list built in the meta buffer.
 *
 * Returns 0 on success, -1 on insufficient buffer space or an offset
 * exceeding the control-word field widths.
 */
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
                  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
        uint32_t iv_offset = 0;
        int32_t inputlen, outputlen, enc_dlen, auth_dlen;
        struct roc_se_ctx *se_ctx;
        uint32_t cipher_type, hash_type;
        uint32_t mac_len, size;
        uint8_t iv_len = 16;
        struct roc_se_buf_ptr *aad_buf = NULL;
        uint32_t encr_offset, auth_offset;
        uint32_t encr_data_len, auth_data_len, aad_len = 0;
        uint32_t passthrough_len = 0;
        union cpt_inst_w4 cpt_inst_w4;
        void *offset_vaddr;
        uint8_t op_minor;

        encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
        auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
        encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
        auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /* We don't support both AAD and auth data separately */
                auth_data_len = 0;
                auth_offset = 0;
                aad_len = fc_params->aad_buf.size;
                aad_buf = &fc_params->aad_buf;
        }
        se_ctx = fc_params->ctx_buf.vaddr;
        cipher_type = se_ctx->enc_cipher;
        hash_type = se_ctx->hash_type;
        mac_len = se_ctx->mac_len;
        op_minor = se_ctx->template_w4.s.opcode_minor;

        /* No IV buffer: IV position is conveyed via the offset word instead */
        if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
                iv_len = 0;
                iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
        }

        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /*
                 * When AAD is given, data above encr_offset is pass through
                 * Since AAD is given as separate pointer and not as offset,
                 * this is a special case as we need to fragment input data
                 * into passthrough + encr_data and then insert AAD in between.
                 */
                if (hash_type != ROC_SE_GMAC_TYPE) {
                        passthrough_len = encr_offset;
                        auth_offset = passthrough_len + iv_len;
                        encr_offset = passthrough_len + aad_len + iv_len;
                        auth_data_len = aad_len + encr_data_len;
                } else {
                        passthrough_len = 16 + aad_len;
                        auth_offset = passthrough_len + iv_len;
                        auth_data_len = aad_len;
                }
        } else {
                /* Offsets are relative to the start of data; account for
                 * the IV that is prepended in the DPTR. */
                encr_offset += iv_len;
                auth_offset += iv_len;
        }

        /* Encryption */
        cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
        cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
        cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

        /* GMAC authenticates only; no cipher data */
        if (hash_type == ROC_SE_GMAC_TYPE) {
                encr_offset = 0;
                encr_data_len = 0;
        }

        auth_dlen = auth_offset + auth_data_len;
        enc_dlen = encr_data_len + encr_offset;
        if (unlikely(encr_data_len & 0xf)) {
                /* NOTE(review): both arms round up to 8 bytes; the AES arm
                 * presumably intended a 16-byte block — confirm against the
                 * microcode requirements before changing. */
                if ((cipher_type == ROC_SE_DES3_CBC) ||
                    (cipher_type == ROC_SE_DES3_ECB))
                        enc_dlen =
                                RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
                else if (likely((cipher_type == ROC_SE_AES_CBC) ||
                                (cipher_type == ROC_SE_AES_ECB)))
                        enc_dlen =
                                RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
        }

        /* Output carries the MAC after the larger of the two regions */
        if (unlikely(auth_dlen > enc_dlen)) {
                inputlen = auth_dlen;
                outputlen = auth_dlen + mac_len;
        } else {
                inputlen = enc_dlen;
                outputlen = enc_dlen + mac_len;
        }

        /* HMAC-first mode emits no trailing digest */
        if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
                outputlen = enc_dlen;

        /* GP op header */
        cpt_inst_w4.s.param1 = encr_data_len;
        cpt_inst_w4.s.param2 = auth_data_len;

        /*
         * In cn9k, cn10k since we have a limitation of
         * IV & Offset control word not part of instruction
         * and need to be part of Data Buffer, we check if
         * head room is there and then only do the Direct mode processing
         */
        if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
                   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
                void *dm_vaddr = fc_params->bufs[0].vaddr;

                /* Use Direct mode */

                /* Control word + IV live in the headroom before the data */
                offset_vaddr =
                        (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;

                /* DPTR */
                inst->dptr = (uint64_t)offset_vaddr;

                /* RPTR should just exclude offset control word */
                inst->rptr = (uint64_t)dm_vaddr - iv_len;

                cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

                if (likely(iv_len)) {
                        /* 16-byte IV copied as two 64-bit words */
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }

        } else {
                void *m_vaddr = fc_params->meta_buf.vaddr;
                uint32_t i, g_size_bytes, s_size_bytes;
                struct roc_se_sglist_comp *gather_comp;
                struct roc_se_sglist_comp *scatter_comp;
                uint8_t *in_buffer;

                /* This falls under strict SG mode */
                offset_vaddr = m_vaddr;
                size = ROC_SE_OFF_CTRL_LEN + iv_len;

                m_vaddr = (uint8_t *)m_vaddr + size;

                cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

                if (likely(iv_len)) {
                        /* 16-byte IV copied as two 64-bit words */
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }

                /* DPTR has SG list; first 8 bytes are the list header */
                in_buffer = m_vaddr;

                ((uint16_t *)in_buffer)[0] = 0;
                ((uint16_t *)in_buffer)[1] = 0;

                /* TODO Add error check if space will be sufficient */
                gather_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

                /*
                 * Input Gather List
                 */

                i = 0;

                /* Offset control word that includes iv */
                i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
                                 ROC_SE_OFF_CTRL_LEN + iv_len);

                /* Add input data */
                size = inputlen - iv_len;
                if (likely(size)) {
                        uint32_t aad_offset = aad_len ? passthrough_len : 0;

                        if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
                                i = fill_sg_comp_from_buf_min(
                                        gather_comp, i, fc_params->bufs, &size);
                        } else {
                                i = fill_sg_comp_from_iov(
                                        gather_comp, i, fc_params->src_iov, 0,
                                        &size, aad_buf, aad_offset);
                        }

                        if (unlikely(size)) {
                                plt_dp_err("Insufficient buffer space,"
                                           " size %d needed",
                                           size);
                                return -1;
                        }
                }
                ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
                g_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                /*
                 * Output Scatter list
                 */
                i = 0;
                scatter_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                      g_size_bytes);

                /* Add IV */
                if (likely(iv_len)) {
                        i = fill_sg_comp(scatter_comp, i,
                                         (uint64_t)offset_vaddr +
                                                 ROC_SE_OFF_CTRL_LEN,
                                         iv_len);
                }

                /* output data or output data + digest*/
                if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
                        size = outputlen - iv_len - mac_len;
                        if (size) {
                                uint32_t aad_offset =
                                        aad_len ? passthrough_len : 0;

                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                scatter_comp, i,
                                                fc_params->bufs, &size);
                                } else {
                                        i = fill_sg_comp_from_iov(
                                                scatter_comp, i,
                                                fc_params->dst_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer"
                                                   " space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                        /* mac_data */
                        if (mac_len) {
                                i = fill_sg_comp_from_buf(scatter_comp, i,
                                                          &fc_params->mac_buf);
                        }
                } else {
                        /* Output including mac */
                        size = outputlen - iv_len;
                        if (likely(size)) {
                                uint32_t aad_offset =
                                        aad_len ? passthrough_len : 0;

                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                scatter_comp, i,
                                                fc_params->bufs, &size);
                                } else {
                                        i = fill_sg_comp_from_iov(
                                                scatter_comp, i,
                                                fc_params->dst_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer"
                                                   " space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                }
                ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
                s_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

                /* This is DPTR len in case of SG mode */
                cpt_inst_w4.s.dlen = size;

                inst->dptr = (uint64_t)in_buffer;
        }

        /* Control word fields: 16-bit encr offset, 8-bit iv/auth offsets */
        if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
                     (auth_offset >> 8))) {
                plt_dp_err("Offset not supported");
                plt_dp_err("enc_offset: %d", encr_offset);
                plt_dp_err("iv_offset : %d", iv_offset);
                plt_dp_err("auth_offset: %d", auth_offset);
                return -1;
        }

        *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
                ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));

        inst->w4.u64 = cpt_inst_w4.u64;
        return 0;
}
666
667 static __rte_always_inline int
668 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
669                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
670 {
671         uint32_t iv_offset = 0, size;
672         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
673         struct roc_se_ctx *se_ctx;
674         int32_t hash_type, mac_len;
675         uint8_t iv_len = 16;
676         struct roc_se_buf_ptr *aad_buf = NULL;
677         uint32_t encr_offset, auth_offset;
678         uint32_t encr_data_len, auth_data_len, aad_len = 0;
679         uint32_t passthrough_len = 0;
680         union cpt_inst_w4 cpt_inst_w4;
681         void *offset_vaddr;
682         uint8_t op_minor;
683
684         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
685         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
686         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
687         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
688
689         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
690                 /* We don't support both AAD and auth data separately */
691                 auth_data_len = 0;
692                 auth_offset = 0;
693                 aad_len = fc_params->aad_buf.size;
694                 aad_buf = &fc_params->aad_buf;
695         }
696
697         se_ctx = fc_params->ctx_buf.vaddr;
698         hash_type = se_ctx->hash_type;
699         mac_len = se_ctx->mac_len;
700         op_minor = se_ctx->template_w4.s.opcode_minor;
701
702         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
703                 iv_len = 0;
704                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
705         }
706
707         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
708                 /*
709                  * When AAD is given, data above encr_offset is pass through
710                  * Since AAD is given as separate pointer and not as offset,
711                  * this is a special case as we need to fragment input data
712                  * into passthrough + encr_data and then insert AAD in between.
713                  */
714                 if (hash_type != ROC_SE_GMAC_TYPE) {
715                         passthrough_len = encr_offset;
716                         auth_offset = passthrough_len + iv_len;
717                         encr_offset = passthrough_len + aad_len + iv_len;
718                         auth_data_len = aad_len + encr_data_len;
719                 } else {
720                         passthrough_len = 16 + aad_len;
721                         auth_offset = passthrough_len + iv_len;
722                         auth_data_len = aad_len;
723                 }
724         } else {
725                 encr_offset += iv_len;
726                 auth_offset += iv_len;
727         }
728
729         /* Decryption */
730         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
731         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
732         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
733
734         if (hash_type == ROC_SE_GMAC_TYPE) {
735                 encr_offset = 0;
736                 encr_data_len = 0;
737         }
738
739         enc_dlen = encr_offset + encr_data_len;
740         auth_dlen = auth_offset + auth_data_len;
741
742         if (auth_dlen > enc_dlen) {
743                 inputlen = auth_dlen + mac_len;
744                 outputlen = auth_dlen;
745         } else {
746                 inputlen = enc_dlen + mac_len;
747                 outputlen = enc_dlen;
748         }
749
750         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
751                 outputlen = inputlen = enc_dlen;
752
753         cpt_inst_w4.s.param1 = encr_data_len;
754         cpt_inst_w4.s.param2 = auth_data_len;
755
756         /*
757          * In cn9k, cn10k since we have a limitation of
758          * IV & Offset control word not part of instruction
759          * and need to be part of Data Buffer, we check if
760          * head room is there and then only do the Direct mode processing
761          */
762         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
763                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
764                 void *dm_vaddr = fc_params->bufs[0].vaddr;
765
766                 /* Use Direct mode */
767
768                 offset_vaddr =
769                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
770                 inst->dptr = (uint64_t)offset_vaddr;
771
772                 /* RPTR should just exclude offset control word */
773                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
774
775                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
776
777                 if (likely(iv_len)) {
778                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
779                                                       ROC_SE_OFF_CTRL_LEN);
780                         uint64_t *src = fc_params->iv_buf;
781                         dest[0] = src[0];
782                         dest[1] = src[1];
783                 }
784
785         } else {
786                 void *m_vaddr = fc_params->meta_buf.vaddr;
787                 uint32_t g_size_bytes, s_size_bytes;
788                 struct roc_se_sglist_comp *gather_comp;
789                 struct roc_se_sglist_comp *scatter_comp;
790                 uint8_t *in_buffer;
791                 uint8_t i = 0;
792
793                 /* This falls under strict SG mode */
794                 offset_vaddr = m_vaddr;
795                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
796
797                 m_vaddr = (uint8_t *)m_vaddr + size;
798
799                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
800
801                 if (likely(iv_len)) {
802                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
803                                                       ROC_SE_OFF_CTRL_LEN);
804                         uint64_t *src = fc_params->iv_buf;
805                         dest[0] = src[0];
806                         dest[1] = src[1];
807                 }
808
809                 /* DPTR has SG list */
810                 in_buffer = m_vaddr;
811
812                 ((uint16_t *)in_buffer)[0] = 0;
813                 ((uint16_t *)in_buffer)[1] = 0;
814
815                 /* TODO Add error check if space will be sufficient */
816                 gather_comp =
817                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
818
819                 /*
820                  * Input Gather List
821                  */
822                 i = 0;
823
824                 /* Offset control word that includes iv */
825                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
826                                  ROC_SE_OFF_CTRL_LEN + iv_len);
827
828                 /* Add input data */
829                 if (flags & ROC_SE_VALID_MAC_BUF) {
830                         size = inputlen - iv_len - mac_len;
831                         if (size) {
832                                 /* input data only */
833                                 if (unlikely(flags &
834                                              ROC_SE_SINGLE_BUF_INPLACE)) {
835                                         i = fill_sg_comp_from_buf_min(
836                                                 gather_comp, i, fc_params->bufs,
837                                                 &size);
838                                 } else {
839                                         uint32_t aad_offset =
840                                                 aad_len ? passthrough_len : 0;
841
842                                         i = fill_sg_comp_from_iov(
843                                                 gather_comp, i,
844                                                 fc_params->src_iov, 0, &size,
845                                                 aad_buf, aad_offset);
846                                 }
847                                 if (unlikely(size)) {
848                                         plt_dp_err("Insufficient buffer"
849                                                    " space, size %d needed",
850                                                    size);
851                                         return -1;
852                                 }
853                         }
854
855                         /* mac data */
856                         if (mac_len) {
857                                 i = fill_sg_comp_from_buf(gather_comp, i,
858                                                           &fc_params->mac_buf);
859                         }
860                 } else {
861                         /* input data + mac */
862                         size = inputlen - iv_len;
863                         if (size) {
864                                 if (unlikely(flags &
865                                              ROC_SE_SINGLE_BUF_INPLACE)) {
866                                         i = fill_sg_comp_from_buf_min(
867                                                 gather_comp, i, fc_params->bufs,
868                                                 &size);
869                                 } else {
870                                         uint32_t aad_offset =
871                                                 aad_len ? passthrough_len : 0;
872
873                                         if (unlikely(!fc_params->src_iov)) {
874                                                 plt_dp_err("Bad input args");
875                                                 return -1;
876                                         }
877
878                                         i = fill_sg_comp_from_iov(
879                                                 gather_comp, i,
880                                                 fc_params->src_iov, 0, &size,
881                                                 aad_buf, aad_offset);
882                                 }
883
884                                 if (unlikely(size)) {
885                                         plt_dp_err("Insufficient buffer"
886                                                    " space, size %d needed",
887                                                    size);
888                                         return -1;
889                                 }
890                         }
891                 }
892                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
893                 g_size_bytes =
894                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
895
896                 /*
897                  * Output Scatter List
898                  */
899
900                 i = 0;
901                 scatter_comp =
902                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
903                                                       g_size_bytes);
904
905                 /* Add iv */
906                 if (iv_len) {
907                         i = fill_sg_comp(scatter_comp, i,
908                                          (uint64_t)offset_vaddr +
909                                                  ROC_SE_OFF_CTRL_LEN,
910                                          iv_len);
911                 }
912
913                 /* Add output data */
914                 size = outputlen - iv_len;
915                 if (size) {
916                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
917                                 /* handle single buffer here */
918                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
919                                                               fc_params->bufs,
920                                                               &size);
921                         } else {
922                                 uint32_t aad_offset =
923                                         aad_len ? passthrough_len : 0;
924
925                                 if (unlikely(!fc_params->dst_iov)) {
926                                         plt_dp_err("Bad input args");
927                                         return -1;
928                                 }
929
930                                 i = fill_sg_comp_from_iov(
931                                         scatter_comp, i, fc_params->dst_iov, 0,
932                                         &size, aad_buf, aad_offset);
933                         }
934
935                         if (unlikely(size)) {
936                                 plt_dp_err("Insufficient buffer space,"
937                                            " size %d needed",
938                                            size);
939                                 return -1;
940                         }
941                 }
942
943                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
944                 s_size_bytes =
945                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
946
947                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
948
949                 /* This is DPTR len in case of SG mode */
950                 cpt_inst_w4.s.dlen = size;
951
952                 inst->dptr = (uint64_t)in_buffer;
953         }
954
955         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
956                      (auth_offset >> 8))) {
957                 plt_dp_err("Offset not supported");
958                 plt_dp_err("enc_offset: %d", encr_offset);
959                 plt_dp_err("iv_offset : %d", iv_offset);
960                 plt_dp_err("auth_offset: %d", auth_offset);
961                 return -1;
962         }
963
964         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
965                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
966                 ((uint64_t)auth_offset));
967
968         inst->w4.u64 = cpt_inst_w4.u64;
969         return 0;
970 }
971
static __rte_always_inline int
cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t pdcp_alg_type;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags, iv_len = 16;
	uint64_t offset_ctrl;
	uint64_t *offset_vaddr;
	uint8_t *iv_s;
	union cpt_inst_w4 cpt_inst_w4;

	/*
	 * Build a CPT instruction for a ZUC/SNOW3G operation.
	 *
	 * The session context selects between the cipher-only and
	 * auth-only paths (zsk_flags) and the algorithm variant
	 * (pdcp_alg_type). Returns 0 on success, -1 on bad offsets or
	 * insufficient buffer space.
	 */
	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;
	pdcp_alg_type = se_ctx->pdcp_alg_type;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;

	/* Minor opcode (cipher/auth selection) comes from the session template */
	cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;

	if (flags == 0x1) {
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

		/* EIA3 or UIA2 */
		auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;

		/* consider iv len */
		auth_offset += iv_len;

		/* Auth data length is in bits; round up to whole bytes */
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;

		/* Auth offset occupies the low byte of the offset control word */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

		encr_data_len = 0;
		encr_offset = 0;

		iv_s = params->auth_iv_buf;
	} else {
		/* EEA3 or UEA2 */
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

		encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		/* Cipher data length is in bits; round up to whole bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		auth_data_len = 0;
		auth_offset = 0;

		iv_s = params->iv_buf;
	}

	/* Offsets wider than their control-word fields cannot be encoded */
	if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * In cn9k, cn10k since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;

		/* Use Direct mode */

		/* Offset control word + IV are placed in the headroom */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    ROC_SE_OFF_CTRL_LEN - iv_len);

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;
		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		/* IV goes right after the offset control word, in MC byte order */
		uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type);

		*offset_vaddr = offset_ctrl;
	} else {
		void *m_vaddr = params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint8_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		/* Clear the SG list header (gather/scatter entry counts) */
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		/* Gather entry count, big-endian as the microcode expects */
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		/* Components are packed 4 per 64-byte SG entry */
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */

		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		if (flags == 0x1) {
			/* IV in SLIST only for EEA3 & UEA2 */
			iv_len = 0;
		}

		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		if (req_flags & ROC_SE_VALID_MAC_BUF) {
			/* MAC goes to a separate buffer; exclude it here */
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}
		}
		/* Scatter entry count, big-endian */
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1217
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	/*
	 * Build a CPT instruction for a KASUMI F8 (cipher) or F9 (auth)
	 * operation in SG (DMA) mode. zsk_flags == 0 selects cipher,
	 * otherwise auth. Returns 0 on success, -1 on unsupported offsets
	 * or insufficient buffer space.
	 */
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t i = 0;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	/* Offsets arrive in bits; microcode expects bytes */
	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;

	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	/* Direction bit is carried in byte 8 of the IV buffer */
	dir = iv_s[8] & 0x1;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/* consider iv len */
	if (flags == 0x0) {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	/* Clear the SG list header (gather/scatter entry counts) */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		/* Cipher: data length is in bits; round up to whole bytes */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		if (unlikely((encr_offset >> 16))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("enc_offset: %d", encr_offset);
			return -1;
		}
	} else {
		/* Auth: output is only the MAC */
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
		if (unlikely((auth_offset >> 8))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("auth_offset: %d", auth_offset);
			return -1;
		}
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);

		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	/* Gather entry count, big-endian as the microcode expects */
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	/* Components are packed 4 per SG entry */
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	if (flags == 0x1) {
		/* IV in SLIST only for F8 */
		iv_len = 0;
	}

	/* IV */
	if (iv_len) {
		i = fill_sg_comp(scatter_comp, i,
				 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & ROC_SE_VALID_MAC_BUF) {
		/* MAC goes to a separate buffer; exclude it here */
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
	}
	/* Scatter entry count, big-endian */
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1410
static __rte_always_inline int
cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	/*
	 * Build a CPT instruction for KASUMI F8 decryption in SG (DMA)
	 * mode. Cipher-only path: no MAC handling. Returns 0 on success,
	 * -1 on unsupported offsets or insufficient buffer space.
	 */
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct roc_se_ctx *se_ctx;
	uint8_t i = 0, iv_len = 8;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	/* Offset arrives in bits; microcode expects bytes */
	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;

	cpt_inst_w4.u64 = 0;
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;

	/* consider iv len */
	encr_offset += iv_len;

	/* Cipher data length is in bits; round up to whole bytes */
	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	/* Clear the SG list header (gather/scatter entry counts) */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	if (unlikely((encr_offset >> 16))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		return -1;
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
	       iv_len);

	/* Add input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	/* Gather entry count, big-endian as the microcode expects */
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	/* Components are packed 4 per SG entry */
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	/* IV */
	i = fill_sg_comp(scatter_comp, i,
			 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);

	/* Add output data */
	size = outputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	/* Scatter entry count, big-endian */
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
1541
1542 static __rte_always_inline int
1543 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1544                      struct roc_se_fc_params *fc_params,
1545                      struct cpt_inst_s *inst)
1546 {
1547         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1548         uint8_t fc_type;
1549         int ret = -1;
1550
1551         fc_type = ctx->fc_type;
1552
1553         if (likely(fc_type == ROC_SE_FC_GEN)) {
1554                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1555         } else if (fc_type == ROC_SE_PDCP) {
1556                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1557                                           inst);
1558         } else if (fc_type == ROC_SE_KASUMI) {
1559                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1560         }
1561
1562         /*
1563          * For AUTH_ONLY case,
1564          * MC only supports digest generation and verification
1565          * should be done in software by memcmp()
1566          */
1567
1568         return ret;
1569 }
1570
1571 static __rte_always_inline int
1572 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1573                      struct roc_se_fc_params *fc_params,
1574                      struct cpt_inst_s *inst)
1575 {
1576         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1577         uint8_t fc_type;
1578         int ret = -1;
1579
1580         fc_type = ctx->fc_type;
1581
1582         if (likely(fc_type == ROC_SE_FC_GEN)) {
1583                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1584         } else if (fc_type == ROC_SE_PDCP) {
1585                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1586                                           inst);
1587         } else if (fc_type == ROC_SE_KASUMI) {
1588                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1589                                           inst);
1590         } else if (fc_type == ROC_SE_HASH_HMAC) {
1591                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1592         }
1593
1594         return ret;
1595 }
1596
1597 static __rte_always_inline int
1598 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1599 {
1600         struct rte_crypto_aead_xform *aead_form;
1601         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1602         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1603         uint32_t cipher_key_len = 0;
1604         uint8_t aes_gcm = 0;
1605         aead_form = &xform->aead;
1606
1607         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1608                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1609                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1610         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1611                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1612                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1613         } else {
1614                 plt_dp_err("Unknown aead operation\n");
1615                 return -1;
1616         }
1617         switch (aead_form->algo) {
1618         case RTE_CRYPTO_AEAD_AES_GCM:
1619                 enc_type = ROC_SE_AES_GCM;
1620                 cipher_key_len = 16;
1621                 aes_gcm = 1;
1622                 break;
1623         case RTE_CRYPTO_AEAD_AES_CCM:
1624                 plt_dp_err("Crypto: Unsupported cipher algo %u",
1625                            aead_form->algo);
1626                 return -1;
1627         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1628                 enc_type = ROC_SE_CHACHA20;
1629                 auth_type = ROC_SE_POLY1305;
1630                 cipher_key_len = 32;
1631                 sess->chacha_poly = 1;
1632                 break;
1633         default:
1634                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1635                            aead_form->algo);
1636                 return -1;
1637         }
1638         if (aead_form->key.length < cipher_key_len) {
1639                 plt_dp_err("Invalid cipher params keylen %u",
1640                            aead_form->key.length);
1641                 return -1;
1642         }
1643         sess->zsk_flag = 0;
1644         sess->aes_gcm = aes_gcm;
1645         sess->mac_len = aead_form->digest_length;
1646         sess->iv_offset = aead_form->iv.offset;
1647         sess->iv_length = aead_form->iv.length;
1648         sess->aad_length = aead_form->aad_length;
1649
1650         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1651                                          aead_form->key.data,
1652                                          aead_form->key.length, NULL)))
1653                 return -1;
1654
1655         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1656                                          aead_form->digest_length)))
1657                 return -1;
1658
1659         return 0;
1660 }
1661
1662 static __rte_always_inline int
1663 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1664 {
1665         struct rte_crypto_cipher_xform *c_form;
1666         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1667         uint32_t cipher_key_len = 0;
1668         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1669
1670         c_form = &xform->cipher;
1671
1672         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1673                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1674         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1675                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1676                 if (xform->next != NULL &&
1677                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1678                         /* Perform decryption followed by auth verify */
1679                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1680                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1681                 }
1682         } else {
1683                 plt_dp_err("Unknown cipher operation\n");
1684                 return -1;
1685         }
1686
1687         switch (c_form->algo) {
1688         case RTE_CRYPTO_CIPHER_AES_CBC:
1689                 enc_type = ROC_SE_AES_CBC;
1690                 cipher_key_len = 16;
1691                 break;
1692         case RTE_CRYPTO_CIPHER_3DES_CBC:
1693                 enc_type = ROC_SE_DES3_CBC;
1694                 cipher_key_len = 24;
1695                 break;
1696         case RTE_CRYPTO_CIPHER_DES_CBC:
1697                 /* DES is implemented using 3DES in hardware */
1698                 enc_type = ROC_SE_DES3_CBC;
1699                 cipher_key_len = 8;
1700                 break;
1701         case RTE_CRYPTO_CIPHER_AES_CTR:
1702                 enc_type = ROC_SE_AES_CTR;
1703                 cipher_key_len = 16;
1704                 aes_ctr = 1;
1705                 break;
1706         case RTE_CRYPTO_CIPHER_NULL:
1707                 enc_type = 0;
1708                 is_null = 1;
1709                 break;
1710         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1711                 enc_type = ROC_SE_KASUMI_F8_ECB;
1712                 cipher_key_len = 16;
1713                 zsk_flag = ROC_SE_K_F8;
1714                 break;
1715         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1716                 enc_type = ROC_SE_SNOW3G_UEA2;
1717                 cipher_key_len = 16;
1718                 zsk_flag = ROC_SE_ZS_EA;
1719                 break;
1720         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1721                 enc_type = ROC_SE_ZUC_EEA3;
1722                 cipher_key_len = 16;
1723                 zsk_flag = ROC_SE_ZS_EA;
1724                 break;
1725         case RTE_CRYPTO_CIPHER_AES_XTS:
1726                 enc_type = ROC_SE_AES_XTS;
1727                 cipher_key_len = 16;
1728                 break;
1729         case RTE_CRYPTO_CIPHER_3DES_ECB:
1730                 enc_type = ROC_SE_DES3_ECB;
1731                 cipher_key_len = 24;
1732                 break;
1733         case RTE_CRYPTO_CIPHER_AES_ECB:
1734                 enc_type = ROC_SE_AES_ECB;
1735                 cipher_key_len = 16;
1736                 break;
1737         case RTE_CRYPTO_CIPHER_3DES_CTR:
1738         case RTE_CRYPTO_CIPHER_AES_F8:
1739         case RTE_CRYPTO_CIPHER_ARC4:
1740                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1741                 return -1;
1742         default:
1743                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1744                            c_form->algo);
1745                 return -1;
1746         }
1747
1748         if (c_form->key.length < cipher_key_len) {
1749                 plt_dp_err("Invalid cipher params keylen %u",
1750                            c_form->key.length);
1751                 return -1;
1752         }
1753
1754         sess->zsk_flag = zsk_flag;
1755         sess->aes_gcm = 0;
1756         sess->aes_ctr = aes_ctr;
1757         sess->iv_offset = c_form->iv.offset;
1758         sess->iv_length = c_form->iv.length;
1759         sess->is_null = is_null;
1760
1761         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1762                                          c_form->key.data, c_form->key.length,
1763                                          NULL)))
1764                 return -1;
1765
1766         if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
1767                 roc_se_ctx_swap(&sess->roc_se_ctx);
1768         return 0;
1769 }
1770
1771 static __rte_always_inline int
1772 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1773 {
1774         struct rte_crypto_auth_xform *a_form;
1775         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1776         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1777
1778         if (xform->next != NULL &&
1779             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1780             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1781                 /* Perform auth followed by encryption */
1782                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1783                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1784         }
1785
1786         a_form = &xform->auth;
1787
1788         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1789                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1790         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1791                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1792         else {
1793                 plt_dp_err("Unknown auth operation");
1794                 return -1;
1795         }
1796
1797         switch (a_form->algo) {
1798         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1799                 /* Fall through */
1800         case RTE_CRYPTO_AUTH_SHA1:
1801                 auth_type = ROC_SE_SHA1_TYPE;
1802                 break;
1803         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1804         case RTE_CRYPTO_AUTH_SHA256:
1805                 auth_type = ROC_SE_SHA2_SHA256;
1806                 break;
1807         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1808         case RTE_CRYPTO_AUTH_SHA512:
1809                 auth_type = ROC_SE_SHA2_SHA512;
1810                 break;
1811         case RTE_CRYPTO_AUTH_AES_GMAC:
1812                 auth_type = ROC_SE_GMAC_TYPE;
1813                 aes_gcm = 1;
1814                 break;
1815         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1816         case RTE_CRYPTO_AUTH_SHA224:
1817                 auth_type = ROC_SE_SHA2_SHA224;
1818                 break;
1819         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1820         case RTE_CRYPTO_AUTH_SHA384:
1821                 auth_type = ROC_SE_SHA2_SHA384;
1822                 break;
1823         case RTE_CRYPTO_AUTH_MD5_HMAC:
1824         case RTE_CRYPTO_AUTH_MD5:
1825                 auth_type = ROC_SE_MD5_TYPE;
1826                 break;
1827         case RTE_CRYPTO_AUTH_KASUMI_F9:
1828                 auth_type = ROC_SE_KASUMI_F9_ECB;
1829                 /*
1830                  * Indicate that direction needs to be taken out
1831                  * from end of src
1832                  */
1833                 zsk_flag = ROC_SE_K_F9;
1834                 break;
1835         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1836                 auth_type = ROC_SE_SNOW3G_UIA2;
1837                 zsk_flag = ROC_SE_ZS_IA;
1838                 break;
1839         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1840                 auth_type = ROC_SE_ZUC_EIA3;
1841                 zsk_flag = ROC_SE_ZS_IA;
1842                 break;
1843         case RTE_CRYPTO_AUTH_NULL:
1844                 auth_type = 0;
1845                 is_null = 1;
1846                 break;
1847         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1848         case RTE_CRYPTO_AUTH_AES_CMAC:
1849         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1850                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
1851                 return -1;
1852         default:
1853                 plt_dp_err("Crypto: Undefined Hash algo %u specified",
1854                            a_form->algo);
1855                 return -1;
1856         }
1857
1858         sess->zsk_flag = zsk_flag;
1859         sess->aes_gcm = aes_gcm;
1860         sess->mac_len = a_form->digest_length;
1861         sess->is_null = is_null;
1862         if (zsk_flag) {
1863                 sess->auth_iv_offset = a_form->iv.offset;
1864                 sess->auth_iv_length = a_form->iv.length;
1865         }
1866         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
1867                                          a_form->key.data, a_form->key.length,
1868                                          a_form->digest_length)))
1869                 return -1;
1870
1871         if ((auth_type >= ROC_SE_ZUC_EIA3) &&
1872             (auth_type <= ROC_SE_AES_CMAC_EIA2))
1873                 roc_se_ctx_swap(&sess->roc_se_ctx);
1874
1875         return 0;
1876 }
1877
1878 static __rte_always_inline int
1879 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1880 {
1881         struct rte_crypto_auth_xform *a_form;
1882         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1883         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1884
1885         a_form = &xform->auth;
1886
1887         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1888                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1889         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1890                 sess->cpt_op |= ROC_SE_OP_DECODE;
1891         else {
1892                 plt_dp_err("Unknown auth operation");
1893                 return -1;
1894         }
1895
1896         switch (a_form->algo) {
1897         case RTE_CRYPTO_AUTH_AES_GMAC:
1898                 enc_type = ROC_SE_AES_GCM;
1899                 auth_type = ROC_SE_GMAC_TYPE;
1900                 break;
1901         default:
1902                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1903                            a_form->algo);
1904                 return -1;
1905         }
1906
1907         sess->zsk_flag = 0;
1908         sess->aes_gcm = 0;
1909         sess->is_gmac = 1;
1910         sess->iv_offset = a_form->iv.offset;
1911         sess->iv_length = a_form->iv.length;
1912         sess->mac_len = a_form->digest_length;
1913
1914         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1915                                          a_form->key.data, a_form->key.length,
1916                                          NULL)))
1917                 return -1;
1918
1919         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1920                                          a_form->digest_length)))
1921                 return -1;
1922
1923         return 0;
1924 }
1925
1926 static __rte_always_inline void *
1927 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1928               struct rte_mempool *cpt_meta_pool,
1929               struct cpt_inflight_req *infl_req)
1930 {
1931         uint8_t *mdata;
1932
1933         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1934                 return NULL;
1935
1936         buf->vaddr = mdata;
1937         buf->size = len;
1938
1939         infl_req->mdata = mdata;
1940         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1941
1942         return mdata;
1943 }
1944
1945 static __rte_always_inline uint32_t
1946 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
1947                      uint32_t start_offset)
1948 {
1949         uint16_t index = 0;
1950         void *seg_data = NULL;
1951         int32_t seg_size = 0;
1952
1953         if (!pkt) {
1954                 iovec->buf_cnt = 0;
1955                 return 0;
1956         }
1957
1958         if (!start_offset) {
1959                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1960                 seg_size = pkt->data_len;
1961         } else {
1962                 while (start_offset >= pkt->data_len) {
1963                         start_offset -= pkt->data_len;
1964                         pkt = pkt->next;
1965                 }
1966
1967                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
1968                 seg_size = pkt->data_len - start_offset;
1969                 if (!seg_size)
1970                         return 1;
1971         }
1972
1973         /* first seg */
1974         iovec->bufs[index].vaddr = seg_data;
1975         iovec->bufs[index].size = seg_size;
1976         index++;
1977         pkt = pkt->next;
1978
1979         while (unlikely(pkt != NULL)) {
1980                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1981                 seg_size = pkt->data_len;
1982                 if (!seg_size)
1983                         break;
1984
1985                 iovec->bufs[index].vaddr = seg_data;
1986                 iovec->bufs[index].size = seg_size;
1987
1988                 index++;
1989
1990                 pkt = pkt->next;
1991         }
1992
1993         iovec->buf_cnt = index;
1994         return 0;
1995 }
1996
1997 static __rte_always_inline uint32_t
1998 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
1999                              struct roc_se_fc_params *param, uint32_t *flags)
2000 {
2001         uint16_t index = 0;
2002         void *seg_data = NULL;
2003         uint32_t seg_size = 0;
2004         struct roc_se_iov_ptr *iovec;
2005
2006         seg_data = rte_pktmbuf_mtod(pkt, void *);
2007         seg_size = pkt->data_len;
2008
2009         /* first seg */
2010         if (likely(!pkt->next)) {
2011                 uint32_t headroom;
2012
2013                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2014                 headroom = rte_pktmbuf_headroom(pkt);
2015                 if (likely(headroom >= 24))
2016                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2017
2018                 param->bufs[0].vaddr = seg_data;
2019                 param->bufs[0].size = seg_size;
2020                 return 0;
2021         }
2022         iovec = param->src_iov;
2023         iovec->bufs[index].vaddr = seg_data;
2024         iovec->bufs[index].size = seg_size;
2025         index++;
2026         pkt = pkt->next;
2027
2028         while (unlikely(pkt != NULL)) {
2029                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2030                 seg_size = pkt->data_len;
2031
2032                 if (!seg_size)
2033                         break;
2034
2035                 iovec->bufs[index].vaddr = seg_data;
2036                 iovec->bufs[index].size = seg_size;
2037
2038                 index++;
2039
2040                 pkt = pkt->next;
2041         }
2042
2043         iovec->buf_cnt = index;
2044         return 0;
2045 }
2046
/*
 * Translate a symmetric crypto op into a CPT instruction.
 *
 * Packs data offsets/lengths into the d_offs/d_lens encodings expected by
 * the prep helpers (cipher in the high halves, auth/AAD in the low), sets
 * up IV/AAD/MAC side buffers, decides between in-place and scatter-gather
 * processing, allocates meta memory when SG is needed, and finally calls
 * the encode or decode prep routine.
 *
 * Returns 0 on success, -EINVAL on bad IOV setup, -ENOMEM when the meta
 * pool is empty, or the prep routine's error. On failure after meta
 * allocation the meta buffer is returned to the pool.
 */
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
	       struct cpt_qp_meta_info *m_info,
	       struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = &sess->roc_se_ctx;
	uint8_t op_minor = ctx->template_w4.s.opcode_minor;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	struct roc_se_fc_params fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess->iv_length)) {
		flags |= ROC_SE_VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
							     sess->iv_offset);
		/* AES-CTR with a 12-byte nonce: append BE counter value 1 */
		if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop, uint8_t *,
							 sess->iv_offset),
			       12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess->zsk_flag) {
		/* Wireless algos: separate auth IV from the op */
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
			cop, uint8_t *, sess->auth_iv_offset);
		/* KASUMI/ZUC-auth paths cannot run in place */
		if (sess->zsk_flag != ROC_SE_ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess->aes_gcm || sess->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off =
			sym_op->aead.data.offset + sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess->aad_length;
		/* If AAD sits immediately before the data in the mbuf,
		 * fold it into the cipher region's offset/length instead
		 * of passing a separate AAD buffer.
		 */
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src, uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.size = aad_len;
			flags |= ROC_SE_VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		/* First 4 IV bytes are the salt; re-program the context
		 * only when it changed since the last op on this session.
		 * NOTE(review): assumes iv_buf was set above (GCM always
		 * has an IV) — confirm.
		 */
		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess->salt)) {
			cpt_fc_salt_update(&sess->roc_se_ctx, salt);
			sess->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m =
				(cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				inplace = 0;
			}
		}
	} else {
		/* Cipher and/or auth: pack cipher ranges in the upper
		 * halves, auth ranges in the lower halves.
		 */
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off =
			sym_op->cipher.data.offset + sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		/* Hash is computed at the end of the larger region */
		if (mc_hash_off <
		    (sym_op->auth.data.offset + sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess->is_gmac)) {
			uint8_t *salt;
			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess->salt)) {
				cpt_fc_salt_update(&sess->roc_se_ctx, salt);
				sess->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					      mc_hash_off !=
				      (uint8_t *)sym_op->auth.digest.data))) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;

	/* NULL cipher and plain decode paths always go through SG */
	if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
	    unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
							  &flags))) {
			plt_dp_err("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			plt_dp_err("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					plt_dp_err("Not enough space in "
						   "m_dst %p, need %u"
						   " more",
						   m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				plt_dp_err("Prepare dst iov failed for "
					   "m_dst %p",
					   m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			/* No separate dst mbuf: write back over the source */
			fc_params.dst_iov = (void *)src;
		}
	}

	/* Meta buffer is needed unless the whole request fits the
	 * single-buffer-with-headroom fast path.
	 */
	if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		       (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
		       ((ctx->fc_type == ROC_SE_FC_GEN) ||
			(ctx->fc_type == ROC_SE_PDCP))))) {
		mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
				      m_info->pool, infl_req);
		if (mdata == NULL) {
			plt_dp_err("Error allocating meta buffer for request");
			return -ENOMEM;
		}
	}

	/* Finally prepare the instruction */
	if (cpt_op & ROC_SE_OP_ENCODE)
		ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);
	else
		ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);

	if (unlikely(ret)) {
		plt_dp_err("Preparing request failed due to bad input arg");
		goto free_mdata_and_exit;
	}

	return 0;

free_mdata_and_exit:
	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
		rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
	return ret;
}
2283
2284 static __rte_always_inline void
2285 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2286 {
2287         uint8_t *mac;
2288         struct rte_crypto_sym_op *sym_op = op->sym;
2289
2290         if (sym_op->auth.digest.data)
2291                 mac = sym_op->auth.digest.data;
2292         else
2293                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2294                                               sym_op->auth.data.length +
2295                                                       sym_op->auth.data.offset);
2296         if (!mac) {
2297                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2298                 return;
2299         }
2300
2301         if (memcmp(mac, gen_mac, mac_len))
2302                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2303         else
2304                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2305 }
2306
2307 static __rte_always_inline void
2308 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2309                                    uint32_t *addr_length_in_bits,
2310                                    uint8_t *addr_direction)
2311 {
2312         uint8_t found = 0;
2313         uint32_t pos;
2314         uint8_t last_byte;
2315         while (!found && counter_num_bytes > 0) {
2316                 counter_num_bytes--;
2317                 if (src[counter_num_bytes] == 0x00)
2318                         continue;
2319                 pos = rte_bsf32(src[counter_num_bytes]);
2320                 if (pos == 7) {
2321                         if (likely(counter_num_bytes > 0)) {
2322                                 last_byte = src[counter_num_bytes - 1];
2323                                 *addr_direction = last_byte & 0x1;
2324                                 *addr_length_in_bits =
2325                                         counter_num_bytes * 8 - 1;
2326                         }
2327                 } else {
2328                         last_byte = src[counter_num_bytes];
2329                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2330                         *addr_length_in_bits =
2331                                 counter_num_bytes * 8 + (8 - (pos + 2));
2332                 }
2333                 found = 1;
2334         }
2335 }
2336
2337 /*
2338  * This handles all auth only except AES_GMAC
2339  */
2340 static __rte_always_inline int
2341 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2342                    struct cpt_qp_meta_info *m_info,
2343                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2344 {
2345         uint32_t space = 0;
2346         struct rte_crypto_sym_op *sym_op = cop->sym;
2347         void *mdata;
2348         uint32_t auth_range_off;
2349         uint32_t flags = 0;
2350         uint64_t d_offs = 0, d_lens;
2351         struct rte_mbuf *m_src, *m_dst;
2352         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2353         uint16_t mac_len = sess->mac_len;
2354         struct roc_se_fc_params params;
2355         char src[SRC_IOV_SIZE];
2356         uint8_t iv_buf[16];
2357         int ret;
2358
2359         memset(&params, 0, sizeof(struct roc_se_fc_params));
2360
2361         m_src = sym_op->m_src;
2362
2363         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2364                               infl_req);
2365         if (mdata == NULL) {
2366                 ret = -ENOMEM;
2367                 goto err_exit;
2368         }
2369
2370         auth_range_off = sym_op->auth.data.offset;
2371
2372         flags = ROC_SE_VALID_MAC_BUF;
2373         params.src_iov = (void *)src;
2374         if (unlikely(sess->zsk_flag)) {
2375                 /*
2376                  * Since for Zuc, Kasumi, Snow3g offsets are in bits
2377                  * we will send pass through even for auth only case,
2378                  * let MC handle it
2379                  */
2380                 d_offs = auth_range_off;
2381                 auth_range_off = 0;
2382                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2383                         cop, uint8_t *, sess->auth_iv_offset);
2384                 if (sess->zsk_flag == ROC_SE_K_F9) {
2385                         uint32_t length_in_bits, num_bytes;
2386                         uint8_t *src, direction = 0;
2387
2388                         memcpy(iv_buf,
2389                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2390                         /*
2391                          * This is kasumi f9, take direction from
2392                          * source buffer
2393                          */
2394                         length_in_bits = cop->sym->auth.data.length;
2395                         num_bytes = (length_in_bits >> 3);
2396                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2397                         find_kasumif9_direction_and_length(
2398                                 src, num_bytes, &length_in_bits, &direction);
2399                         length_in_bits -= 64;
2400                         cop->sym->auth.data.offset += 64;
2401                         d_offs = cop->sym->auth.data.offset;
2402                         auth_range_off = d_offs / 8;
2403                         cop->sym->auth.data.length = length_in_bits;
2404
2405                         /* Store it at end of auth iv */
2406                         iv_buf[8] = direction;
2407                         params.auth_iv_buf = iv_buf;
2408                 }
2409         }
2410
2411         d_lens = sym_op->auth.data.length;
2412
2413         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2414
2415         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2416                 if (sym_op->auth.digest.data) {
2417                         /*
2418                          * Digest to be generated
2419                          * in separate buffer
2420                          */
2421                         params.mac_buf.size = sess->mac_len;
2422                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2423                 } else {
2424                         uint32_t off = sym_op->auth.data.offset +
2425                                        sym_op->auth.data.length;
2426                         int32_t dlen, space;
2427
2428                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2429                         dlen = rte_pktmbuf_pkt_len(m_dst);
2430
2431                         space = off + mac_len - dlen;
2432                         if (space > 0)
2433                                 if (!rte_pktmbuf_append(m_dst, space)) {
2434                                         plt_dp_err("Failed to extend "
2435                                                    "mbuf by %uB",
2436                                                    space);
2437                                         ret = -EINVAL;
2438                                         goto free_mdata_and_exit;
2439                                 }
2440
2441                         params.mac_buf.vaddr =
2442                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2443                         params.mac_buf.size = mac_len;
2444                 }
2445         } else {
2446                 uint64_t *op = mdata;
2447
2448                 /* Need space for storing generated mac */
2449                 space += 2 * sizeof(uint64_t);
2450
2451                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2452                 params.mac_buf.size = mac_len;
2453                 space += RTE_ALIGN_CEIL(mac_len, 8);
2454                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2455                 op[1] = mac_len;
2456                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2457         }
2458
2459         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2460         params.meta_buf.size -= space;
2461
2462         /* Out of place processing */
2463         params.src_iov = (void *)src;
2464
2465         /*Store SG I/O in the api for reuse */
2466         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2467                 plt_dp_err("Prepare src iov failed");
2468                 ret = -EINVAL;
2469                 goto free_mdata_and_exit;
2470         }
2471
2472         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2473         if (ret)
2474                 goto free_mdata_and_exit;
2475
2476         return 0;
2477
2478 free_mdata_and_exit:
2479         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2480                 rte_mempool_put(m_info->pool, infl_req->mdata);
2481 err_exit:
2482         return ret;
2483 }
2484 #endif /*_CNXK_SE_H_ */