dpdk.git: drivers/crypto/cnxk/cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
12 #define SRC_IOV_SIZE                                                           \
13         (sizeof(struct roc_se_iov_ptr) +                                       \
14          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE                                                           \
16         (sizeof(struct roc_se_iov_ptr) +                                       \
17          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
19 struct cnxk_se_sess {
20         uint16_t cpt_op : 4;
21         uint16_t zsk_flag : 4;
22         uint16_t aes_gcm : 1;
23         uint16_t aes_ctr : 1;
24         uint16_t chacha_poly : 1;
25         uint16_t is_null : 1;
26         uint16_t is_gmac : 1;
27         uint16_t rsvd1 : 3;
28         uint16_t aad_length;
29         uint8_t mac_len;
30         uint8_t iv_length;
31         uint8_t auth_iv_length;
32         uint16_t iv_offset;
33         uint16_t auth_iv_offset;
34         uint32_t salt;
35         uint64_t cpt_inst_w7;
36         struct roc_se_ctx roc_se_ctx;
37 } __rte_cache_aligned;
38
39 static inline void
40 pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type)
41 {
42         uint32_t *iv_s_temp, iv_temp[4];
43         int j;
44
45         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
46                 /*
47                  * DPDK provides the IV as IV3 IV2 IV1 IV0 (big endian);
48                  * the microcode (MC) expects it as IV0 IV1 IV2 IV3.
49                  */
50
51                 iv_s_temp = (uint32_t *)iv_s;
52
53                 for (j = 0; j < 4; j++)
54                         iv_temp[j] = iv_s_temp[3 - j];
55                 memcpy(iv_d, iv_temp, 16);
56         } else {
57                 /* ZUC doesn't need a swap */
58                 memcpy(iv_d, iv_s, 16);
59         }
60 }
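/*
 * Example of the copy above: a 16-byte SNOW3G IV supplied as the 32-bit words
 * {IV3, IV2, IV1, IV0} is written to the destination as {IV0, IV1, IV2, IV3};
 * a ZUC IV is copied unchanged.
 */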
61
62 static __rte_always_inline int
63 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
64 {
65         uint16_t mac_len = auth->digest_length;
66         int ret;
67
68         switch (auth->algo) {
69         case RTE_CRYPTO_AUTH_MD5:
70         case RTE_CRYPTO_AUTH_MD5_HMAC:
71                 ret = (mac_len == 16) ? 0 : -1;
72                 break;
73         case RTE_CRYPTO_AUTH_SHA1:
74         case RTE_CRYPTO_AUTH_SHA1_HMAC:
75                 ret = (mac_len == 20) ? 0 : -1;
76                 break;
77         case RTE_CRYPTO_AUTH_SHA224:
78         case RTE_CRYPTO_AUTH_SHA224_HMAC:
79                 ret = (mac_len == 28) ? 0 : -1;
80                 break;
81         case RTE_CRYPTO_AUTH_SHA256:
82         case RTE_CRYPTO_AUTH_SHA256_HMAC:
83                 ret = (mac_len == 32) ? 0 : -1;
84                 break;
85         case RTE_CRYPTO_AUTH_SHA384:
86         case RTE_CRYPTO_AUTH_SHA384_HMAC:
87                 ret = (mac_len == 48) ? 0 : -1;
88                 break;
89         case RTE_CRYPTO_AUTH_SHA512:
90         case RTE_CRYPTO_AUTH_SHA512_HMAC:
91                 ret = (mac_len == 64) ? 0 : -1;
92                 break;
93         case RTE_CRYPTO_AUTH_NULL:
94                 ret = 0;
95                 break;
96         default:
97                 ret = -1;
98         }
99
100         return ret;
101 }
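/*
 * Note: the check above accepts only the full digest size of each algorithm
 * (e.g. 20 bytes for SHA1, 64 bytes for SHA512) or a NULL auth xform;
 * truncated digest lengths are rejected with -1.
 */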
102
103 static __rte_always_inline void
104 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
105 {
106         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
107         memcpy(fctx->enc.encr_iv, salt, 4);
108 }
109
110 static __rte_always_inline uint32_t
111 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
112              uint32_t size)
113 {
114         struct roc_se_sglist_comp *to = &list[i >> 2];
115
116         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
117         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
118         i++;
119         return i;
120 }
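/*
 * Note on the SG layout used by the fill_sg_comp_* helpers: each
 * roc_se_sglist_comp packs four (length, pointer) slots, so entry i lands in
 * component i >> 2, slot i % 4, with both fields converted to big endian for
 * the microcode.
 */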
121
122 static __rte_always_inline uint32_t
123 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
124                       struct roc_se_buf_ptr *from)
125 {
126         struct roc_se_sglist_comp *to = &list[i >> 2];
127
128         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
129         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
130         i++;
131         return i;
132 }
133
134 static __rte_always_inline uint32_t
135 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
136                           struct roc_se_buf_ptr *from, uint32_t *psize)
137 {
138         struct roc_se_sglist_comp *to = &list[i >> 2];
139         uint32_t size = *psize;
140         uint32_t e_len;
141
142         e_len = (size > from->size) ? from->size : size;
143         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
144         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
145         *psize -= e_len;
146         i++;
147         return i;
148 }
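/*
 * Note: unlike fill_sg_comp_from_buf(), the _min variant caps the entry at the
 * remaining *psize and decrements it, so callers can detect a shortfall by
 * checking whether *psize is still non-zero afterwards.
 */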
149
150 /*
151  * Fill the SG I/O list expected by the microcode (MC)
152  * from the IOV list provided by the user.
153  */
154 static __rte_always_inline uint32_t
155 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
156                       struct roc_se_iov_ptr *from, uint32_t from_offset,
157                       uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
158                       uint32_t extra_offset)
159 {
160         int32_t j;
161         uint32_t extra_len = extra_buf ? extra_buf->size : 0;
162         uint32_t size = *psize;
163         struct roc_se_buf_ptr *bufs;
164
165         bufs = from->bufs;
166         for (j = 0; (j < from->buf_cnt) && size; j++) {
167                 uint64_t e_vaddr;
168                 uint32_t e_len;
169                 struct roc_se_sglist_comp *to = &list[i >> 2];
170
171                 if (unlikely(from_offset)) {
172                         if (from_offset >= bufs[j].size) {
173                                 from_offset -= bufs[j].size;
174                                 continue;
175                         }
176                         e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
177                         e_len = (size > (bufs[j].size - from_offset)) ?
178                                         (bufs[j].size - from_offset) :
179                                         size;
180                         from_offset = 0;
181                 } else {
182                         e_vaddr = (uint64_t)bufs[j].vaddr;
183                         e_len = (size > bufs[j].size) ? bufs[j].size : size;
184                 }
185
186                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
187                 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
188
189                 if (extra_len && (e_len >= extra_offset)) {
190                         /* Break the data at given offset */
191                         uint32_t next_len = e_len - extra_offset;
192                         uint64_t next_vaddr = e_vaddr + extra_offset;
193
194                         if (!extra_offset) {
195                                 i--;
196                         } else {
197                                 e_len = extra_offset;
198                                 size -= e_len;
199                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
200                         }
201
202                         extra_len = RTE_MIN(extra_len, size);
203                         /* Insert extra data ptr */
204                         if (extra_len) {
205                                 i++;
206                                 to = &list[i >> 2];
207                                 to->u.s.len[i % 4] =
208                                         rte_cpu_to_be_16(extra_len);
209                                 to->ptr[i % 4] = rte_cpu_to_be_64(
210                                         (uint64_t)extra_buf->vaddr);
211                                 size -= extra_len;
212                         }
213
214                         next_len = RTE_MIN(next_len, size);
215                         /* insert the rest of the data */
216                         if (next_len) {
217                                 i++;
218                                 to = &list[i >> 2];
219                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
220                                 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
221                                 size -= next_len;
222                         }
223                         extra_len = 0;
224
225                 } else {
226                         size -= e_len;
227                 }
228                 if (extra_offset)
229                         extra_offset -= size;
230                 i++;
231         }
232
233         *psize = size;
234         return (uint32_t)i;
235 }
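/*
 * Note: when extra_buf is supplied, the gather entry covering extra_offset is
 * split and extra_buf is inserted between the two halves; the callers in this
 * file use this to splice the AAD buffer into the data at passthrough_len.
 */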
236
237 static __rte_always_inline int
238 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
239                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
240 {
241         void *m_vaddr = params->meta_buf.vaddr;
242         uint32_t size, i;
243         uint16_t data_len, mac_len, key_len;
244         roc_se_auth_type hash_type;
245         struct roc_se_ctx *ctx;
246         struct roc_se_sglist_comp *gather_comp;
247         struct roc_se_sglist_comp *scatter_comp;
248         uint8_t *in_buffer;
249         uint32_t g_size_bytes, s_size_bytes;
250         union cpt_inst_w4 cpt_inst_w4;
251
252         ctx = params->ctx_buf.vaddr;
253
254         hash_type = ctx->hash_type;
255         mac_len = ctx->mac_len;
256         key_len = ctx->auth_key_len;
257         data_len = ROC_SE_AUTH_DLEN(d_lens);
258
259         /* GP op header */
260         cpt_inst_w4.s.opcode_minor = 0;
261         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
262         if (ctx->hmac) {
263                 cpt_inst_w4.s.opcode_major =
264                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
265                 cpt_inst_w4.s.param1 = key_len;
266                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
267         } else {
268                 cpt_inst_w4.s.opcode_major =
269                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
270                 cpt_inst_w4.s.param1 = 0;
271                 cpt_inst_w4.s.dlen = data_len;
272         }
273
274         /* The NULL-auth-only case enters this block */
275         if (unlikely(!hash_type && !ctx->enc_cipher)) {
276                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
277                 /* Minor op is passthrough */
278                 cpt_inst_w4.s.opcode_minor = 0x03;
279                 /* Send out completion code only */
280                 cpt_inst_w4.s.param2 = 0x1;
281         }
282
283         /* DPTR has SG list */
284         in_buffer = m_vaddr;
285
286         ((uint16_t *)in_buffer)[0] = 0;
287         ((uint16_t *)in_buffer)[1] = 0;
288
289         /* TODO Add error check if space will be sufficient */
290         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
291
292         /*
293          * Input gather list
294          */
295
296         i = 0;
297
298         if (ctx->hmac) {
299                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
300                 /* Key */
301                 i = fill_sg_comp(gather_comp, i, k_vaddr,
302                                  RTE_ALIGN_CEIL(key_len, 8));
303         }
304
305         /* input data */
306         size = data_len;
307         if (size) {
308                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
309                                           &size, NULL, 0);
310                 if (unlikely(size)) {
311                         plt_dp_err("Insufficient src IOV size, short by %dB",
312                                    size);
313                         return -1;
314                 }
315         } else {
316                 /*
317                  * A zero-length gather pointer must still be added
318                  * for hash & HMAC when there is no input data.
319                  */
320                 i++;
321         }
322         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
323         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
324
325         /*
326          * Output Scatter list
327          */
328
329         i = 0;
330         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
331                                                      g_size_bytes);
332
333         if (flags & ROC_SE_VALID_MAC_BUF) {
334                 if (unlikely(params->mac_buf.size < mac_len)) {
335                         plt_dp_err("Insufficient MAC size");
336                         return -1;
337                 }
338
339                 size = mac_len;
340                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
341                                               &size);
342         } else {
343                 size = mac_len;
344                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
345                                           data_len, &size, NULL, 0);
346                 if (unlikely(size)) {
347                         plt_dp_err("Insufficient dst IOV size, short by %dB",
348                                    size);
349                         return -1;
350                 }
351         }
352
353         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
354         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
355
356         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
357
358         /* This is DPTR len in case of SG mode */
359         cpt_inst_w4.s.dlen = size;
360
361         inst->dptr = (uint64_t)in_buffer;
362         inst->w4.u64 = cpt_inst_w4.u64;
363
364         return 0;
365 }
366
367 static __rte_always_inline int
368 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
369                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
370 {
371         uint32_t iv_offset = 0;
372         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
373         struct roc_se_ctx *se_ctx;
374         uint32_t cipher_type, hash_type;
375         uint32_t mac_len, size;
376         uint8_t iv_len = 16;
377         struct roc_se_buf_ptr *aad_buf = NULL;
378         uint32_t encr_offset, auth_offset;
379         uint32_t encr_data_len, auth_data_len, aad_len = 0;
380         uint32_t passthrough_len = 0;
381         union cpt_inst_w4 cpt_inst_w4;
382         void *offset_vaddr;
383         uint8_t op_minor;
384
385         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
386         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
387         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
388         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
389         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
390                 /* AAD and a separate auth range are not supported together */
391                 auth_data_len = 0;
392                 auth_offset = 0;
393                 aad_len = fc_params->aad_buf.size;
394                 aad_buf = &fc_params->aad_buf;
395         }
396         se_ctx = fc_params->ctx_buf.vaddr;
397         cipher_type = se_ctx->enc_cipher;
398         hash_type = se_ctx->hash_type;
399         mac_len = se_ctx->mac_len;
400         op_minor = se_ctx->template_w4.s.opcode_minor;
401
402         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
403                 iv_len = 0;
404                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
405         }
406
407         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
408                 /*
409                  * When AAD is given, data above encr_offset is passed through.
410                  * Since AAD is given as a separate pointer and not as an offset,
411                  * this is a special case: input data must be fragmented into
412                  * passthrough + encr_data, with AAD inserted in between.
413                  */
414                 if (hash_type != ROC_SE_GMAC_TYPE) {
415                         passthrough_len = encr_offset;
416                         auth_offset = passthrough_len + iv_len;
417                         encr_offset = passthrough_len + aad_len + iv_len;
418                         auth_data_len = aad_len + encr_data_len;
419                 } else {
420                         passthrough_len = 16 + aad_len;
421                         auth_offset = passthrough_len + iv_len;
422                         auth_data_len = aad_len;
423                 }
424         } else {
425                 encr_offset += iv_len;
426                 auth_offset += iv_len;
427         }
428
429         /* Encryption */
430         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
431         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
432         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
433
434         if (hash_type == ROC_SE_GMAC_TYPE) {
435                 encr_offset = 0;
436                 encr_data_len = 0;
437         }
438
439         auth_dlen = auth_offset + auth_data_len;
440         enc_dlen = encr_data_len + encr_offset;
441         if (unlikely(encr_data_len & 0xf)) {
442                 if ((cipher_type == ROC_SE_DES3_CBC) ||
443                     (cipher_type == ROC_SE_DES3_ECB))
444                         enc_dlen =
445                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
446                 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
447                                 (cipher_type == ROC_SE_AES_ECB)))
448                         enc_dlen =
449                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
450         }
451
452         if (unlikely(auth_dlen > enc_dlen)) {
453                 inputlen = auth_dlen;
454                 outputlen = auth_dlen + mac_len;
455         } else {
456                 inputlen = enc_dlen;
457                 outputlen = enc_dlen + mac_len;
458         }
459
460         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
461                 outputlen = enc_dlen;
462
463         /* GP op header */
464         cpt_inst_w4.s.param1 = encr_data_len;
465         cpt_inst_w4.s.param2 = auth_data_len;
466
467         /*
468          * On cn9k and cn10k, the IV and offset control word cannot be
469          * part of the instruction and must be placed in the data buffer,
470          * so Direct mode processing is done only when headroom is
471          * available for them.
472          */
473         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
474                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
475                 void *dm_vaddr = fc_params->bufs[0].vaddr;
476
477                 /* Use Direct mode */
478
479                 offset_vaddr =
480                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
481
482                 /* DPTR */
483                 inst->dptr = (uint64_t)offset_vaddr;
484
485                 /* RPTR should just exclude offset control word */
486                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
487
488                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
489
490                 if (likely(iv_len)) {
491                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
492                                                       ROC_SE_OFF_CTRL_LEN);
493                         uint64_t *src = fc_params->iv_buf;
494                         dest[0] = src[0];
495                         dest[1] = src[1];
496                 }
497
498         } else {
499                 void *m_vaddr = fc_params->meta_buf.vaddr;
500                 uint32_t i, g_size_bytes, s_size_bytes;
501                 struct roc_se_sglist_comp *gather_comp;
502                 struct roc_se_sglist_comp *scatter_comp;
503                 uint8_t *in_buffer;
504
505                 /* This falls under strict SG mode */
506                 offset_vaddr = m_vaddr;
507                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
508
509                 m_vaddr = (uint8_t *)m_vaddr + size;
510
511                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
512
513                 if (likely(iv_len)) {
514                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
515                                                       ROC_SE_OFF_CTRL_LEN);
516                         uint64_t *src = fc_params->iv_buf;
517                         dest[0] = src[0];
518                         dest[1] = src[1];
519                 }
520
521                 /* DPTR has SG list */
522                 in_buffer = m_vaddr;
523
524                 ((uint16_t *)in_buffer)[0] = 0;
525                 ((uint16_t *)in_buffer)[1] = 0;
526
527                 /* TODO Add error check if space will be sufficient */
528                 gather_comp =
529                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
530
531                 /*
532                  * Input Gather List
533                  */
534
535                 i = 0;
536
537                 /* Offset control word that includes iv */
538                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
539                                  ROC_SE_OFF_CTRL_LEN + iv_len);
540
541                 /* Add input data */
542                 size = inputlen - iv_len;
543                 if (likely(size)) {
544                         uint32_t aad_offset = aad_len ? passthrough_len : 0;
545
546                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
547                                 i = fill_sg_comp_from_buf_min(
548                                         gather_comp, i, fc_params->bufs, &size);
549                         } else {
550                                 i = fill_sg_comp_from_iov(
551                                         gather_comp, i, fc_params->src_iov, 0,
552                                         &size, aad_buf, aad_offset);
553                         }
554
555                         if (unlikely(size)) {
556                                 plt_dp_err("Insufficient buffer space,"
557                                            " size %d needed",
558                                            size);
559                                 return -1;
560                         }
561                 }
562                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
563                 g_size_bytes =
564                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
565
566                 /*
567                  * Output Scatter list
568                  */
569                 i = 0;
570                 scatter_comp =
571                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
572                                                       g_size_bytes);
573
574                 /* Add IV */
575                 if (likely(iv_len)) {
576                         i = fill_sg_comp(scatter_comp, i,
577                                          (uint64_t)offset_vaddr +
578                                                  ROC_SE_OFF_CTRL_LEN,
579                                          iv_len);
580                 }
581
582                 /* output data or output data + digest */
583                 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
584                         size = outputlen - iv_len - mac_len;
585                         if (size) {
586                                 uint32_t aad_offset =
587                                         aad_len ? passthrough_len : 0;
588
589                                 if (unlikely(flags &
590                                              ROC_SE_SINGLE_BUF_INPLACE)) {
591                                         i = fill_sg_comp_from_buf_min(
592                                                 scatter_comp, i,
593                                                 fc_params->bufs, &size);
594                                 } else {
595                                         i = fill_sg_comp_from_iov(
596                                                 scatter_comp, i,
597                                                 fc_params->dst_iov, 0, &size,
598                                                 aad_buf, aad_offset);
599                                 }
600                                 if (unlikely(size)) {
601                                         plt_dp_err("Insufficient buffer"
602                                                    " space, size %d needed",
603                                                    size);
604                                         return -1;
605                                 }
606                         }
607                         /* mac_data */
608                         if (mac_len) {
609                                 i = fill_sg_comp_from_buf(scatter_comp, i,
610                                                           &fc_params->mac_buf);
611                         }
612                 } else {
613                         /* Output including mac */
614                         size = outputlen - iv_len;
615                         if (likely(size)) {
616                                 uint32_t aad_offset =
617                                         aad_len ? passthrough_len : 0;
618
619                                 if (unlikely(flags &
620                                              ROC_SE_SINGLE_BUF_INPLACE)) {
621                                         i = fill_sg_comp_from_buf_min(
622                                                 scatter_comp, i,
623                                                 fc_params->bufs, &size);
624                                 } else {
625                                         i = fill_sg_comp_from_iov(
626                                                 scatter_comp, i,
627                                                 fc_params->dst_iov, 0, &size,
628                                                 aad_buf, aad_offset);
629                                 }
630                                 if (unlikely(size)) {
631                                         plt_dp_err("Insufficient buffer"
632                                                    " space, size %d needed",
633                                                    size);
634                                         return -1;
635                                 }
636                         }
637                 }
638                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
639                 s_size_bytes =
640                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
641
642                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
643
644                 /* This is DPTR len in case of SG mode */
645                 cpt_inst_w4.s.dlen = size;
646
647                 inst->dptr = (uint64_t)in_buffer;
648         }
649
650         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
651                      (auth_offset >> 8))) {
652                 plt_dp_err("Offset not supported");
653                 plt_dp_err("enc_offset: %d", encr_offset);
654                 plt_dp_err("iv_offset : %d", iv_offset);
655                 plt_dp_err("auth_offset: %d", auth_offset);
656                 return -1;
657         }
658
659         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
660                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
661                 ((uint64_t)auth_offset));
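        /*
         * Note: the 8-byte offset control word written above packs encr_offset
         * (16 bits), iv_offset (8 bits) and auth_offset (8 bits) in its low
         * 32 bits and is stored big endian; the range check above guarantees
         * each field fits.
         */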
662
663         inst->w4.u64 = cpt_inst_w4.u64;
664         return 0;
665 }
666
667 static __rte_always_inline int
668 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
669                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
670 {
671         uint32_t iv_offset = 0, size;
672         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
673         struct roc_se_ctx *se_ctx;
674         int32_t hash_type, mac_len;
675         uint8_t iv_len = 16;
676         struct roc_se_buf_ptr *aad_buf = NULL;
677         uint32_t encr_offset, auth_offset;
678         uint32_t encr_data_len, auth_data_len, aad_len = 0;
679         uint32_t passthrough_len = 0;
680         union cpt_inst_w4 cpt_inst_w4;
681         void *offset_vaddr;
682         uint8_t op_minor;
683
684         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
685         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
686         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
687         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
688
689         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
690                 /* AAD and a separate auth range are not supported together */
691                 auth_data_len = 0;
692                 auth_offset = 0;
693                 aad_len = fc_params->aad_buf.size;
694                 aad_buf = &fc_params->aad_buf;
695         }
696
697         se_ctx = fc_params->ctx_buf.vaddr;
698         hash_type = se_ctx->hash_type;
699         mac_len = se_ctx->mac_len;
700         op_minor = se_ctx->template_w4.s.opcode_minor;
701
702         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
703                 iv_len = 0;
704                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
705         }
706
707         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
708                 /*
709                  * When AAD is given, data above encr_offset is passed through.
710                  * Since AAD is given as a separate pointer and not as an offset,
711                  * this is a special case: input data must be fragmented into
712                  * passthrough + encr_data, with AAD inserted in between.
713                  */
714                 if (hash_type != ROC_SE_GMAC_TYPE) {
715                         passthrough_len = encr_offset;
716                         auth_offset = passthrough_len + iv_len;
717                         encr_offset = passthrough_len + aad_len + iv_len;
718                         auth_data_len = aad_len + encr_data_len;
719                 } else {
720                         passthrough_len = 16 + aad_len;
721                         auth_offset = passthrough_len + iv_len;
722                         auth_data_len = aad_len;
723                 }
724         } else {
725                 encr_offset += iv_len;
726                 auth_offset += iv_len;
727         }
728
729         /* Decryption */
730         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
731         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
732         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
733
734         if (hash_type == ROC_SE_GMAC_TYPE) {
735                 encr_offset = 0;
736                 encr_data_len = 0;
737         }
738
739         enc_dlen = encr_offset + encr_data_len;
740         auth_dlen = auth_offset + auth_data_len;
741
742         if (auth_dlen > enc_dlen) {
743                 inputlen = auth_dlen + mac_len;
744                 outputlen = auth_dlen;
745         } else {
746                 inputlen = enc_dlen + mac_len;
747                 outputlen = enc_dlen;
748         }
749
750         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
751                 outputlen = inputlen = enc_dlen;
752
753         cpt_inst_w4.s.param1 = encr_data_len;
754         cpt_inst_w4.s.param2 = auth_data_len;
755
756         /*
757          * On cn9k and cn10k, the IV and offset control word cannot be
758          * part of the instruction and must be placed in the data buffer,
759          * so Direct mode processing is done only when headroom is
760          * available for them.
761          */
762         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
763                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
764                 void *dm_vaddr = fc_params->bufs[0].vaddr;
765
766                 /* Use Direct mode */
767
768                 offset_vaddr =
769                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
770                 inst->dptr = (uint64_t)offset_vaddr;
771
772                 /* RPTR should just exclude offset control word */
773                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
774
775                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
776
777                 if (likely(iv_len)) {
778                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
779                                                       ROC_SE_OFF_CTRL_LEN);
780                         uint64_t *src = fc_params->iv_buf;
781                         dest[0] = src[0];
782                         dest[1] = src[1];
783                 }
784
785         } else {
786                 void *m_vaddr = fc_params->meta_buf.vaddr;
787                 uint32_t g_size_bytes, s_size_bytes;
788                 struct roc_se_sglist_comp *gather_comp;
789                 struct roc_se_sglist_comp *scatter_comp;
790                 uint8_t *in_buffer;
791                 uint8_t i = 0;
792
793                 /* This falls under strict SG mode */
794                 offset_vaddr = m_vaddr;
795                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
796
797                 m_vaddr = (uint8_t *)m_vaddr + size;
798
799                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
800
801                 if (likely(iv_len)) {
802                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
803                                                       ROC_SE_OFF_CTRL_LEN);
804                         uint64_t *src = fc_params->iv_buf;
805                         dest[0] = src[0];
806                         dest[1] = src[1];
807                 }
808
809                 /* DPTR has SG list */
810                 in_buffer = m_vaddr;
811
812                 ((uint16_t *)in_buffer)[0] = 0;
813                 ((uint16_t *)in_buffer)[1] = 0;
814
815                 /* TODO Add error check if space will be sufficient */
816                 gather_comp =
817                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
818
819                 /*
820                  * Input Gather List
821                  */
822                 i = 0;
823
824                 /* Offset control word that includes iv */
825                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
826                                  ROC_SE_OFF_CTRL_LEN + iv_len);
827
828                 /* Add input data */
829                 if (flags & ROC_SE_VALID_MAC_BUF) {
830                         size = inputlen - iv_len - mac_len;
831                         if (size) {
832                                 /* input data only */
833                                 if (unlikely(flags &
834                                              ROC_SE_SINGLE_BUF_INPLACE)) {
835                                         i = fill_sg_comp_from_buf_min(
836                                                 gather_comp, i, fc_params->bufs,
837                                                 &size);
838                                 } else {
839                                         uint32_t aad_offset =
840                                                 aad_len ? passthrough_len : 0;
841
842                                         i = fill_sg_comp_from_iov(
843                                                 gather_comp, i,
844                                                 fc_params->src_iov, 0, &size,
845                                                 aad_buf, aad_offset);
846                                 }
847                                 if (unlikely(size)) {
848                                         plt_dp_err("Insufficient buffer"
849                                                    " space, size %d needed",
850                                                    size);
851                                         return -1;
852                                 }
853                         }
854
855                         /* mac data */
856                         if (mac_len) {
857                                 i = fill_sg_comp_from_buf(gather_comp, i,
858                                                           &fc_params->mac_buf);
859                         }
860                 } else {
861                         /* input data + mac */
862                         size = inputlen - iv_len;
863                         if (size) {
864                                 if (unlikely(flags &
865                                              ROC_SE_SINGLE_BUF_INPLACE)) {
866                                         i = fill_sg_comp_from_buf_min(
867                                                 gather_comp, i, fc_params->bufs,
868                                                 &size);
869                                 } else {
870                                         uint32_t aad_offset =
871                                                 aad_len ? passthrough_len : 0;
872
873                                         if (unlikely(!fc_params->src_iov)) {
874                                                 plt_dp_err("Bad input args");
875                                                 return -1;
876                                         }
877
878                                         i = fill_sg_comp_from_iov(
879                                                 gather_comp, i,
880                                                 fc_params->src_iov, 0, &size,
881                                                 aad_buf, aad_offset);
882                                 }
883
884                                 if (unlikely(size)) {
885                                         plt_dp_err("Insufficient buffer"
886                                                    " space, size %d needed",
887                                                    size);
888                                         return -1;
889                                 }
890                         }
891                 }
892                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
893                 g_size_bytes =
894                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
895
896                 /*
897                  * Output Scatter List
898                  */
899
900                 i = 0;
901                 scatter_comp =
902                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
903                                                       g_size_bytes);
904
905                 /* Add iv */
906                 if (iv_len) {
907                         i = fill_sg_comp(scatter_comp, i,
908                                          (uint64_t)offset_vaddr +
909                                                  ROC_SE_OFF_CTRL_LEN,
910                                          iv_len);
911                 }
912
913                 /* Add output data */
914                 size = outputlen - iv_len;
915                 if (size) {
916                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
917                                 /* handle single buffer here */
918                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
919                                                               fc_params->bufs,
920                                                               &size);
921                         } else {
922                                 uint32_t aad_offset =
923                                         aad_len ? passthrough_len : 0;
924
925                                 if (unlikely(!fc_params->dst_iov)) {
926                                         plt_dp_err("Bad input args");
927                                         return -1;
928                                 }
929
930                                 i = fill_sg_comp_from_iov(
931                                         scatter_comp, i, fc_params->dst_iov, 0,
932                                         &size, aad_buf, aad_offset);
933                         }
934
935                         if (unlikely(size)) {
936                                 plt_dp_err("Insufficient buffer space,"
937                                            " size %d needed",
938                                            size);
939                                 return -1;
940                         }
941                 }
942
943                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
944                 s_size_bytes =
945                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
946
947                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
948
949                 /* This is DPTR len in case of SG mode */
950                 cpt_inst_w4.s.dlen = size;
951
952                 inst->dptr = (uint64_t)in_buffer;
953         }
954
955         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
956                      (auth_offset >> 8))) {
957                 plt_dp_err("Offset not supported");
958                 plt_dp_err("enc_offset: %d", encr_offset);
959                 plt_dp_err("iv_offset : %d", iv_offset);
960                 plt_dp_err("auth_offset: %d", auth_offset);
961                 return -1;
962         }
963
964         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
965                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
966                 ((uint64_t)auth_offset));
967
968         inst->w4.u64 = cpt_inst_w4.u64;
969         return 0;
970 }
971
972 static __rte_always_inline int
973 cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
974                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
975 {
976         uint32_t size;
977         int32_t inputlen, outputlen;
978         struct roc_se_ctx *se_ctx;
979         uint32_t mac_len = 0;
980         uint8_t pdcp_alg_type;
981         uint32_t encr_offset, auth_offset;
982         uint32_t encr_data_len, auth_data_len;
983         int flags, iv_len = 16;
984         uint64_t offset_ctrl;
985         uint64_t *offset_vaddr;
986         uint8_t *iv_s;
987         union cpt_inst_w4 cpt_inst_w4;
988
989         se_ctx = params->ctx_buf.vaddr;
990         flags = se_ctx->zsk_flags;
991         mac_len = se_ctx->mac_len;
992         pdcp_alg_type = se_ctx->pdcp_alg_type;
993
994         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
995
996         /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
997
998         cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
999                                       (0 << 4) | (0 << 3) | (flags & 0x7));
1000
1001         if (flags == 0x1) {
1002                 /*
1003                  * Microcode expects offsets in bytes
1004                  * TODO: Rounding off
1005                  */
1006                 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1007
1008                 /* EIA3 or UIA2 */
1009                 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
1010                 auth_offset = auth_offset / 8;
1011
1012                 /* consider iv len */
1013                 auth_offset += iv_len;
1014
1015                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1016                 outputlen = mac_len;
1017
1018                 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1019
1020                 encr_data_len = 0;
1021                 encr_offset = 0;
1022
1023                 iv_s = params->auth_iv_buf;
1024         } else {
1025                 /* EEA3 or UEA2 */
1026                 /*
1027                  * Microcode expects offsets in bytes
1028                  * TODO: Rounding off
1029                  */
1030                 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1031
1032                 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
1033                 encr_offset = encr_offset / 8;
1034                 /* consider iv len */
1035                 encr_offset += iv_len;
1036
1037                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1038                 outputlen = inputlen;
1039
1040                 /* iv offset is 0 */
1041                 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1042
1043                 auth_data_len = 0;
1044                 auth_offset = 0;
1045
1046                 iv_s = params->iv_buf;
1047         }
1048
1049         if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
1050                 plt_dp_err("Offset not supported");
1051                 plt_dp_err("enc_offset: %d", encr_offset);
1052                 plt_dp_err("auth_offset: %d", auth_offset);
1053                 return -1;
1054         }
1055
1056         /*
1057          * GP op header, lengths are expected in bits.
1058          */
1059         cpt_inst_w4.s.param1 = encr_data_len;
1060         cpt_inst_w4.s.param2 = auth_data_len;
1061
1062         /*
1063          * On cn9k and cn10k, the IV and offset control word cannot be
1064          * part of the instruction and must be placed in the data buffer,
1065          * so Direct mode processing is done only when headroom is
1066          * available for them.
1067          */
1068         if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1069                    (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1070                 void *dm_vaddr = params->bufs[0].vaddr;
1071
1072                 /* Use Direct mode */
1073
1074                 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1075                                             ROC_SE_OFF_CTRL_LEN - iv_len);
1076
1077                 /* DPTR */
1078                 inst->dptr = (uint64_t)offset_vaddr;
1079                 /* RPTR should just exclude offset control word */
1080                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1081
1082                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1083
1084                 uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
1085                 pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type);
1086
1087                 *offset_vaddr = offset_ctrl;
1088         } else {
1089                 void *m_vaddr = params->meta_buf.vaddr;
1090                 uint32_t i, g_size_bytes, s_size_bytes;
1091                 struct roc_se_sglist_comp *gather_comp;
1092                 struct roc_se_sglist_comp *scatter_comp;
1093                 uint8_t *in_buffer;
1094                 uint8_t *iv_d;
1095
1096                 /* save space for offset control word and iv */
1097                 offset_vaddr = m_vaddr;
1098
1099                 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1100
1101                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1102
1103                 /* DPTR has SG list */
1104                 in_buffer = m_vaddr;
1105
1106                 ((uint16_t *)in_buffer)[0] = 0;
1107                 ((uint16_t *)in_buffer)[1] = 0;
1108
1109                 /* TODO Add error check if space will be sufficient */
1110                 gather_comp =
1111                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1112
1113                 /*
1114                  * Input Gather List
1115                  */
1116                 i = 0;
1117
1118                 /* Offset control word followed by iv */
1119
1120                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1121                                  ROC_SE_OFF_CTRL_LEN + iv_len);
1122
1123                 /* iv offset is 0 */
1124                 *offset_vaddr = offset_ctrl;
1125
1126                 iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
1127                 pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type);
1128
1129                 /* input data */
1130                 size = inputlen - iv_len;
1131                 if (size) {
1132                         i = fill_sg_comp_from_iov(gather_comp, i,
1133                                                   params->src_iov, 0, &size,
1134                                                   NULL, 0);
1135                         if (unlikely(size)) {
1136                                 plt_dp_err("Insufficient buffer space,"
1137                                            " size %d needed",
1138                                            size);
1139                                 return -1;
1140                         }
1141                 }
1142                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1143                 g_size_bytes =
1144                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1145
1146                 /*
1147                  * Output Scatter List
1148                  */
1149
1150                 i = 0;
1151                 scatter_comp =
1152                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1153                                                       g_size_bytes);
1154
1155                 if (flags == 0x1) {
1156                         /* IV in SLIST only for EEA3 & UEA2 */
1157                         iv_len = 0;
1158                 }
1159
1160                 if (iv_len) {
1161                         i = fill_sg_comp(scatter_comp, i,
1162                                          (uint64_t)offset_vaddr +
1163                                                  ROC_SE_OFF_CTRL_LEN,
1164                                          iv_len);
1165                 }
1166
1167                 /* Add output data */
1168                 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1169                         size = outputlen - iv_len - mac_len;
1170                         if (size) {
1171                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1172                                                           params->dst_iov, 0,
1173                                                           &size, NULL, 0);
1174
1175                                 if (unlikely(size)) {
1176                                         plt_dp_err("Insufficient buffer space,"
1177                                                    " size %d needed",
1178                                                    size);
1179                                         return -1;
1180                                 }
1181                         }
1182
1183                         /* mac data */
1184                         if (mac_len) {
1185                                 i = fill_sg_comp_from_buf(scatter_comp, i,
1186                                                           &params->mac_buf);
1187                         }
1188                 } else {
1189                         /* Output including mac */
1190                         size = outputlen - iv_len;
1191                         if (size) {
1192                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1193                                                           params->dst_iov, 0,
1194                                                           &size, NULL, 0);
1195
1196                                 if (unlikely(size)) {
1197                                         plt_dp_err("Insufficient buffer space,"
1198                                                    " size %d needed",
1199                                                    size);
1200                                         return -1;
1201                                 }
1202                         }
1203                 }
1204                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1205                 s_size_bytes =
1206                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1207
1208                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1209
1210                 /* This is DPTR len in case of SG mode */
1211                 cpt_inst_w4.s.dlen = size;
1212
1213                 inst->dptr = (uint64_t)in_buffer;
1214         }
1215
1216         inst->w4.u64 = cpt_inst_w4.u64;
1217
1218         return 0;
1219 }
1220
1221 static __rte_always_inline int
1222 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1223                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1224 {
1225         void *m_vaddr = params->meta_buf.vaddr;
1226         uint32_t size;
1227         int32_t inputlen = 0, outputlen = 0;
1228         struct roc_se_ctx *se_ctx;
1229         uint32_t mac_len = 0;
1230         uint8_t i = 0;
1231         uint32_t encr_offset, auth_offset;
1232         uint32_t encr_data_len, auth_data_len;
1233         int flags;
1234         uint8_t *iv_s, *iv_d, iv_len = 8;
1235         uint8_t dir = 0;
1236         uint64_t *offset_vaddr;
1237         union cpt_inst_w4 cpt_inst_w4;
1238         uint8_t *in_buffer;
1239         uint32_t g_size_bytes, s_size_bytes;
1240         struct roc_se_sglist_comp *gather_comp;
1241         struct roc_se_sglist_comp *scatter_comp;
1242
1243         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1244         auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1245         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1246         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1247
1248         se_ctx = params->ctx_buf.vaddr;
1249         flags = se_ctx->zsk_flags;
1250         mac_len = se_ctx->mac_len;
1251
1252         if (flags == 0x0)
1253                 iv_s = params->iv_buf;
1254         else
1255                 iv_s = params->auth_iv_buf;
1256
1257         dir = iv_s[8] & 0x1;
1258
1259         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1260
1261         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1262         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1263                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1264
1265         /*
1266          * GP op header, lengths are expected in bits.
1267          */
1268         cpt_inst_w4.s.param1 = encr_data_len;
1269         cpt_inst_w4.s.param2 = auth_data_len;
1270
1271         /* consider iv len */
1272         if (flags == 0x0) {
1273                 encr_offset += iv_len;
1274                 auth_offset += iv_len;
1275         }
1276
1277         /* save space for offset ctrl and iv */
1278         offset_vaddr = m_vaddr;
1279
1280         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1281
1282         /* DPTR has SG list */
1283         in_buffer = m_vaddr;
1284
1285         ((uint16_t *)in_buffer)[0] = 0;
1286         ((uint16_t *)in_buffer)[1] = 0;
1287
1288         /* TODO Add error check if space will be sufficient */
1289         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1290
1291         /*
1292          * Input Gather List
1293          */
1294         i = 0;
1295
1296         /* Offset control word followed by iv */
1297
1298         if (flags == 0x0) {
1299                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1300                 outputlen = inputlen;
1301                 /* iv offset is 0 */
1302                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1303                 if (unlikely((encr_offset >> 16))) {
1304                         plt_dp_err("Offset not supported");
1305                         plt_dp_err("enc_offset: %d", encr_offset);
1306                         return -1;
1307                 }
1308         } else {
1309                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1310                 outputlen = mac_len;
1311                 /* iv offset is 0 */
1312                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1313                 if (unlikely((auth_offset >> 8))) {
1314                         plt_dp_err("Offset not supported");
1315                         plt_dp_err("auth_offset: %d", auth_offset);
1316                         return -1;
1317                 }
1318         }
1319
1320         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1321                          ROC_SE_OFF_CTRL_LEN + iv_len);
1322
1323         /* IV */
1324         iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1325         memcpy(iv_d, iv_s, iv_len);
1326
1327         /* input data */
1328         size = inputlen - iv_len;
1329         if (size) {
1330                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1331                                           &size, NULL, 0);
1332
1333                 if (unlikely(size)) {
1334                         plt_dp_err("Insufficient buffer space,"
1335                                    " size %d needed",
1336                                    size);
1337                         return -1;
1338                 }
1339         }
1340         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1341         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1342
1343         /*
1344          * Output Scatter List
1345          */
1346
1347         i = 0;
1348         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1349                                                      g_size_bytes);
1350
1351         if (flags == 0x1) {
1352                 /* IV goes to SLIST only for F8; skip it for F9 (auth) */
1353                 iv_len = 0;
1354         }
1355
1356         /* IV */
1357         if (iv_len) {
1358                 i = fill_sg_comp(scatter_comp, i,
1359                                  (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1360                                  iv_len);
1361         }
1362
1363         /* Add output data */
1364         if (req_flags & ROC_SE_VALID_MAC_BUF) {
1365                 size = outputlen - iv_len - mac_len;
1366                 if (size) {
1367                         i = fill_sg_comp_from_iov(scatter_comp, i,
1368                                                   params->dst_iov, 0, &size,
1369                                                   NULL, 0);
1370
1371                         if (unlikely(size)) {
1372                                 plt_dp_err("Insufficient buffer space,"
1373                                            " size %u needed",
1374                                            size);
1375                                 return -1;
1376                         }
1377                 }
1378
1379                 /* mac data */
1380                 if (mac_len) {
1381                         i = fill_sg_comp_from_buf(scatter_comp, i,
1382                                                   &params->mac_buf);
1383                 }
1384         } else {
1385                 /* Output including mac */
1386                 size = outputlen - iv_len;
1387                 if (size) {
1388                         i = fill_sg_comp_from_iov(scatter_comp, i,
1389                                                   params->dst_iov, 0, &size,
1390                                                   NULL, 0);
1391
1392                         if (unlikely(size)) {
1393                                 plt_dp_err("Insufficient buffer space,"
1394                                            " size %u needed",
1395                                            size);
1396                                 return -1;
1397                         }
1398                 }
1399         }
1400         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1401         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1402
1403         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1404
1405         /* This is DPTR len in case of SG mode */
1406         cpt_inst_w4.s.dlen = size;
1407
1408         inst->dptr = (uint64_t)in_buffer;
1409         inst->w4.u64 = cpt_inst_w4.u64;
1410
1411         return 0;
1412 }
1413
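/*
 * Prepare a KASUMI F8 decrypt instruction in SG mode: lay out the offset
 * control word and IV in the meta buffer, build the gather list (offset
 * control word + IV + ciphertext) and scatter list (IV + plaintext), and
 * fill CPT instruction word 4 and DPTR.
 */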
1414 static __rte_always_inline int
1415 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1416                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1417 {
1418         void *m_vaddr = params->meta_buf.vaddr;
1419         uint32_t size;
1420         int32_t inputlen = 0, outputlen;
1421         struct roc_se_ctx *se_ctx;
1422         uint8_t i = 0, iv_len = 8;
1423         uint32_t encr_offset;
1424         uint32_t encr_data_len;
1425         int flags;
1426         uint8_t dir = 0;
1427         uint64_t *offset_vaddr;
1428         union cpt_inst_w4 cpt_inst_w4;
1429         uint8_t *in_buffer;
1430         uint32_t g_size_bytes, s_size_bytes;
1431         struct roc_se_sglist_comp *gather_comp;
1432         struct roc_se_sglist_comp *scatter_comp;
1433
1434         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1435         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1436
1437         se_ctx = params->ctx_buf.vaddr;
1438         flags = se_ctx->zsk_flags;
1439
1440         cpt_inst_w4.u64 = 0;
1441         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1442
1443         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1444         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1445                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1446
1447         /*
1448          * GP op header, lengths are expected in bits.
1449          */
1450         cpt_inst_w4.s.param1 = encr_data_len;
1451
1452         /* consider iv len */
1453         encr_offset += iv_len;
1454
1455         inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
1456         outputlen = inputlen;
1457
1458         /* save space for offset ctrl & iv */
1459         offset_vaddr = m_vaddr;
1460
1461         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1462
1463         /* DPTR has SG list */
1464         in_buffer = m_vaddr;
1465
1466         ((uint16_t *)in_buffer)[0] = 0;
1467         ((uint16_t *)in_buffer)[1] = 0;
1468
1469         /* TODO Add error check if space will be sufficient */
1470         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1471
1472         /*
1473          * Input Gather List
1474          */
1475         i = 0;
1476
1477         /* Offset control word followed by iv */
1478         *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1479         if (unlikely((encr_offset >> 16))) {
1480                 plt_dp_err("Offset not supported");
1481                 plt_dp_err("enc_offset: %u", encr_offset);
1482                 return -1;
1483         }
1484
1485         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1486                          ROC_SE_OFF_CTRL_LEN + iv_len);
1487
1488         /* IV */
1489         memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1490                iv_len);
1491
1492         /* Add input data */
1493         size = inputlen - iv_len;
1494         if (size) {
1495                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1496                                           &size, NULL, 0);
1497                 if (unlikely(size)) {
1498                         plt_dp_err("Insufficient buffer space,"
1499                                    " size %u needed",
1500                                    size);
1501                         return -1;
1502                 }
1503         }
1504         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1505         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1506
1507         /*
1508          * Output Scatter List
1509          */
1510
1511         i = 0;
1512         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1513                                                      g_size_bytes);
1514
1515         /* IV */
1516         i = fill_sg_comp(scatter_comp, i,
1517                          (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1518
1519         /* Add output data */
1520         size = outputlen - iv_len;
1521         if (size) {
1522                 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1523                                           &size, NULL, 0);
1524                 if (unlikely(size)) {
1525                         plt_dp_err("Insufficient buffer space,"
1526                                    " size %u needed",
1527                                    size);
1528                         return -1;
1529                 }
1530         }
1531         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1532         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1533
1534         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1535
1536         /* This is DPTR len in case of SG mode */
1537         cpt_inst_w4.s.dlen = size;
1538
1539         inst->dptr = (uint64_t)in_buffer;
1540         inst->w4.u64 = cpt_inst_w4.u64;
1541
1542         return 0;
1543 }
1544
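/*
 * Dispatch decrypt / auth-verify request preparation based on the context
 * fc_type: flexi crypto (FC_GEN), PDCP (SNOW3G/ZUC) or KASUMI.
 */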
1545 static __rte_always_inline int
1546 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1547                      struct roc_se_fc_params *fc_params,
1548                      struct cpt_inst_s *inst)
1549 {
1550         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1551         uint8_t fc_type;
1552         int ret = -1;
1553
1554         fc_type = ctx->fc_type;
1555
1556         if (likely(fc_type == ROC_SE_FC_GEN)) {
1557                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1558         } else if (fc_type == ROC_SE_PDCP) {
1559                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1560                                           inst);
1561         } else if (fc_type == ROC_SE_KASUMI) {
1562                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1563         }
1564
1565         /*
1566          * For the AUTH_ONLY case, the microcode only supports digest
1567          * generation; verification is done in software by memcmp()
1568          * (see compl_auth_verify()).
1569          */
1570
1571         return ret;
1572 }
1573
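/*
 * Dispatch encrypt / digest-generate request preparation based on the
 * context fc_type: flexi crypto (FC_GEN), PDCP (SNOW3G/ZUC), KASUMI or
 * hash/HMAC only.
 */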
1574 static __rte_always_inline int
1575 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1576                      struct roc_se_fc_params *fc_params,
1577                      struct cpt_inst_s *inst)
1578 {
1579         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1580         uint8_t fc_type;
1581         int ret = -1;
1582
1583         fc_type = ctx->fc_type;
1584
1585         if (likely(fc_type == ROC_SE_FC_GEN)) {
1586                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1587         } else if (fc_type == ROC_SE_PDCP) {
1588                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1589                                           inst);
1590         } else if (fc_type == ROC_SE_KASUMI) {
1591                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1592                                           inst);
1593         } else if (fc_type == ROC_SE_HASH_HMAC) {
1594                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1595         }
1596
1597         return ret;
1598 }
1599
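/* Populate session fields from an AEAD xform (AES-GCM or ChaCha20-Poly1305) */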
1600 static __rte_always_inline int
1601 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1602 {
1603         struct rte_crypto_aead_xform *aead_form;
1604         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1605         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1606         uint32_t cipher_key_len = 0;
1607         uint8_t aes_gcm = 0;
1608         aead_form = &xform->aead;
1609
1610         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1611                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1612                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1613         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1614                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1615                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1616         } else {
1617                 plt_dp_err("Unknown AEAD operation");
1618                 return -1;
1619         }
1620         switch (aead_form->algo) {
1621         case RTE_CRYPTO_AEAD_AES_GCM:
1622                 enc_type = ROC_SE_AES_GCM;
1623                 cipher_key_len = 16;
1624                 aes_gcm = 1;
1625                 break;
1626         case RTE_CRYPTO_AEAD_AES_CCM:
1627                 plt_dp_err("Crypto: Unsupported AEAD algo %u",
1628                            aead_form->algo);
1629                 return -1;
1630         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1631                 enc_type = ROC_SE_CHACHA20;
1632                 auth_type = ROC_SE_POLY1305;
1633                 cipher_key_len = 32;
1634                 sess->chacha_poly = 1;
1635                 break;
1636         default:
1637                 plt_dp_err("Crypto: Undefined AEAD algo %u specified",
1638                            aead_form->algo);
1639                 return -1;
1640         }
1641         if (aead_form->key.length < cipher_key_len) {
1642                 plt_dp_err("Invalid cipher params keylen %u",
1643                            aead_form->key.length);
1644                 return -1;
1645         }
1646         sess->zsk_flag = 0;
1647         sess->aes_gcm = aes_gcm;
1648         sess->mac_len = aead_form->digest_length;
1649         sess->iv_offset = aead_form->iv.offset;
1650         sess->iv_length = aead_form->iv.length;
1651         sess->aad_length = aead_form->aad_length;
1652
1653         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1654                                          aead_form->key.data,
1655                                          aead_form->key.length, NULL)))
1656                 return -1;
1657
1658         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1659                                          aead_form->digest_length)))
1660                 return -1;
1661
1662         return 0;
1663 }
1664
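/*
 * Populate session fields from a cipher xform. When a decrypt is chained
 * with a following auth xform, the HMAC_FIRST minor opcode is selected so
 * decryption is followed by digest verification.
 */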
1665 static __rte_always_inline int
1666 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1667 {
1668         struct rte_crypto_cipher_xform *c_form;
1669         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1670         uint32_t cipher_key_len = 0;
1671         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1672
1673         c_form = &xform->cipher;
1674
1675         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1676                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1677         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1678                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1679                 if (xform->next != NULL &&
1680                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1681                         /* Perform decryption followed by auth verify */
1682                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1683                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1684                 }
1685         } else {
1686                 plt_dp_err("Unknown cipher operation");
1687                 return -1;
1688         }
1689
1690         switch (c_form->algo) {
1691         case RTE_CRYPTO_CIPHER_AES_CBC:
1692                 enc_type = ROC_SE_AES_CBC;
1693                 cipher_key_len = 16;
1694                 break;
1695         case RTE_CRYPTO_CIPHER_3DES_CBC:
1696                 enc_type = ROC_SE_DES3_CBC;
1697                 cipher_key_len = 24;
1698                 break;
1699         case RTE_CRYPTO_CIPHER_DES_CBC:
1700                 /* DES is implemented using 3DES in hardware */
1701                 enc_type = ROC_SE_DES3_CBC;
1702                 cipher_key_len = 8;
1703                 break;
1704         case RTE_CRYPTO_CIPHER_AES_CTR:
1705                 enc_type = ROC_SE_AES_CTR;
1706                 cipher_key_len = 16;
1707                 aes_ctr = 1;
1708                 break;
1709         case RTE_CRYPTO_CIPHER_NULL:
1710                 enc_type = 0;
1711                 is_null = 1;
1712                 break;
1713         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1714                 enc_type = ROC_SE_KASUMI_F8_ECB;
1715                 cipher_key_len = 16;
1716                 zsk_flag = ROC_SE_K_F8;
1717                 break;
1718         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1719                 enc_type = ROC_SE_SNOW3G_UEA2;
1720                 cipher_key_len = 16;
1721                 zsk_flag = ROC_SE_ZS_EA;
1722                 break;
1723         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1724                 enc_type = ROC_SE_ZUC_EEA3;
1725                 cipher_key_len = 16;
1726                 zsk_flag = ROC_SE_ZS_EA;
1727                 break;
1728         case RTE_CRYPTO_CIPHER_AES_XTS:
1729                 enc_type = ROC_SE_AES_XTS;
1730                 cipher_key_len = 16;
1731                 break;
1732         case RTE_CRYPTO_CIPHER_3DES_ECB:
1733                 enc_type = ROC_SE_DES3_ECB;
1734                 cipher_key_len = 24;
1735                 break;
1736         case RTE_CRYPTO_CIPHER_AES_ECB:
1737                 enc_type = ROC_SE_AES_ECB;
1738                 cipher_key_len = 16;
1739                 break;
1740         case RTE_CRYPTO_CIPHER_3DES_CTR:
1741         case RTE_CRYPTO_CIPHER_AES_F8:
1742         case RTE_CRYPTO_CIPHER_ARC4:
1743                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1744                 return -1;
1745         default:
1746                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1747                            c_form->algo);
1748                 return -1;
1749         }
1750
1751         if (c_form->key.length < cipher_key_len) {
1752                 plt_dp_err("Invalid cipher params keylen %u",
1753                            c_form->key.length);
1754                 return -1;
1755         }
1756
1757         sess->zsk_flag = zsk_flag;
1758         sess->aes_gcm = 0;
1759         sess->aes_ctr = aes_ctr;
1760         sess->iv_offset = c_form->iv.offset;
1761         sess->iv_length = c_form->iv.length;
1762         sess->is_null = is_null;
1763
1764         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1765                                          c_form->key.data, c_form->key.length,
1766                                          NULL)))
1767                 return -1;
1768
1769         return 0;
1770 }
1771
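/*
 * Populate session fields from an auth xform: hash/HMAC algorithms, AES-GMAC
 * (as part of a chain) and the wireless ZUC/SNOW3G/KASUMI integrity
 * algorithms, which additionally record the auth IV offset/length.
 */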
1772 static __rte_always_inline int
1773 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1774 {
1775         struct rte_crypto_auth_xform *a_form;
1776         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1777         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1778
1779         if (xform->next != NULL &&
1780             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1781             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1782                 /* Perform auth followed by encryption */
1783                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1784                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1785         }
1786
1787         a_form = &xform->auth;
1788
1789         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1790                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1791         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1792                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1793         else {
1794                 plt_dp_err("Unknown auth operation");
1795                 return -1;
1796         }
1797
1798         switch (a_form->algo) {
1799         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1800                 /* Fall through */
1801         case RTE_CRYPTO_AUTH_SHA1:
1802                 auth_type = ROC_SE_SHA1_TYPE;
1803                 break;
1804         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1805         case RTE_CRYPTO_AUTH_SHA256:
1806                 auth_type = ROC_SE_SHA2_SHA256;
1807                 break;
1808         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1809         case RTE_CRYPTO_AUTH_SHA512:
1810                 auth_type = ROC_SE_SHA2_SHA512;
1811                 break;
1812         case RTE_CRYPTO_AUTH_AES_GMAC:
1813                 auth_type = ROC_SE_GMAC_TYPE;
1814                 aes_gcm = 1;
1815                 break;
1816         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1817         case RTE_CRYPTO_AUTH_SHA224:
1818                 auth_type = ROC_SE_SHA2_SHA224;
1819                 break;
1820         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1821         case RTE_CRYPTO_AUTH_SHA384:
1822                 auth_type = ROC_SE_SHA2_SHA384;
1823                 break;
1824         case RTE_CRYPTO_AUTH_MD5_HMAC:
1825         case RTE_CRYPTO_AUTH_MD5:
1826                 auth_type = ROC_SE_MD5_TYPE;
1827                 break;
1828         case RTE_CRYPTO_AUTH_KASUMI_F9:
1829                 auth_type = ROC_SE_KASUMI_F9_ECB;
1830                 /*
1831                  * Indicate that direction needs to be taken out
1832                  * from end of src
1833                  */
1834                 zsk_flag = ROC_SE_K_F9;
1835                 break;
1836         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1837                 auth_type = ROC_SE_SNOW3G_UIA2;
1838                 zsk_flag = ROC_SE_ZS_IA;
1839                 break;
1840         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1841                 auth_type = ROC_SE_ZUC_EIA3;
1842                 zsk_flag = ROC_SE_ZS_IA;
1843                 break;
1844         case RTE_CRYPTO_AUTH_NULL:
1845                 auth_type = 0;
1846                 is_null = 1;
1847                 break;
1848         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1849         case RTE_CRYPTO_AUTH_AES_CMAC:
1850         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1851                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
1852                 return -1;
1853         default:
1854                 plt_dp_err("Crypto: Undefined hash algo %u specified",
1855                            a_form->algo);
1856                 return -1;
1857         }
1858
1859         sess->zsk_flag = zsk_flag;
1860         sess->aes_gcm = aes_gcm;
1861         sess->mac_len = a_form->digest_length;
1862         sess->is_null = is_null;
1863         if (zsk_flag) {
1864                 sess->auth_iv_offset = a_form->iv.offset;
1865                 sess->auth_iv_length = a_form->iv.length;
1866         }
1867         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
1868                                          a_form->key.data, a_form->key.length,
1869                                          a_form->digest_length)))
1870                 return -1;
1871
1872         return 0;
1873 }
1874
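/*
 * Populate session fields for standalone AES-GMAC: programmed as an AES-GCM
 * cipher context with GMAC auth type.
 */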
1875 static __rte_always_inline int
1876 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1877 {
1878         struct rte_crypto_auth_xform *a_form;
1879         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1880         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1881
1882         a_form = &xform->auth;
1883
1884         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1885                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1886         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1887                 sess->cpt_op |= ROC_SE_OP_DECODE;
1888         else {
1889                 plt_dp_err("Unknown auth operation");
1890                 return -1;
1891         }
1892
1893         switch (a_form->algo) {
1894         case RTE_CRYPTO_AUTH_AES_GMAC:
1895                 enc_type = ROC_SE_AES_GCM;
1896                 auth_type = ROC_SE_GMAC_TYPE;
1897                 break;
1898         default:
1899                 plt_dp_err("Crypto: Undefined auth algo %u specified",
1900                            a_form->algo);
1901                 return -1;
1902         }
1903
1904         sess->zsk_flag = 0;
1905         sess->aes_gcm = 0;
1906         sess->is_gmac = 1;
1907         sess->iv_offset = a_form->iv.offset;
1908         sess->iv_length = a_form->iv.length;
1909         sess->mac_len = a_form->digest_length;
1910
1911         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1912                                          a_form->key.data, a_form->key.length,
1913                                          NULL)))
1914                 return -1;
1915
1916         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1917                                          a_form->digest_length)))
1918                 return -1;
1919
1920         return 0;
1921 }
1922
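/*
 * Allocate a meta buffer from the queue pair meta pool and record it in the
 * inflight request so it can be freed once the request completes or fails.
 */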
1923 static __rte_always_inline void *
1924 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1925               struct rte_mempool *cpt_meta_pool,
1926               struct cpt_inflight_req *infl_req)
1927 {
1928         uint8_t *mdata;
1929
1930         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1931                 return NULL;
1932
1933         buf->vaddr = mdata;
1934         buf->size = len;
1935
1936         infl_req->mdata = mdata;
1937         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1938
1939         return mdata;
1940 }
1941
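/*
 * Flatten an mbuf chain into an IOV list, optionally skipping start_offset
 * bytes. Returns 0 on success and 1 if the offset leaves no data in the
 * starting segment.
 */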
1942 static __rte_always_inline uint32_t
1943 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
1944                      uint32_t start_offset)
1945 {
1946         uint16_t index = 0;
1947         void *seg_data = NULL;
1948         int32_t seg_size = 0;
1949
1950         if (!pkt) {
1951                 iovec->buf_cnt = 0;
1952                 return 0;
1953         }
1954
1955         if (!start_offset) {
1956                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1957                 seg_size = pkt->data_len;
1958         } else {
1959                 while (start_offset >= pkt->data_len) {
1960                         start_offset -= pkt->data_len;
1961                         pkt = pkt->next;
1962                 }
1963
1964                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
1965                 seg_size = pkt->data_len - start_offset;
1966                 if (!seg_size)
1967                         return 1;
1968         }
1969
1970         /* first seg */
1971         iovec->bufs[index].vaddr = seg_data;
1972         iovec->bufs[index].size = seg_size;
1973         index++;
1974         pkt = pkt->next;
1975
1976         while (unlikely(pkt != NULL)) {
1977                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1978                 seg_size = pkt->data_len;
1979                 if (!seg_size)
1980                         break;
1981
1982                 iovec->bufs[index].vaddr = seg_data;
1983                 iovec->bufs[index].size = seg_size;
1984
1985                 index++;
1986
1987                 pkt = pkt->next;
1988         }
1989
1990         iovec->buf_cnt = index;
1991         return 0;
1992 }
1993
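/*
 * Prepare buffers for in-place processing. A single-segment mbuf is used
 * directly (flagging whether enough headroom exists for the offset control
 * word and IV); a multi-segment mbuf is flattened into the source IOV list.
 */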
1994 static __rte_always_inline uint32_t
1995 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
1996                              struct roc_se_fc_params *param, uint32_t *flags)
1997 {
1998         uint16_t index = 0;
1999         void *seg_data = NULL;
2000         uint32_t seg_size = 0;
2001         struct roc_se_iov_ptr *iovec;
2002
2003         seg_data = rte_pktmbuf_mtod(pkt, void *);
2004         seg_size = pkt->data_len;
2005
2006         /* first seg */
2007         if (likely(!pkt->next)) {
2008                 uint32_t headroom;
2009
2010                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2011                 headroom = rte_pktmbuf_headroom(pkt);
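                /* 24B headroom fits the 8B offset control word and a 16B IV */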
2012                 if (likely(headroom >= 24))
2013                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2014
2015                 param->bufs[0].vaddr = seg_data;
2016                 param->bufs[0].size = seg_size;
2017                 return 0;
2018         }
2019         iovec = param->src_iov;
2020         iovec->bufs[index].vaddr = seg_data;
2021         iovec->bufs[index].size = seg_size;
2022         index++;
2023         pkt = pkt->next;
2024
2025         while (unlikely(pkt != NULL)) {
2026                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2027                 seg_size = pkt->data_len;
2028
2029                 if (!seg_size)
2030                         break;
2031
2032                 iovec->bufs[index].vaddr = seg_data;
2033                 iovec->bufs[index].size = seg_size;
2034
2035                 index++;
2036
2037                 pkt = pkt->next;
2038         }
2039
2040         iovec->buf_cnt = index;
2041         return 0;
2042 }
2043
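/*
 * Convert a symmetric crypto op into roc_se_fc_params and prepare the CPT
 * instruction: resolve IV/AAD/digest placement, decide between in-place and
 * SG processing, allocate a meta buffer when required, and invoke the
 * encrypt or decrypt prep routine.
 */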
2044 static __rte_always_inline int
2045 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2046                struct cpt_qp_meta_info *m_info,
2047                struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2048 {
2049         struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2050         uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2051         struct rte_crypto_sym_op *sym_op = cop->sym;
2052         void *mdata = NULL;
2053         uint32_t mc_hash_off;
2054         uint32_t flags = 0;
2055         uint64_t d_offs, d_lens;
2056         struct rte_mbuf *m_src, *m_dst;
2057         uint8_t cpt_op = sess->cpt_op;
2058 #ifdef CPT_ALWAYS_USE_SG_MODE
2059         uint8_t inplace = 0;
2060 #else
2061         uint8_t inplace = 1;
2062 #endif
2063         struct roc_se_fc_params fc_params;
2064         char src[SRC_IOV_SIZE];
2065         char dst[DST_IOV_SIZE];
2066         uint32_t iv_buf[4];
2067         int ret;
2068
2069         if (likely(sess->iv_length)) {
2070                 flags |= ROC_SE_VALID_IV_BUF;
2071                 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2072                                                              sess->iv_offset);
2073                 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2074                         memcpy((uint8_t *)iv_buf,
2075                                rte_crypto_op_ctod_offset(cop, uint8_t *,
2076                                                          sess->iv_offset),
2077                                12);
2078                         iv_buf[3] = rte_cpu_to_be_32(0x1);
2079                         fc_params.iv_buf = iv_buf;
2080                 }
2081         }
2082
2083         if (sess->zsk_flag) {
2084                 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2085                         cop, uint8_t *, sess->auth_iv_offset);
2086                 if (sess->zsk_flag != ROC_SE_ZS_EA)
2087                         inplace = 0;
2088         }
2089         m_src = sym_op->m_src;
2090         m_dst = sym_op->m_dst;
2091
2092         if (sess->aes_gcm || sess->chacha_poly) {
2093                 uint8_t *salt;
2094                 uint8_t *aad_data;
2095                 uint16_t aad_len;
2096
2097                 d_offs = sym_op->aead.data.offset;
2098                 d_lens = sym_op->aead.data.length;
2099                 mc_hash_off =
2100                         sym_op->aead.data.offset + sym_op->aead.data.length;
2101
2102                 aad_data = sym_op->aead.aad.data;
2103                 aad_len = sess->aad_length;
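                /*
                 * If the AAD immediately precedes the AEAD data in the mbuf,
                 * fold it into the offsets/lengths; otherwise pass it as a
                 * separate AAD buffer and disable in-place processing.
                 */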
2104                 if (likely((aad_data + aad_len) ==
2105                            rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2106                                                    sym_op->aead.data.offset))) {
2107                         d_offs = (d_offs - aad_len) | (d_offs << 16);
2108                         d_lens = (d_lens + aad_len) | (d_lens << 32);
2109                 } else {
2110                         fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2111                         fc_params.aad_buf.size = aad_len;
2112                         flags |= ROC_SE_VALID_AAD_BUF;
2113                         inplace = 0;
2114                         d_offs = d_offs << 16;
2115                         d_lens = d_lens << 32;
2116                 }
2117
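                /*
                 * First 4 bytes of the IV carry the salt; refresh the
                 * context if it changed and point iv_buf past it.
                 */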
2118                 salt = fc_params.iv_buf;
2119                 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2120                         cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2121                         sess->salt = *(uint32_t *)salt;
2122                 }
2123                 fc_params.iv_buf = salt + 4;
2124                 if (likely(sess->mac_len)) {
2125                         struct rte_mbuf *m =
2126                                 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2127
2128                         if (!m)
2129                                 m = m_src;
2130
2131                         /* Digest immediately following the data is the best case */
2132                         if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2133                                              mc_hash_off !=
2134                                      (uint8_t *)sym_op->aead.digest.data)) {
2135                                 flags |= ROC_SE_VALID_MAC_BUF;
2136                                 fc_params.mac_buf.size = sess->mac_len;
2137                                 fc_params.mac_buf.vaddr =
2138                                         sym_op->aead.digest.data;
2139                                 inplace = 0;
2140                         }
2141                 }
2142         } else {
2143                 d_offs = sym_op->cipher.data.offset;
2144                 d_lens = sym_op->cipher.data.length;
2145                 mc_hash_off =
2146                         sym_op->cipher.data.offset + sym_op->cipher.data.length;
2147                 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2148                 d_lens = (d_lens << 32) | sym_op->auth.data.length;
2149
2150                 if (mc_hash_off <
2151                     (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2152                         mc_hash_off = (sym_op->auth.data.offset +
2153                                        sym_op->auth.data.length);
2154                 }
2155                 /* for gmac, salt should be updated like in gcm */
2156                 if (unlikely(sess->is_gmac)) {
2157                         uint8_t *salt;
2158                         salt = fc_params.iv_buf;
2159                         if (unlikely(*(uint32_t *)salt != sess->salt)) {
2160                                 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2161                                 sess->salt = *(uint32_t *)salt;
2162                         }
2163                         fc_params.iv_buf = salt + 4;
2164                 }
2165                 if (likely(sess->mac_len)) {
2166                         struct rte_mbuf *m;
2167
2168                         m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2169                         if (!m)
2170                                 m = m_src;
2171
2172                         /* hmac immediately following data is best case */
2173                         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2174                             (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2175                                               mc_hash_off !=
2176                                       (uint8_t *)sym_op->auth.digest.data))) {
2177                                 flags |= ROC_SE_VALID_MAC_BUF;
2178                                 fc_params.mac_buf.size = sess->mac_len;
2179                                 fc_params.mac_buf.vaddr =
2180                                         sym_op->auth.digest.data;
2181                                 inplace = 0;
2182                         }
2183                 }
2184         }
2185         fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2186
2187         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2188             unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2189                 inplace = 0;
2190
2191         if (likely(!m_dst && inplace)) {
2192                 /* Single in-place buffer with no separate AAD buf or
2193                  * MAC buf, and not a wireless (SNOW3G/ZUC/KASUMI)
2194                  * algorithm
2195                  */
2196                 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2197
2198                 if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
2199                                                           &flags))) {
2200                         plt_dp_err("Prepare inplace src iov failed");
2201                         ret = -EINVAL;
2202                         goto err_exit;
2203                 }
2204
2205         } else {
2206                 /* Out of place processing */
2207                 fc_params.src_iov = (void *)src;
2208                 fc_params.dst_iov = (void *)dst;
2209
2210                 /* Store SG I/O in the api for reuse */
2211                 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2212                         plt_dp_err("Prepare src iov failed");
2213                         ret = -EINVAL;
2214                         goto err_exit;
2215                 }
2216
2217                 if (unlikely(m_dst != NULL)) {
2218                         uint32_t pkt_len;
2219
2220                         /* Try to make room as much as src has */
2221                         pkt_len = rte_pktmbuf_pkt_len(m_dst);
2222
2223                         if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2224                                 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2225                                 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2226                                         plt_dp_err("Not enough space in "
2227                                                    "m_dst %p, need %u"
2228                                                    " more",
2229                                                    m_dst, pkt_len);
2230                                         ret = -EINVAL;
2231                                         goto err_exit;
2232                                 }
2233                         }
2234
2235                         if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2236                                 plt_dp_err("Prepare dst iov failed for "
2237                                            "m_dst %p",
2238                                            m_dst);
2239                                 ret = -EINVAL;
2240                                 goto err_exit;
2241                         }
2242                 } else {
2243                         fc_params.dst_iov = (void *)src;
2244                 }
2245         }
2246
2247         if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2248                        (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2249                        ((ctx->fc_type == ROC_SE_FC_GEN) ||
2250                         (ctx->fc_type == ROC_SE_PDCP))))) {
2251                 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2252                                       m_info->pool, infl_req);
2253                 if (mdata == NULL) {
2254                         plt_dp_err("Error allocating meta buffer for request");
2255                         return -ENOMEM;
2256                 }
2257         }
2258
2259         /* Finally prepare the instruction */
2260         if (cpt_op & ROC_SE_OP_ENCODE)
2261                 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2262                                            inst);
2263         else
2264                 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2265                                            inst);
2266
2267         if (unlikely(ret)) {
2268                 plt_dp_err("Preparing request failed due to bad input arg");
2269                 goto free_mdata_and_exit;
2270         }
2271
2272         return 0;
2273
2274 free_mdata_and_exit:
2275         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2276                 rte_mempool_put(m_info->pool, infl_req->mdata);
2277 err_exit:
2278         return ret;
2279 }
2280
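/*
 * Software digest verification: compare the MAC generated by hardware with
 * the digest supplied in the op and set the op status accordingly.
 */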
2281 static __rte_always_inline void
2282 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2283 {
2284         uint8_t *mac;
2285         struct rte_crypto_sym_op *sym_op = op->sym;
2286
2287         if (sym_op->auth.digest.data)
2288                 mac = sym_op->auth.digest.data;
2289         else
2290                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2291                                               sym_op->auth.data.length +
2292                                                       sym_op->auth.data.offset);
2293         if (!mac) {
2294                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2295                 return;
2296         }
2297
2298         if (memcmp(mac, gen_mac, mac_len))
2299                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2300         else
2301                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2302 }
2303
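/*
 * KASUMI F9 input ends with a direction bit, a single '1' padding bit and
 * zero padding. Scan backwards for the padding bit, pull out the direction
 * bit that precedes it and compute the message length in bits.
 */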
2304 static __rte_always_inline void
2305 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2306                                    uint32_t *addr_length_in_bits,
2307                                    uint8_t *addr_direction)
2308 {
2309         uint8_t found = 0;
2310         uint32_t pos;
2311         uint8_t last_byte;
2312         while (!found && counter_num_bytes > 0) {
2313                 counter_num_bytes--;
2314                 if (src[counter_num_bytes] == 0x00)
2315                         continue;
2316                 pos = rte_bsf32(src[counter_num_bytes]);
2317                 if (pos == 7) {
2318                         if (likely(counter_num_bytes > 0)) {
2319                                 last_byte = src[counter_num_bytes - 1];
2320                                 *addr_direction = last_byte & 0x1;
2321                                 *addr_length_in_bits =
2322                                         counter_num_bytes * 8 - 1;
2323                         }
2324                 } else {
2325                         last_byte = src[counter_num_bytes];
2326                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2327                         *addr_length_in_bits =
2328                                 counter_num_bytes * 8 + (8 - (pos + 2));
2329                 }
2330                 found = 1;
2331         }
2332 }
2333
2334 /*
2335  * This handles all auth only except AES_GMAC
2336  */
2337 static __rte_always_inline int
2338 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2339                    struct cpt_qp_meta_info *m_info,
2340                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2341 {
2342         uint32_t space = 0;
2343         struct rte_crypto_sym_op *sym_op = cop->sym;
2344         void *mdata;
2345         uint32_t auth_range_off;
2346         uint32_t flags = 0;
2347         uint64_t d_offs = 0, d_lens;
2348         struct rte_mbuf *m_src, *m_dst;
2349         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2350         uint16_t mac_len = sess->mac_len;
2351         struct roc_se_fc_params params;
2352         char src[SRC_IOV_SIZE];
2353         uint8_t iv_buf[16];
2354         int ret;
2355
2356         memset(&params, 0, sizeof(struct roc_se_fc_params));
2357
2358         m_src = sym_op->m_src;
2359
2360         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2361                               infl_req);
2362         if (mdata == NULL) {
2363                 ret = -ENOMEM;
2364                 goto err_exit;
2365         }
2366
2367         auth_range_off = sym_op->auth.data.offset;
2368
2369         flags = ROC_SE_VALID_MAC_BUF;
2370         params.src_iov = (void *)src;
2371         if (unlikely(sess->zsk_flag)) {
2372                 /*
2373                  * For ZUC, KASUMI and SNOW3G the offsets are in bits, so
2374                  * pass them through even for the auth-only case and let
2375                  * the microcode handle them.
2376                  */
2377                 d_offs = auth_range_off;
2378                 auth_range_off = 0;
2379                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2380                         cop, uint8_t *, sess->auth_iv_offset);
2381                 if (sess->zsk_flag == ROC_SE_K_F9) {
2382                         uint32_t length_in_bits, num_bytes;
2383                         uint8_t *auth_src, direction = 0;
2384
2385                         memcpy(iv_buf,
2386                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2387                         /*
2388                          * This is kasumi f9, take direction from
2389                          * source buffer
2390                          */
2391                         length_in_bits = cop->sym->auth.data.length;
2392                         num_bytes = (length_in_bits >> 3);
2393                         auth_src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2394                         find_kasumif9_direction_and_length(
2395                                 auth_src, num_bytes, &length_in_bits, &direction);
2396                         length_in_bits -= 64;
2397                         cop->sym->auth.data.offset += 64;
2398                         d_offs = cop->sym->auth.data.offset;
2399                         auth_range_off = d_offs / 8;
2400                         cop->sym->auth.data.length = length_in_bits;
2401
2402                         /* Store it at end of auth iv */
2403                         iv_buf[8] = direction;
2404                         params.auth_iv_buf = iv_buf;
2405                 }
2406         }
2407
2408         d_lens = sym_op->auth.data.length;
2409
2410         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2411
2412         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2413                 if (sym_op->auth.digest.data) {
2414                         /*
2415                          * Digest to be generated
2416                          * in separate buffer
2417                          */
2418                         params.mac_buf.size = sess->mac_len;
2419                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2420                 } else {
2421                         uint32_t off = sym_op->auth.data.offset +
2422                                        sym_op->auth.data.length;
2423                         int32_t dlen, to_append;
2424
2425                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2426                         dlen = rte_pktmbuf_pkt_len(m_dst);
2427
2428                         /* Extend the mbuf if the digest would overrun it */
2429                         to_append = off + mac_len - dlen;
2430                         if (to_append > 0 &&
2431                             !rte_pktmbuf_append(m_dst, to_append)) {
2432                                 plt_dp_err("Failed to extend mbuf by %dB",
2433                                            to_append);
2434                                 ret = -EINVAL;
2435                                 goto free_mdata_and_exit;
2436                         }
2437
2438                         params.mac_buf.vaddr =
2439                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2440                         params.mac_buf.size = mac_len;
2441                 }
2442         } else {
2443                 uint64_t *op = mdata;
2444
2445                 /* Need space for storing generated mac */
2446                 space += 2 * sizeof(uint64_t);
2447
2448                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2449                 params.mac_buf.size = mac_len;
2450                 space += RTE_ALIGN_CEIL(mac_len, 8);
2451                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2452                 op[1] = mac_len;
2453                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2454         }
2455
2456         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2457         params.meta_buf.size -= space;
2458
2459         /* Out of place processing */
2460         params.src_iov = (void *)src;
2461
2462         /* Store SG I/O in the api for reuse */
2463         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2464                 plt_dp_err("Prepare src iov failed");
2465                 ret = -EINVAL;
2466                 goto free_mdata_and_exit;
2467         }
2468
2469         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2470         if (ret)
2471                 goto free_mdata_and_exit;
2472
2473         return 0;
2474
2475 free_mdata_and_exit:
2476         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2477                 rte_mempool_put(m_info->pool, infl_req->mdata);
2478 err_exit:
2479         return ret;
2480 }
2481 #endif /*_CNXK_SE_H_ */