dpdk.git: drivers/crypto/cnxk/cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
12 #define SRC_IOV_SIZE                                                           \
13         (sizeof(struct roc_se_iov_ptr) +                                       \
14          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE                                                           \
16         (sizeof(struct roc_se_iov_ptr) +                                       \
17          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
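/* SE (symmetric engine) session state shared by the cn9k and cn10k crypto PMDs. */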
19 struct cnxk_se_sess {
20         uint16_t cpt_op : 4;
21         uint16_t zsk_flag : 4;
22         uint16_t aes_gcm : 1;
23         uint16_t aes_ctr : 1;
24         uint16_t chacha_poly : 1;
25         uint16_t is_null : 1;
26         uint16_t is_gmac : 1;
27         uint16_t rsvd1 : 3;
28         uint16_t aad_length;
29         uint8_t mac_len;
30         uint8_t iv_length;
31         uint8_t auth_iv_length;
32         uint16_t iv_offset;
33         uint16_t auth_iv_offset;
34         uint32_t salt;
35         uint64_t cpt_inst_w7;
36         uint64_t cpt_inst_w2;
37         struct cnxk_cpt_qp *qp;
38         struct roc_se_ctx roc_se_ctx;
39 } __rte_cache_aligned;
40
41 static __rte_always_inline int
42 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess);
43
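/*
 * Pack the tail of a 25-byte IV into the form expected by the microcode:
 * byte 16 is copied as-is and bytes 17-24 are packed into bytes 17-22 by
 * dropping the two most significant bits of each byte (used when
 * iv_len == 25, see cpt_pdcp_alg_prep()).
 */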
44 static inline void
45 cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst)
46 {
47         iv_dst[16] = iv_src[16];
48         /* Pack the last 8 bytes of the IV into 6 bytes by
49          * discarding the 2 MSBs of each byte.
50          */
51         iv_dst[17] = (((iv_src[17] & 0x3f) << 2) | ((iv_src[18] >> 4) & 0x3));
52         iv_dst[18] = (((iv_src[18] & 0xf) << 4) | ((iv_src[19] >> 2) & 0xf));
53         iv_dst[19] = (((iv_src[19] & 0x3) << 6) | (iv_src[20] & 0x3f));
54
55         iv_dst[20] = (((iv_src[21] & 0x3f) << 2) | ((iv_src[22] >> 4) & 0x3));
56         iv_dst[21] = (((iv_src[22] & 0xf) << 4) | ((iv_src[23] >> 2) & 0xf));
57         iv_dst[22] = (((iv_src[23] & 0x3) << 6) | (iv_src[24] & 0x3f));
58 }
59
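/*
 * Copy the PDCP IV into the instruction data area in the layout the
 * microcode expects: SNOW3G IV words are reversed, ZUC IVs are copied
 * as-is (and optionally packed), and AES-CMAC (EIA2) gets a zeroized IV.
 */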
60 static inline void
61 pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type,
62              uint8_t pack_iv)
63 {
64         uint32_t *iv_s_temp, iv_temp[4];
65         int j;
66
67         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
68                 /*
69                  * DPDK provides the IV as IV3 IV2 IV1 IV0 (big endian);
70                  * the microcode needs it as IV0 IV1 IV2 IV3.
71                  */
72
73                 iv_s_temp = (uint32_t *)iv_s;
74
75                 for (j = 0; j < 4; j++)
76                         iv_temp[j] = iv_s_temp[3 - j];
77                 memcpy(iv_d, iv_temp, 16);
78         } else if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_ZUC) {
79                 /* ZUC doesn't need a swap */
80                 memcpy(iv_d, iv_s, 16);
81                 if (pack_iv)
82                         cpt_pack_iv(iv_s, iv_d);
83         } else {
84                 /* AES-CMAC EIA2, microcode expects 16B zeroized IV */
85                 for (j = 0; j < 16; j++)
86                         iv_d[j] = 0;
87         }
88 }
89
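/*
 * Check that the requested digest length matches the full digest size of
 * the selected auth algorithm. Returns 0 on success, -1 otherwise;
 * truncated digests are rejected.
 */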
90 static __rte_always_inline int
91 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
92 {
93         uint16_t mac_len = auth->digest_length;
94         int ret;
95
96         switch (auth->algo) {
97         case RTE_CRYPTO_AUTH_MD5:
98         case RTE_CRYPTO_AUTH_MD5_HMAC:
99                 ret = (mac_len == 16) ? 0 : -1;
100                 break;
101         case RTE_CRYPTO_AUTH_SHA1:
102         case RTE_CRYPTO_AUTH_SHA1_HMAC:
103                 ret = (mac_len == 20) ? 0 : -1;
104                 break;
105         case RTE_CRYPTO_AUTH_SHA224:
106         case RTE_CRYPTO_AUTH_SHA224_HMAC:
107                 ret = (mac_len == 28) ? 0 : -1;
108                 break;
109         case RTE_CRYPTO_AUTH_SHA256:
110         case RTE_CRYPTO_AUTH_SHA256_HMAC:
111                 ret = (mac_len == 32) ? 0 : -1;
112                 break;
113         case RTE_CRYPTO_AUTH_SHA384:
114         case RTE_CRYPTO_AUTH_SHA384_HMAC:
115                 ret = (mac_len == 48) ? 0 : -1;
116                 break;
117         case RTE_CRYPTO_AUTH_SHA512:
118         case RTE_CRYPTO_AUTH_SHA512_HMAC:
119                 ret = (mac_len == 64) ? 0 : -1;
120                 break;
121         case RTE_CRYPTO_AUTH_NULL:
122                 ret = 0;
123                 break;
124         default:
125                 ret = -1;
126         }
127
128         return ret;
129 }
130
131 static __rte_always_inline void
132 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
133 {
134         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
135         memcpy(fctx->enc.encr_iv, salt, 4);
136 }
137
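/*
 * Append one entry to an SG component list. Entries are packed four per
 * struct roc_se_sglist_comp, with lengths and pointers converted to big
 * endian. Returns the next free index.
 */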
138 static __rte_always_inline uint32_t
139 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
140              uint32_t size)
141 {
142         struct roc_se_sglist_comp *to = &list[i >> 2];
143
144         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
145         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
146         i++;
147         return i;
148 }
149
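/* Append an SG entry covering an entire roc_se_buf_ptr. */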
150 static __rte_always_inline uint32_t
151 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
152                       struct roc_se_buf_ptr *from)
153 {
154         struct roc_se_sglist_comp *to = &list[i >> 2];
155
156         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
157         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
158         i++;
159         return i;
160 }
161
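/*
 * Like fill_sg_comp_from_buf(), but the entry length is capped at the
 * remaining *psize, which is decremented by the amount consumed.
 */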
162 static __rte_always_inline uint32_t
163 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
164                           struct roc_se_buf_ptr *from, uint32_t *psize)
165 {
166         struct roc_se_sglist_comp *to = &list[i >> 2];
167         uint32_t size = *psize;
168         uint32_t e_len;
169
170         e_len = (size > from->size) ? from->size : size;
171         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
172         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
173         *psize -= e_len;
174         i++;
175         return i;
176 }
177
178 /*
179  * Fill the SG I/O list expected by the microcode
180  * from the IOV provided by the user.
181  */
182 static __rte_always_inline uint32_t
183 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
184                       struct roc_se_iov_ptr *from, uint32_t from_offset,
185                       uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
186                       uint32_t extra_offset)
187 {
188         int32_t j;
189         uint32_t extra_len = extra_buf ? extra_buf->size : 0;
190         uint32_t size = *psize;
191         struct roc_se_buf_ptr *bufs;
192
193         bufs = from->bufs;
194         for (j = 0; (j < from->buf_cnt) && size; j++) {
195                 uint64_t e_vaddr;
196                 uint32_t e_len;
197                 struct roc_se_sglist_comp *to = &list[i >> 2];
198
199                 if (unlikely(from_offset)) {
200                         if (from_offset >= bufs[j].size) {
201                                 from_offset -= bufs[j].size;
202                                 continue;
203                         }
204                         e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
205                         e_len = (size > (bufs[j].size - from_offset)) ?
206                                         (bufs[j].size - from_offset) :
207                                         size;
208                         from_offset = 0;
209                 } else {
210                         e_vaddr = (uint64_t)bufs[j].vaddr;
211                         e_len = (size > bufs[j].size) ? bufs[j].size : size;
212                 }
213
214                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
215                 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
216
217                 if (extra_len && (e_len >= extra_offset)) {
218                         /* Break the data at given offset */
219                         uint32_t next_len = e_len - extra_offset;
220                         uint64_t next_vaddr = e_vaddr + extra_offset;
221
222                         if (!extra_offset) {
223                                 i--;
224                         } else {
225                                 e_len = extra_offset;
226                                 size -= e_len;
227                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
228                         }
229
230                         extra_len = RTE_MIN(extra_len, size);
231                         /* Insert extra data ptr */
232                         if (extra_len) {
233                                 i++;
234                                 to = &list[i >> 2];
235                                 to->u.s.len[i % 4] =
236                                         rte_cpu_to_be_16(extra_len);
237                                 to->ptr[i % 4] = rte_cpu_to_be_64(
238                                         (uint64_t)extra_buf->vaddr);
239                                 size -= extra_len;
240                         }
241
242                         next_len = RTE_MIN(next_len, size);
243                         /* insert the rest of the data */
244                         if (next_len) {
245                                 i++;
246                                 to = &list[i >> 2];
247                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
248                                 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
249                                 size -= next_len;
250                         }
251                         extra_len = 0;
252
253                 } else {
254                         size -= e_len;
255                 }
256                 if (extra_offset)
257                         extra_offset -= size;
258                 i++;
259         }
260
261         *psize = size;
262         return (uint32_t)i;
263 }
264
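/*
 * Prepare a CPT instruction for a hash/HMAC-only operation: fill the
 * opcode and length words and build the gather/scatter lists in the
 * meta buffer (this path always uses SG mode).
 */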
265 static __rte_always_inline int
266 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
267                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
268 {
269         void *m_vaddr = params->meta_buf.vaddr;
270         uint32_t size, i;
271         uint16_t data_len, mac_len, key_len;
272         roc_se_auth_type hash_type;
273         struct roc_se_ctx *ctx;
274         struct roc_se_sglist_comp *gather_comp;
275         struct roc_se_sglist_comp *scatter_comp;
276         uint8_t *in_buffer;
277         uint32_t g_size_bytes, s_size_bytes;
278         union cpt_inst_w4 cpt_inst_w4;
279
280         ctx = params->ctx_buf.vaddr;
281
282         hash_type = ctx->hash_type;
283         mac_len = ctx->mac_len;
284         key_len = ctx->auth_key_len;
285         data_len = ROC_SE_AUTH_DLEN(d_lens);
286
287         /* GP op header */
288         cpt_inst_w4.s.opcode_minor = 0;
289         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
290         if (ctx->hmac) {
291                 cpt_inst_w4.s.opcode_major =
292                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
293                 cpt_inst_w4.s.param1 = key_len;
294                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
295         } else {
296                 cpt_inst_w4.s.opcode_major =
297                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
298                 cpt_inst_w4.s.param1 = 0;
299                 cpt_inst_w4.s.dlen = data_len;
300         }
301
302         /* Only the NULL auth case enters this block */
303         if (unlikely(!hash_type && !ctx->enc_cipher)) {
304                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
305                 /* Minor op is passthrough */
306                 cpt_inst_w4.s.opcode_minor = 0x03;
307                 /* Send out completion code only */
308                 cpt_inst_w4.s.param2 = 0x1;
309         }
310
311         /* DPTR has SG list */
312         in_buffer = m_vaddr;
313
314         ((uint16_t *)in_buffer)[0] = 0;
315         ((uint16_t *)in_buffer)[1] = 0;
316
317         /* TODO Add error check if space will be sufficient */
318         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
319
320         /*
321          * Input gather list
322          */
323
324         i = 0;
325
326         if (ctx->hmac) {
327                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
328                 /* Key */
329                 i = fill_sg_comp(gather_comp, i, k_vaddr,
330                                  RTE_ALIGN_CEIL(key_len, 8));
331         }
332
333         /* input data */
334         size = data_len;
335         if (size) {
336                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
337                                           &size, NULL, 0);
338                 if (unlikely(size)) {
339                         plt_dp_err("Insufficient src IOV size, short by %dB",
340                                    size);
341                         return -1;
342                 }
343         } else {
344                 /*
345                  * A gather entry is still accounted for even with
346                  * zero-length data in the hash & HMAC case.
347                  */
348                 i++;
349         }
350         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
351         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
352
353         /*
354          * Output Gather list
355          */
356
357         i = 0;
358         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
359                                                      g_size_bytes);
360
361         if (flags & ROC_SE_VALID_MAC_BUF) {
362                 if (unlikely(params->mac_buf.size < mac_len)) {
363                         plt_dp_err("Insufficient MAC size");
364                         return -1;
365                 }
366
367                 size = mac_len;
368                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
369                                               &size);
370         } else {
371                 size = mac_len;
372                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
373                                           data_len, &size, NULL, 0);
374                 if (unlikely(size)) {
375                         plt_dp_err("Insufficient dst IOV size, short by %dB",
376                                    size);
377                         return -1;
378                 }
379         }
380
381         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
382         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
383
384         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
385
386         /* This is DPTR len in case of SG mode */
387         cpt_inst_w4.s.dlen = size;
388
389         inst->dptr = (uint64_t)in_buffer;
390         inst->w4.u64 = cpt_inst_w4.u64;
391
392         return 0;
393 }
394
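/*
 * Prepare a flexi-crypto (FC) encrypt instruction, optionally with
 * authentication/AEAD. Direct mode is used when the request is a single
 * in-place buffer with headroom for the offset control word and IV;
 * otherwise the SG path is taken.
 */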
395 static __rte_always_inline int
396 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
397                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
398 {
399         uint32_t iv_offset = 0;
400         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
401         struct roc_se_ctx *se_ctx;
402         uint32_t cipher_type, hash_type;
403         uint32_t mac_len, size;
404         uint8_t iv_len = 16;
405         struct roc_se_buf_ptr *aad_buf = NULL;
406         uint32_t encr_offset, auth_offset;
407         uint32_t encr_data_len, auth_data_len, aad_len = 0;
408         uint32_t passthrough_len = 0;
409         union cpt_inst_w4 cpt_inst_w4;
410         void *offset_vaddr;
411         uint8_t op_minor;
412
413         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
414         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
415         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
416         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
417         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
418                 /* We don't support both AAD and auth data separately */
419                 auth_data_len = 0;
420                 auth_offset = 0;
421                 aad_len = fc_params->aad_buf.size;
422                 aad_buf = &fc_params->aad_buf;
423         }
424         se_ctx = fc_params->ctx_buf.vaddr;
425         cipher_type = se_ctx->enc_cipher;
426         hash_type = se_ctx->hash_type;
427         mac_len = se_ctx->mac_len;
428         op_minor = se_ctx->template_w4.s.opcode_minor;
429
430         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
431                 iv_len = 0;
432                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
433         }
434
435         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
436                 /*
437                  * When AAD is given, data above encr_offset is passed through.
438                  * Since AAD is a separate pointer rather than an offset, this
439                  * is a special case: the input is fragmented into passthrough
440                  * + encr_data, with the AAD inserted in between.
441                  */
442                 if (hash_type != ROC_SE_GMAC_TYPE) {
443                         passthrough_len = encr_offset;
444                         auth_offset = passthrough_len + iv_len;
445                         encr_offset = passthrough_len + aad_len + iv_len;
446                         auth_data_len = aad_len + encr_data_len;
447                 } else {
448                         passthrough_len = 16 + aad_len;
449                         auth_offset = passthrough_len + iv_len;
450                         auth_data_len = aad_len;
451                 }
452         } else {
453                 encr_offset += iv_len;
454                 auth_offset += iv_len;
455         }
456
457         /* Encryption */
458         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
459         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
460         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
461
462         if (hash_type == ROC_SE_GMAC_TYPE) {
463                 encr_offset = 0;
464                 encr_data_len = 0;
465         }
466
467         auth_dlen = auth_offset + auth_data_len;
468         enc_dlen = encr_data_len + encr_offset;
469         if (unlikely(encr_data_len & 0xf)) {
470                 if ((cipher_type == ROC_SE_DES3_CBC) ||
471                     (cipher_type == ROC_SE_DES3_ECB))
472                         enc_dlen =
473                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
474                 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
475                                 (cipher_type == ROC_SE_AES_ECB)))
476                         enc_dlen =
477                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
478         }
479
480         if (unlikely(auth_dlen > enc_dlen)) {
481                 inputlen = auth_dlen;
482                 outputlen = auth_dlen + mac_len;
483         } else {
484                 inputlen = enc_dlen;
485                 outputlen = enc_dlen + mac_len;
486         }
487
488         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
489                 outputlen = enc_dlen;
490
491         /* GP op header */
492         cpt_inst_w4.s.param1 = encr_data_len;
493         cpt_inst_w4.s.param2 = auth_data_len;
494
495         /*
496          * On cn9k and cn10k the IV and offset control word cannot be
497          * carried in the instruction itself and must be placed in the
498          * data buffer, so direct mode is used only when the input has
499          * enough headroom for them.
500          */
501         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
502                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
503                 void *dm_vaddr = fc_params->bufs[0].vaddr;
504
505                 /* Use Direct mode */
506
507                 offset_vaddr =
508                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
509
510                 /* DPTR */
511                 inst->dptr = (uint64_t)offset_vaddr;
512
513                 /* RPTR should just exclude offset control word */
514                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
515
516                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
517
518                 if (likely(iv_len)) {
519                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
520                                                       ROC_SE_OFF_CTRL_LEN);
521                         uint64_t *src = fc_params->iv_buf;
522                         dest[0] = src[0];
523                         dest[1] = src[1];
524                 }
525
526         } else {
527                 void *m_vaddr = fc_params->meta_buf.vaddr;
528                 uint32_t i, g_size_bytes, s_size_bytes;
529                 struct roc_se_sglist_comp *gather_comp;
530                 struct roc_se_sglist_comp *scatter_comp;
531                 uint8_t *in_buffer;
532
533                 /* This falls under strict SG mode */
534                 offset_vaddr = m_vaddr;
535                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
536
537                 m_vaddr = (uint8_t *)m_vaddr + size;
538
539                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
540
541                 if (likely(iv_len)) {
542                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
543                                                       ROC_SE_OFF_CTRL_LEN);
544                         uint64_t *src = fc_params->iv_buf;
545                         dest[0] = src[0];
546                         dest[1] = src[1];
547                 }
548
549                 /* DPTR has SG list */
550                 in_buffer = m_vaddr;
551
552                 ((uint16_t *)in_buffer)[0] = 0;
553                 ((uint16_t *)in_buffer)[1] = 0;
554
555                 /* TODO Add error check if space will be sufficient */
556                 gather_comp =
557                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
558
559                 /*
560                  * Input Gather List
561                  */
562
563                 i = 0;
564
565                 /* Offset control word that includes iv */
566                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
567                                  ROC_SE_OFF_CTRL_LEN + iv_len);
568
569                 /* Add input data */
570                 size = inputlen - iv_len;
571                 if (likely(size)) {
572                         uint32_t aad_offset = aad_len ? passthrough_len : 0;
573
574                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
575                                 i = fill_sg_comp_from_buf_min(
576                                         gather_comp, i, fc_params->bufs, &size);
577                         } else {
578                                 i = fill_sg_comp_from_iov(
579                                         gather_comp, i, fc_params->src_iov, 0,
580                                         &size, aad_buf, aad_offset);
581                         }
582
583                         if (unlikely(size)) {
584                                 plt_dp_err("Insufficient buffer space,"
585                                            " size %d needed",
586                                            size);
587                                 return -1;
588                         }
589                 }
590                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
591                 g_size_bytes =
592                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
593
594                 /*
595                  * Output Scatter list
596                  */
597                 i = 0;
598                 scatter_comp =
599                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
600                                                       g_size_bytes);
601
602                 /* Add IV */
603                 if (likely(iv_len)) {
604                         i = fill_sg_comp(scatter_comp, i,
605                                          (uint64_t)offset_vaddr +
606                                                  ROC_SE_OFF_CTRL_LEN,
607                                          iv_len);
608                 }
609
610                 /* output data or output data + digest */
611                 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
612                         size = outputlen - iv_len - mac_len;
613                         if (size) {
614                                 uint32_t aad_offset =
615                                         aad_len ? passthrough_len : 0;
616
617                                 if (unlikely(flags &
618                                              ROC_SE_SINGLE_BUF_INPLACE)) {
619                                         i = fill_sg_comp_from_buf_min(
620                                                 scatter_comp, i,
621                                                 fc_params->bufs, &size);
622                                 } else {
623                                         i = fill_sg_comp_from_iov(
624                                                 scatter_comp, i,
625                                                 fc_params->dst_iov, 0, &size,
626                                                 aad_buf, aad_offset);
627                                 }
628                                 if (unlikely(size)) {
629                                         plt_dp_err("Insufficient buffer"
630                                                    " space, size %d needed",
631                                                    size);
632                                         return -1;
633                                 }
634                         }
635                         /* mac_data */
636                         if (mac_len) {
637                                 i = fill_sg_comp_from_buf(scatter_comp, i,
638                                                           &fc_params->mac_buf);
639                         }
640                 } else {
641                         /* Output including mac */
642                         size = outputlen - iv_len;
643                         if (likely(size)) {
644                                 uint32_t aad_offset =
645                                         aad_len ? passthrough_len : 0;
646
647                                 if (unlikely(flags &
648                                              ROC_SE_SINGLE_BUF_INPLACE)) {
649                                         i = fill_sg_comp_from_buf_min(
650                                                 scatter_comp, i,
651                                                 fc_params->bufs, &size);
652                                 } else {
653                                         i = fill_sg_comp_from_iov(
654                                                 scatter_comp, i,
655                                                 fc_params->dst_iov, 0, &size,
656                                                 aad_buf, aad_offset);
657                                 }
658                                 if (unlikely(size)) {
659                                         plt_dp_err("Insufficient buffer"
660                                                    " space, size %d needed",
661                                                    size);
662                                         return -1;
663                                 }
664                         }
665                 }
666                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
667                 s_size_bytes =
668                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
669
670                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
671
672                 /* This is DPTR len in case of SG mode */
673                 cpt_inst_w4.s.dlen = size;
674
675                 inst->dptr = (uint64_t)in_buffer;
676         }
677
678         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
679                      (auth_offset >> 8))) {
680                 plt_dp_err("Offset not supported");
681                 plt_dp_err("enc_offset: %d", encr_offset);
682                 plt_dp_err("iv_offset : %d", iv_offset);
683                 plt_dp_err("auth_offset: %d", auth_offset);
684                 return -1;
685         }
686
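        /*
         * Offset control word layout (before the byte swap): encr_offset in
         * bits [31:16], iv_offset in bits [15:8], auth_offset in bits [7:0].
         */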
687         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
688                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
689                 ((uint64_t)auth_offset));
690
691         inst->w4.u64 = cpt_inst_w4.u64;
692         return 0;
693 }
694
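/*
 * Decrypt counterpart of cpt_enc_hmac_prep(): the MAC is consumed as part
 * of the input, so the output length excludes it.
 */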
695 static __rte_always_inline int
696 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
697                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
698 {
699         uint32_t iv_offset = 0, size;
700         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
701         struct roc_se_ctx *se_ctx;
702         int32_t hash_type, mac_len;
703         uint8_t iv_len = 16;
704         struct roc_se_buf_ptr *aad_buf = NULL;
705         uint32_t encr_offset, auth_offset;
706         uint32_t encr_data_len, auth_data_len, aad_len = 0;
707         uint32_t passthrough_len = 0;
708         union cpt_inst_w4 cpt_inst_w4;
709         void *offset_vaddr;
710         uint8_t op_minor;
711
712         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
713         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
714         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
715         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
716
717         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
718                 /* We don't support both AAD and auth data separately */
719                 auth_data_len = 0;
720                 auth_offset = 0;
721                 aad_len = fc_params->aad_buf.size;
722                 aad_buf = &fc_params->aad_buf;
723         }
724
725         se_ctx = fc_params->ctx_buf.vaddr;
726         hash_type = se_ctx->hash_type;
727         mac_len = se_ctx->mac_len;
728         op_minor = se_ctx->template_w4.s.opcode_minor;
729
730         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
731                 iv_len = 0;
732                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
733         }
734
735         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
736                 /*
737                  * When AAD is given, data above encr_offset is passed through.
738                  * Since AAD is a separate pointer rather than an offset, this
739                  * is a special case: the input is fragmented into passthrough
740                  * + encr_data, with the AAD inserted in between.
741                  */
742                 if (hash_type != ROC_SE_GMAC_TYPE) {
743                         passthrough_len = encr_offset;
744                         auth_offset = passthrough_len + iv_len;
745                         encr_offset = passthrough_len + aad_len + iv_len;
746                         auth_data_len = aad_len + encr_data_len;
747                 } else {
748                         passthrough_len = 16 + aad_len;
749                         auth_offset = passthrough_len + iv_len;
750                         auth_data_len = aad_len;
751                 }
752         } else {
753                 encr_offset += iv_len;
754                 auth_offset += iv_len;
755         }
756
757         /* Decryption */
758         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
759         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
760         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
761
762         if (hash_type == ROC_SE_GMAC_TYPE) {
763                 encr_offset = 0;
764                 encr_data_len = 0;
765         }
766
767         enc_dlen = encr_offset + encr_data_len;
768         auth_dlen = auth_offset + auth_data_len;
769
770         if (auth_dlen > enc_dlen) {
771                 inputlen = auth_dlen + mac_len;
772                 outputlen = auth_dlen;
773         } else {
774                 inputlen = enc_dlen + mac_len;
775                 outputlen = enc_dlen;
776         }
777
778         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
779                 outputlen = inputlen = enc_dlen;
780
781         cpt_inst_w4.s.param1 = encr_data_len;
782         cpt_inst_w4.s.param2 = auth_data_len;
783
784         /*
785          * On cn9k and cn10k the IV and offset control word cannot be
786          * carried in the instruction itself and must be placed in the
787          * data buffer, so direct mode is used only when the input has
788          * enough headroom for them.
789          */
790         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
791                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
792                 void *dm_vaddr = fc_params->bufs[0].vaddr;
793
794                 /* Use Direct mode */
795
796                 offset_vaddr =
797                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
798                 inst->dptr = (uint64_t)offset_vaddr;
799
800                 /* RPTR should just exclude offset control word */
801                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
802
803                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
804
805                 if (likely(iv_len)) {
806                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
807                                                       ROC_SE_OFF_CTRL_LEN);
808                         uint64_t *src = fc_params->iv_buf;
809                         dest[0] = src[0];
810                         dest[1] = src[1];
811                 }
812
813         } else {
814                 void *m_vaddr = fc_params->meta_buf.vaddr;
815                 uint32_t g_size_bytes, s_size_bytes;
816                 struct roc_se_sglist_comp *gather_comp;
817                 struct roc_se_sglist_comp *scatter_comp;
818                 uint8_t *in_buffer;
819                 uint8_t i = 0;
820
821                 /* This falls under strict SG mode */
822                 offset_vaddr = m_vaddr;
823                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
824
825                 m_vaddr = (uint8_t *)m_vaddr + size;
826
827                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
828
829                 if (likely(iv_len)) {
830                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
831                                                       ROC_SE_OFF_CTRL_LEN);
832                         uint64_t *src = fc_params->iv_buf;
833                         dest[0] = src[0];
834                         dest[1] = src[1];
835                 }
836
837                 /* DPTR has SG list */
838                 in_buffer = m_vaddr;
839
840                 ((uint16_t *)in_buffer)[0] = 0;
841                 ((uint16_t *)in_buffer)[1] = 0;
842
843                 /* TODO Add error check if space will be sufficient */
844                 gather_comp =
845                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
846
847                 /*
848                  * Input Gather List
849                  */
850                 i = 0;
851
852                 /* Offset control word that includes iv */
853                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
854                                  ROC_SE_OFF_CTRL_LEN + iv_len);
855
856                 /* Add input data */
857                 if (flags & ROC_SE_VALID_MAC_BUF) {
858                         size = inputlen - iv_len - mac_len;
859                         if (size) {
860                                 /* input data only */
861                                 if (unlikely(flags &
862                                              ROC_SE_SINGLE_BUF_INPLACE)) {
863                                         i = fill_sg_comp_from_buf_min(
864                                                 gather_comp, i, fc_params->bufs,
865                                                 &size);
866                                 } else {
867                                         uint32_t aad_offset =
868                                                 aad_len ? passthrough_len : 0;
869
870                                         i = fill_sg_comp_from_iov(
871                                                 gather_comp, i,
872                                                 fc_params->src_iov, 0, &size,
873                                                 aad_buf, aad_offset);
874                                 }
875                                 if (unlikely(size)) {
876                                         plt_dp_err("Insufficient buffer"
877                                                    " space, size %d needed",
878                                                    size);
879                                         return -1;
880                                 }
881                         }
882
883                         /* mac data */
884                         if (mac_len) {
885                                 i = fill_sg_comp_from_buf(gather_comp, i,
886                                                           &fc_params->mac_buf);
887                         }
888                 } else {
889                         /* input data + mac */
890                         size = inputlen - iv_len;
891                         if (size) {
892                                 if (unlikely(flags &
893                                              ROC_SE_SINGLE_BUF_INPLACE)) {
894                                         i = fill_sg_comp_from_buf_min(
895                                                 gather_comp, i, fc_params->bufs,
896                                                 &size);
897                                 } else {
898                                         uint32_t aad_offset =
899                                                 aad_len ? passthrough_len : 0;
900
901                                         if (unlikely(!fc_params->src_iov)) {
902                                                 plt_dp_err("Bad input args");
903                                                 return -1;
904                                         }
905
906                                         i = fill_sg_comp_from_iov(
907                                                 gather_comp, i,
908                                                 fc_params->src_iov, 0, &size,
909                                                 aad_buf, aad_offset);
910                                 }
911
912                                 if (unlikely(size)) {
913                                         plt_dp_err("Insufficient buffer"
914                                                    " space, size %d needed",
915                                                    size);
916                                         return -1;
917                                 }
918                         }
919                 }
920                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
921                 g_size_bytes =
922                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
923
924                 /*
925                  * Output Scatter List
926                  */
927
928                 i = 0;
929                 scatter_comp =
930                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
931                                                       g_size_bytes);
932
933                 /* Add iv */
934                 if (iv_len) {
935                         i = fill_sg_comp(scatter_comp, i,
936                                          (uint64_t)offset_vaddr +
937                                                  ROC_SE_OFF_CTRL_LEN,
938                                          iv_len);
939                 }
940
941                 /* Add output data */
942                 size = outputlen - iv_len;
943                 if (size) {
944                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
945                                 /* handle single buffer here */
946                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
947                                                               fc_params->bufs,
948                                                               &size);
949                         } else {
950                                 uint32_t aad_offset =
951                                         aad_len ? passthrough_len : 0;
952
953                                 if (unlikely(!fc_params->dst_iov)) {
954                                         plt_dp_err("Bad input args");
955                                         return -1;
956                                 }
957
958                                 i = fill_sg_comp_from_iov(
959                                         scatter_comp, i, fc_params->dst_iov, 0,
960                                         &size, aad_buf, aad_offset);
961                         }
962
963                         if (unlikely(size)) {
964                                 plt_dp_err("Insufficient buffer space,"
965                                            " size %d needed",
966                                            size);
967                                 return -1;
968                         }
969                 }
970
971                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
972                 s_size_bytes =
973                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
974
975                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
976
977                 /* This is DPTR len in case of SG mode */
978                 cpt_inst_w4.s.dlen = size;
979
980                 inst->dptr = (uint64_t)in_buffer;
981         }
982
983         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
984                      (auth_offset >> 8))) {
985                 plt_dp_err("Offset not supported");
986                 plt_dp_err("enc_offset: %d", encr_offset);
987                 plt_dp_err("iv_offset : %d", iv_offset);
988                 plt_dp_err("auth_offset: %d", auth_offset);
989                 return -1;
990         }
991
992         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
993                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
994                 ((uint64_t)auth_offset));
995
996         inst->w4.u64 = cpt_inst_w4.u64;
997         return 0;
998 }
999
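/*
 * Prepare a PDCP (SNOW3G/ZUC/AES-CTR) instruction. zsk_flags == 0x1
 * selects the integrity (auth) path, otherwise the cipher path is taken;
 * param1/param2 carry the data lengths in bits.
 */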
1000 static __rte_always_inline int
1001 cpt_pdcp_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1002                   struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1003 {
1004         uint32_t size;
1005         int32_t inputlen, outputlen;
1006         struct roc_se_ctx *se_ctx;
1007         uint32_t mac_len = 0;
1008         uint8_t pdcp_alg_type;
1009         uint32_t encr_offset, auth_offset;
1010         uint32_t encr_data_len, auth_data_len;
1011         int flags, iv_len;
1012         uint64_t offset_ctrl;
1013         uint64_t *offset_vaddr;
1014         uint8_t *iv_s;
1015         uint8_t pack_iv = 0;
1016         union cpt_inst_w4 cpt_inst_w4;
1017
1018         se_ctx = params->ctx_buf.vaddr;
1019         flags = se_ctx->zsk_flags;
1020         mac_len = se_ctx->mac_len;
1021         pdcp_alg_type = se_ctx->pdcp_alg_type;
1022
1023         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_PDCP;
1024         cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;
1025
1026         if (flags == 0x1) {
1027                 iv_s = params->auth_iv_buf;
1028
1029                 /*
1030                  * Microcode expects offsets in bytes
1031                  * TODO: Rounding off
1032                  */
1033                 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1034                 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
1035
1036                 if (se_ctx->pdcp_alg_type != ROC_SE_PDCP_ALG_TYPE_AES_CTR) {
1037                         iv_len = params->auth_iv_len;
1038
1039                         if (iv_len == 25) {
1040                                 iv_len -= 2;
1041                                 pack_iv = 1;
1042                         }
1043
1044                         auth_offset = auth_offset / 8;
1045
1046                         /* consider iv len */
1047                         auth_offset += iv_len;
1048
1049                         inputlen =
1050                                 auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1051                 } else {
1052                         iv_len = 16;
1053
1054                         /* consider iv len */
1055                         auth_offset += iv_len;
1056
1057                         inputlen = auth_offset + auth_data_len;
1058                 }
1059
1060                 outputlen = mac_len;
1061
1062                 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1063
1064                 encr_data_len = 0;
1065                 encr_offset = 0;
1066         } else {
1067                 iv_s = params->iv_buf;
1068                 iv_len = params->cipher_iv_len;
1069
1070                 if (iv_len == 25) {
1071                         iv_len -= 2;
1072                         pack_iv = 1;
1073                 }
1074
1075                 /*
1076                  * Microcode expects offsets in bytes
1077                  * TODO: Rounding off
1078                  */
1079                 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1080
1081                 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
1082                 encr_offset = encr_offset / 8;
1083                 /* consider iv len */
1084                 encr_offset += iv_len;
1085
1086                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1087                 outputlen = inputlen;
1088
1089                 /* iv offset is 0 */
1090                 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1091
1092                 auth_data_len = 0;
1093                 auth_offset = 0;
1094         }
1095
1096         if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
1097                 plt_dp_err("Offset not supported");
1098                 plt_dp_err("enc_offset: %d", encr_offset);
1099                 plt_dp_err("auth_offset: %d", auth_offset);
1100                 return -1;
1101         }
1102
1103         /*
1104          * GP op header, lengths are expected in bits.
1105          */
1106         cpt_inst_w4.s.param1 = encr_data_len;
1107         cpt_inst_w4.s.param2 = auth_data_len;
1108
1109         /*
1110          * On cn9k and cn10k the IV and offset control word cannot be
1111          * carried in the instruction itself and must be placed in the
1112          * data buffer, so direct mode is used only when the input has
1113          * enough headroom for them.
1114          */
1115         if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1116                    (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1117                 void *dm_vaddr = params->bufs[0].vaddr;
1118
1119                 /* Use Direct mode */
1120
1121                 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1122                                             ROC_SE_OFF_CTRL_LEN - iv_len);
1123
1124                 /* DPTR */
1125                 inst->dptr = (uint64_t)offset_vaddr;
1126                 /* RPTR should just exclude offset control word */
1127                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1128
1129                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1130
1131                 uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
1132                 pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);
1133
1134                 *offset_vaddr = offset_ctrl;
1135         } else {
1136                 void *m_vaddr = params->meta_buf.vaddr;
1137                 uint32_t i, g_size_bytes, s_size_bytes;
1138                 struct roc_se_sglist_comp *gather_comp;
1139                 struct roc_se_sglist_comp *scatter_comp;
1140                 uint8_t *in_buffer;
1141                 uint8_t *iv_d;
1142
1143                 /* Save space for the offset control word and IV */
1144                 offset_vaddr = m_vaddr;
1145
1146                 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN +
1147                           RTE_ALIGN_CEIL(iv_len, 8);
1148
1149                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1150
1151                 /* DPTR has SG list */
1152                 in_buffer = m_vaddr;
1153
1154                 ((uint16_t *)in_buffer)[0] = 0;
1155                 ((uint16_t *)in_buffer)[1] = 0;
1156
1157                 /* TODO Add error check if space will be sufficient */
1158                 gather_comp =
1159                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1160
1161                 /*
1162                  * Input Gather List
1163                  */
1164                 i = 0;
1165
1166                 /* Offset control word followed by iv */
1167
1168                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1169                                  ROC_SE_OFF_CTRL_LEN + iv_len);
1170
1171                 /* iv offset is 0 */
1172                 *offset_vaddr = offset_ctrl;
1173
1174                 iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
1175                 pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);
1176
1177                 /* input data */
1178                 size = inputlen - iv_len;
1179                 if (size) {
1180                         i = fill_sg_comp_from_iov(gather_comp, i,
1181                                                   params->src_iov, 0, &size,
1182                                                   NULL, 0);
1183                         if (unlikely(size)) {
1184                                 plt_dp_err("Insufficient buffer space,"
1185                                            " size %d needed",
1186                                            size);
1187                                 return -1;
1188                         }
1189                 }
1190                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1191                 g_size_bytes =
1192                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1193
1194                 /*
1195                  * Output Scatter List
1196                  */
1197
1198                 i = 0;
1199                 scatter_comp =
1200                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1201                                                       g_size_bytes);
1202
1203                 if (flags == 0x1) {
1204                         /* IV in SLIST only for EEA3 & UEA2 */
1205                         iv_len = 0;
1206                 }
1207
1208                 if (iv_len) {
1209                         i = fill_sg_comp(scatter_comp, i,
1210                                          (uint64_t)offset_vaddr +
1211                                                  ROC_SE_OFF_CTRL_LEN,
1212                                          iv_len);
1213                 }
1214
1215                 /* Add output data */
1216                 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1217                         size = outputlen - iv_len - mac_len;
1218                         if (size) {
1219                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1220                                                           params->dst_iov, 0,
1221                                                           &size, NULL, 0);
1222
1223                                 if (unlikely(size)) {
1224                                         plt_dp_err("Insufficient buffer space,"
1225                                                    " size %d needed",
1226                                                    size);
1227                                         return -1;
1228                                 }
1229                         }
1230
1231                         /* mac data */
1232                         if (mac_len) {
1233                                 i = fill_sg_comp_from_buf(scatter_comp, i,
1234                                                           &params->mac_buf);
1235                         }
1236                 } else {
1237                         /* Output including mac */
1238                         size = outputlen - iv_len;
1239                         if (size) {
1240                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1241                                                           params->dst_iov, 0,
1242                                                           &size, NULL, 0);
1243
1244                                 if (unlikely(size)) {
1245                                         plt_dp_err("Insufficient buffer space,"
1246                                                    " size %d needed",
1247                                                    size);
1248                                         return -1;
1249                                 }
1250                         }
1251                 }
1252                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1253                 s_size_bytes =
1254                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1255
1256                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1257
1258                 /* This is DPTR len in case of SG mode */
1259                 cpt_inst_w4.s.dlen = size;
1260
1261                 inst->dptr = (uint64_t)in_buffer;
1262         }
1263
1264         inst->w4.u64 = cpt_inst_w4.u64;
1265
1266         return 0;
1267 }
1268
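/*
 * Prepare a KASUMI instruction (F8 cipher when zsk_flags is 0, F9 auth
 * otherwise). The direction bit is taken from bit 0 of IV byte 8 and SG
 * mode is always used.
 */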
1269 static __rte_always_inline int
1270 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1271                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1272 {
1273         void *m_vaddr = params->meta_buf.vaddr;
1274         uint32_t size;
1275         int32_t inputlen = 0, outputlen = 0;
1276         struct roc_se_ctx *se_ctx;
1277         uint32_t mac_len = 0;
1278         uint8_t i = 0;
1279         uint32_t encr_offset, auth_offset;
1280         uint32_t encr_data_len, auth_data_len;
1281         int flags;
1282         uint8_t *iv_s, *iv_d, iv_len = 8;
1283         uint8_t dir = 0;
1284         uint64_t *offset_vaddr;
1285         union cpt_inst_w4 cpt_inst_w4;
1286         uint8_t *in_buffer;
1287         uint32_t g_size_bytes, s_size_bytes;
1288         struct roc_se_sglist_comp *gather_comp;
1289         struct roc_se_sglist_comp *scatter_comp;
1290
1291         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1292         auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1293         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1294         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1295
1296         se_ctx = params->ctx_buf.vaddr;
1297         flags = se_ctx->zsk_flags;
1298         mac_len = se_ctx->mac_len;
1299
1300         if (flags == 0x0)
1301                 iv_s = params->iv_buf;
1302         else
1303                 iv_s = params->auth_iv_buf;
1304
1305         dir = iv_s[8] & 0x1;
1306
1307         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1308
1309         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1310         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1311                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1312
1313         /*
1314          * GP op header, lengths are expected in bits.
1315          */
1316         cpt_inst_w4.s.param1 = encr_data_len;
1317         cpt_inst_w4.s.param2 = auth_data_len;
1318
1319         /* consider iv len */
1320         if (flags == 0x0) {
1321                 encr_offset += iv_len;
1322                 auth_offset += iv_len;
1323         }
1324
1325         /* save space for offset ctrl and iv */
1326         offset_vaddr = m_vaddr;
1327
1328         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1329
1330         /* DPTR has SG list */
1331         in_buffer = m_vaddr;
1332
1333         ((uint16_t *)in_buffer)[0] = 0;
1334         ((uint16_t *)in_buffer)[1] = 0;
1335
1336         /* TODO Add error check if space will be sufficient */
1337         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1338
1339         /*
1340          * Input Gather List
1341          */
1342         i = 0;
1343
1344         /* Offset control word followed by iv */
1345
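        /*
         * zsk_flags == 0 selects the F8 (cipher) path using iv_buf;
         * a non-zero value selects the F9 (auth) path using auth_iv_buf.
         * Bit lengths are rounded up to bytes for the DPTR computation.
         */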
1346         if (flags == 0x0) {
1347                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1348                 outputlen = inputlen;
1349                 /* iv offset is 0 */
1350                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1351                 if (unlikely((encr_offset >> 16))) {
1352                         plt_dp_err("Offset not supported");
1353                         plt_dp_err("enc_offset: %d", encr_offset);
1354                         return -1;
1355                 }
1356         } else {
1357                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1358                 outputlen = mac_len;
1359                 /* iv offset is 0 */
1360                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1361                 if (unlikely((auth_offset >> 8))) {
1362                         plt_dp_err("Offset not supported");
1363                         plt_dp_err("auth_offset: %d", auth_offset);
1364                         return -1;
1365                 }
1366         }
1367
1368         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1369                          ROC_SE_OFF_CTRL_LEN + iv_len);
1370
1371         /* IV */
1372         iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1373         memcpy(iv_d, iv_s, iv_len);
1374
1375         /* input data */
1376         size = inputlen - iv_len;
1377         if (size) {
1378                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1379                                           &size, NULL, 0);
1380
1381                 if (unlikely(size)) {
1382                         plt_dp_err("Insufficient buffer space,"
1383                                    " size %d needed",
1384                                    size);
1385                         return -1;
1386                 }
1387         }
1388         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1389         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1390
1391         /*
1392          * Output Scatter List
1393          */
1394
1395         i = 0;
1396         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1397                                                      g_size_bytes);
1398
1399         if (flags == 0x1) {
1400                 /* IV is in the scatter list only for F8; skip for F9 */
1401                 iv_len = 0;
1402         }
1403
1404         /* IV */
1405         if (iv_len) {
1406                 i = fill_sg_comp(scatter_comp, i,
1407                                  (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1408                                  iv_len);
1409         }
1410
1411         /* Add output data */
1412         if (req_flags & ROC_SE_VALID_MAC_BUF) {
1413                 size = outputlen - iv_len - mac_len;
1414                 if (size) {
1415                         i = fill_sg_comp_from_iov(scatter_comp, i,
1416                                                   params->dst_iov, 0, &size,
1417                                                   NULL, 0);
1418
1419                         if (unlikely(size)) {
1420                                 plt_dp_err("Insufficient buffer space,"
1421                                            " size %d needed",
1422                                            size);
1423                                 return -1;
1424                         }
1425                 }
1426
1427                 /* mac data */
1428                 if (mac_len) {
1429                         i = fill_sg_comp_from_buf(scatter_comp, i,
1430                                                   &params->mac_buf);
1431                 }
1432         } else {
1433                 /* Output including mac */
1434                 size = outputlen - iv_len;
1435                 if (size) {
1436                         i = fill_sg_comp_from_iov(scatter_comp, i,
1437                                                   params->dst_iov, 0, &size,
1438                                                   NULL, 0);
1439
1440                         if (unlikely(size)) {
1441                                 plt_dp_err("Insufficient buffer space,"
1442                                            " size %d needed",
1443                                            size);
1444                                 return -1;
1445                         }
1446                 }
1447         }
1448         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1449         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1450
1451         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1452
1453         /* This is DPTR len in case of SG mode */
1454         cpt_inst_w4.s.dlen = size;
1455
1456         inst->dptr = (uint64_t)in_buffer;
1457         inst->w4.u64 = cpt_inst_w4.u64;
1458
1459         return 0;
1460 }
1461
1462 static __rte_always_inline int
1463 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1464                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1465 {
1466         void *m_vaddr = params->meta_buf.vaddr;
1467         uint32_t size;
1468         int32_t inputlen = 0, outputlen;
1469         struct roc_se_ctx *se_ctx;
1470         uint8_t i = 0, iv_len = 8;
1471         uint32_t encr_offset;
1472         uint32_t encr_data_len;
1473         int flags;
1474         uint8_t dir = 0;
1475         uint64_t *offset_vaddr;
1476         union cpt_inst_w4 cpt_inst_w4;
1477         uint8_t *in_buffer;
1478         uint32_t g_size_bytes, s_size_bytes;
1479         struct roc_se_sglist_comp *gather_comp;
1480         struct roc_se_sglist_comp *scatter_comp;
1481
1482         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1483         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1484
1485         se_ctx = params->ctx_buf.vaddr;
1486         flags = se_ctx->zsk_flags;
1487
1488         cpt_inst_w4.u64 = 0;
1489         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1490
1491         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1492         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1493                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1494
1495         /*
1496          * GP op header, lengths are expected in bits.
1497          */
1498         cpt_inst_w4.s.param1 = encr_data_len;
1499
1500         /* consider iv len */
1501         encr_offset += iv_len;
1502
1503         inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1504         outputlen = inputlen;
1505
1506         /* save space for offset ctrl & iv */
1507         offset_vaddr = m_vaddr;
1508
1509         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1510
1511         /* DPTR has SG list */
1512         in_buffer = m_vaddr;
1513
1514         ((uint16_t *)in_buffer)[0] = 0;
1515         ((uint16_t *)in_buffer)[1] = 0;
1516
1517         /* TODO Add error check if space will be sufficient */
1518         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1519
1520         /*
1521          * Input Gather List
1522          */
1523         i = 0;
1524
1525         /* Offset control word followed by iv */
1526         *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1527         if (unlikely((encr_offset >> 16))) {
1528                 plt_dp_err("Offset not supported");
1529                 plt_dp_err("enc_offset: %d", encr_offset);
1530                 return -1;
1531         }
1532
1533         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1534                          ROC_SE_OFF_CTRL_LEN + iv_len);
1535
1536         /* IV */
1537         memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1538                iv_len);
1539
1540         /* Add input data */
1541         size = inputlen - iv_len;
1542         if (size) {
1543                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1544                                           &size, NULL, 0);
1545                 if (unlikely(size)) {
1546                         plt_dp_err("Insufficient buffer space,"
1547                                    " size %d needed",
1548                                    size);
1549                         return -1;
1550                 }
1551         }
1552         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1553         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1554
1555         /*
1556          * Output Scatter List
1557          */
1558
1559         i = 0;
1560         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1561                                                      g_size_bytes);
1562
1563         /* IV */
1564         i = fill_sg_comp(scatter_comp, i,
1565                          (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1566
1567         /* Add output data */
1568         size = outputlen - iv_len;
1569         if (size) {
1570                 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1571                                           &size, NULL, 0);
1572                 if (unlikely(size)) {
1573                         plt_dp_err("Insufficient buffer space,"
1574                                    " size %d needed",
1575                                    size);
1576                         return -1;
1577                 }
1578         }
1579         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1580         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1581
1582         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1583
1584         /* This is DPTR len in case of SG mode */
1585         cpt_inst_w4.s.dlen = size;
1586
1587         inst->dptr = (uint64_t)in_buffer;
1588         inst->w4.u64 = cpt_inst_w4.u64;
1589
1590         return 0;
1591 }
1592
1593 static __rte_always_inline int
1594 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1595                      struct roc_se_fc_params *fc_params,
1596                      struct cpt_inst_s *inst)
1597 {
1598         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1599         uint8_t fc_type;
1600         int ret = -1;
1601
1602         fc_type = ctx->fc_type;
1603
1604         if (likely(fc_type == ROC_SE_FC_GEN)) {
1605                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1606         } else if (fc_type == ROC_SE_PDCP) {
1607                 ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, fc_params, inst);
1608         } else if (fc_type == ROC_SE_KASUMI) {
1609                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1610         }
1611
1612         /*
1613          * For the AUTH_ONLY case, the microcode only supports
1614          * digest generation; verification must be done in
1615          * software using memcmp().
1616          */
1617
1618         return ret;
1619 }
1620
1621 static __rte_always_inline int
1622 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1623                      struct roc_se_fc_params *fc_params,
1624                      struct cpt_inst_s *inst)
1625 {
1626         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1627         uint8_t fc_type;
1628         int ret = -1;
1629
1630         fc_type = ctx->fc_type;
1631
1632         if (likely(fc_type == ROC_SE_FC_GEN)) {
1633                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1634         } else if (fc_type == ROC_SE_PDCP) {
1635                 ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, fc_params, inst);
1636         } else if (fc_type == ROC_SE_KASUMI) {
1637                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1638                                           inst);
1639         } else if (fc_type == ROC_SE_HASH_HMAC) {
1640                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1641         }
1642
1643         return ret;
1644 }
1645
1646 static __rte_always_inline int
1647 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1648 {
1649         struct rte_crypto_aead_xform *aead_form;
1650         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1651         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1652         uint32_t cipher_key_len = 0;
1653         uint8_t aes_gcm = 0;
1654         aead_form = &xform->aead;
1655
1656         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1657                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1658                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1659         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1660                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1661                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1662         } else {
1663                 plt_dp_err("Unknown aead operation");
1664                 return -1;
1665         }
1666         switch (aead_form->algo) {
1667         case RTE_CRYPTO_AEAD_AES_GCM:
1668                 enc_type = ROC_SE_AES_GCM;
1669                 cipher_key_len = 16;
1670                 aes_gcm = 1;
1671                 break;
1672         case RTE_CRYPTO_AEAD_AES_CCM:
1673                 plt_dp_err("Crypto: Unsupported cipher algo %u",
1674                            aead_form->algo);
1675                 return -1;
1676         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1677                 enc_type = ROC_SE_CHACHA20;
1678                 auth_type = ROC_SE_POLY1305;
1679                 cipher_key_len = 32;
1680                 sess->chacha_poly = 1;
1681                 break;
1682         default:
1683                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1684                            aead_form->algo);
1685                 return -1;
1686         }
1687         if (aead_form->key.length < cipher_key_len) {
1688                 plt_dp_err("Invalid cipher params keylen %u",
1689                            aead_form->key.length);
1690                 return -1;
1691         }
1692         sess->zsk_flag = 0;
1693         sess->aes_gcm = aes_gcm;
1694         sess->mac_len = aead_form->digest_length;
1695         sess->iv_offset = aead_form->iv.offset;
1696         sess->iv_length = aead_form->iv.length;
1697         sess->aad_length = aead_form->aad_length;
1698
1699         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1700                                          aead_form->key.data,
1701                                          aead_form->key.length, NULL)))
1702                 return -1;
1703
1704         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1705                                          aead_form->digest_length)))
1706                 return -1;
1707
1708         return 0;
1709 }
1710
1711 static __rte_always_inline int
1712 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1713 {
1714         struct rte_crypto_cipher_xform *c_form;
1715         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1716         uint32_t cipher_key_len = 0;
1717         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1718
1719         c_form = &xform->cipher;
1720
1721         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1722                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1723         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1724                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1725                 if (xform->next != NULL &&
1726                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1727                         /* Perform decryption followed by auth verify */
1728                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1729                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1730                 }
1731         } else {
1732                 plt_dp_err("Unknown cipher operation");
1733                 return -1;
1734         }
1735
1736         switch (c_form->algo) {
1737         case RTE_CRYPTO_CIPHER_AES_CBC:
1738                 enc_type = ROC_SE_AES_CBC;
1739                 cipher_key_len = 16;
1740                 break;
1741         case RTE_CRYPTO_CIPHER_3DES_CBC:
1742                 enc_type = ROC_SE_DES3_CBC;
1743                 cipher_key_len = 24;
1744                 break;
1745         case RTE_CRYPTO_CIPHER_DES_CBC:
1746                 /* DES is implemented using 3DES in hardware */
1747                 enc_type = ROC_SE_DES3_CBC;
1748                 cipher_key_len = 8;
1749                 break;
1750         case RTE_CRYPTO_CIPHER_AES_CTR:
1751                 enc_type = ROC_SE_AES_CTR;
1752                 cipher_key_len = 16;
1753                 aes_ctr = 1;
1754                 break;
1755         case RTE_CRYPTO_CIPHER_NULL:
1756                 enc_type = 0;
1757                 is_null = 1;
1758                 break;
1759         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1760                 enc_type = ROC_SE_KASUMI_F8_ECB;
1761                 cipher_key_len = 16;
1762                 zsk_flag = ROC_SE_K_F8;
1763                 break;
1764         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1765                 enc_type = ROC_SE_SNOW3G_UEA2;
1766                 cipher_key_len = 16;
1767                 zsk_flag = ROC_SE_ZS_EA;
1768                 break;
1769         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1770                 enc_type = ROC_SE_ZUC_EEA3;
1771                 cipher_key_len = c_form->key.length;
1772                 zsk_flag = ROC_SE_ZS_EA;
1773                 break;
1774         case RTE_CRYPTO_CIPHER_AES_XTS:
1775                 enc_type = ROC_SE_AES_XTS;
1776                 cipher_key_len = 16;
1777                 break;
1778         case RTE_CRYPTO_CIPHER_3DES_ECB:
1779                 enc_type = ROC_SE_DES3_ECB;
1780                 cipher_key_len = 24;
1781                 break;
1782         case RTE_CRYPTO_CIPHER_AES_ECB:
1783                 enc_type = ROC_SE_AES_ECB;
1784                 cipher_key_len = 16;
1785                 break;
1786         case RTE_CRYPTO_CIPHER_3DES_CTR:
1787         case RTE_CRYPTO_CIPHER_AES_F8:
1788         case RTE_CRYPTO_CIPHER_ARC4:
1789                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1790                 return -1;
1791         default:
1792                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1793                            c_form->algo);
1794                 return -1;
1795         }
1796
1797         if (c_form->key.length < cipher_key_len) {
1798                 plt_dp_err("Invalid cipher params keylen %u",
1799                            c_form->key.length);
1800                 return -1;
1801         }
1802
1803         sess->zsk_flag = zsk_flag;
1804         sess->aes_gcm = 0;
1805         sess->aes_ctr = aes_ctr;
1806         sess->iv_offset = c_form->iv.offset;
1807         sess->iv_length = c_form->iv.length;
1808         sess->is_null = is_null;
1809
1810         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1811                                          c_form->key.data, c_form->key.length,
1812                                          NULL)))
1813                 return -1;
1814
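        /*
         * Wireless (ZUC/SNOW3G/AES-CTR EEA) cipher contexts are
         * endian-swapped into the layout expected by the microcode.
         */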
1815         if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
1816                 roc_se_ctx_swap(&sess->roc_se_ctx);
1817         return 0;
1818 }
1819
1820 static __rte_always_inline int
1821 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1822 {
1823         struct rte_crypto_auth_xform *a_form;
1824         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1825         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1826
1827         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
1828                 return fill_sess_gmac(xform, sess);
1829
1830         if (xform->next != NULL &&
1831             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1832             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1833                 /* Perform auth followed by encryption */
1834                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1835                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1836         }
1837
1838         a_form = &xform->auth;
1839
1840         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1841                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1842         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1843                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1844         else {
1845                 plt_dp_err("Unknown auth operation");
1846                 return -1;
1847         }
1848
1849         switch (a_form->algo) {
1850         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1851                 /* Fall through */
1852         case RTE_CRYPTO_AUTH_SHA1:
1853                 auth_type = ROC_SE_SHA1_TYPE;
1854                 break;
1855         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1856         case RTE_CRYPTO_AUTH_SHA256:
1857                 auth_type = ROC_SE_SHA2_SHA256;
1858                 break;
1859         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1860         case RTE_CRYPTO_AUTH_SHA512:
1861                 auth_type = ROC_SE_SHA2_SHA512;
1862                 break;
1863         case RTE_CRYPTO_AUTH_AES_GMAC:
1864                 auth_type = ROC_SE_GMAC_TYPE;
1865                 aes_gcm = 1;
1866                 break;
1867         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1868         case RTE_CRYPTO_AUTH_SHA224:
1869                 auth_type = ROC_SE_SHA2_SHA224;
1870                 break;
1871         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1872         case RTE_CRYPTO_AUTH_SHA384:
1873                 auth_type = ROC_SE_SHA2_SHA384;
1874                 break;
1875         case RTE_CRYPTO_AUTH_MD5_HMAC:
1876         case RTE_CRYPTO_AUTH_MD5:
1877                 auth_type = ROC_SE_MD5_TYPE;
1878                 break;
1879         case RTE_CRYPTO_AUTH_KASUMI_F9:
1880                 auth_type = ROC_SE_KASUMI_F9_ECB;
1881                 /*
1882                  * Indicate that the direction bit must be extracted
1883                  * from the end of the source buffer
1884                  */
1885                 zsk_flag = ROC_SE_K_F9;
1886                 break;
1887         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1888                 auth_type = ROC_SE_SNOW3G_UIA2;
1889                 zsk_flag = ROC_SE_ZS_IA;
1890                 break;
1891         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1892                 auth_type = ROC_SE_ZUC_EIA3;
1893                 zsk_flag = ROC_SE_ZS_IA;
1894                 break;
1895         case RTE_CRYPTO_AUTH_NULL:
1896                 auth_type = 0;
1897                 is_null = 1;
1898                 break;
1899         case RTE_CRYPTO_AUTH_AES_CMAC:
1900                 auth_type = ROC_SE_AES_CMAC_EIA2;
1901                 zsk_flag = ROC_SE_ZS_IA;
1902                 break;
1903         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1904         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1905                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
1906                 return -1;
1907         default:
1908                 plt_dp_err("Crypto: Undefined Hash algo %u specified",
1909                            a_form->algo);
1910                 return -1;
1911         }
1912
1913         sess->zsk_flag = zsk_flag;
1914         sess->aes_gcm = aes_gcm;
1915         sess->mac_len = a_form->digest_length;
1916         sess->is_null = is_null;
1917         if (zsk_flag) {
1918                 sess->auth_iv_offset = a_form->iv.offset;
1919                 sess->auth_iv_length = a_form->iv.length;
1920         }
1921         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
1922                                          a_form->key.data, a_form->key.length,
1923                                          a_form->digest_length)))
1924                 return -1;
1925
1926         if ((auth_type >= ROC_SE_ZUC_EIA3) &&
1927             (auth_type <= ROC_SE_AES_CMAC_EIA2))
1928                 roc_se_ctx_swap(&sess->roc_se_ctx);
1929
1930         return 0;
1931 }
1932
1933 static __rte_always_inline int
1934 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1935 {
1936         struct rte_crypto_auth_xform *a_form;
1937         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1938         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1939
1940         a_form = &xform->auth;
1941
1942         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1943                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1944         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1945                 sess->cpt_op |= ROC_SE_OP_DECODE;
1946         else {
1947                 plt_dp_err("Unknown auth operation");
1948                 return -1;
1949         }
1950
1951         switch (a_form->algo) {
1952         case RTE_CRYPTO_AUTH_AES_GMAC:
1953                 enc_type = ROC_SE_AES_GCM;
1954                 auth_type = ROC_SE_GMAC_TYPE;
1955                 break;
1956         default:
1957                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1958                            a_form->algo);
1959                 return -1;
1960         }
1961
1962         sess->zsk_flag = 0;
1963         sess->aes_gcm = 0;
1964         sess->is_gmac = 1;
1965         sess->iv_offset = a_form->iv.offset;
1966         sess->iv_length = a_form->iv.length;
1967         sess->mac_len = a_form->digest_length;
1968
1969         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1970                                          a_form->key.data, a_form->key.length,
1971                                          NULL)))
1972                 return -1;
1973
1974         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1975                                          a_form->digest_length)))
1976                 return -1;
1977
1978         return 0;
1979 }
1980
1981 static __rte_always_inline void *
1982 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1983               struct rte_mempool *cpt_meta_pool,
1984               struct cpt_inflight_req *infl_req)
1985 {
1986         uint8_t *mdata;
1987
1988         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1989                 return NULL;
1990
1991         buf->vaddr = mdata;
1992         buf->size = len;
1993
1994         infl_req->mdata = mdata;
1995         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1996
1997         return mdata;
1998 }
1999
2000 static __rte_always_inline uint32_t
2001 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
2002                      uint32_t start_offset)
2003 {
2004         uint16_t index = 0;
2005         void *seg_data = NULL;
2006         int32_t seg_size = 0;
2007
2008         if (!pkt) {
2009                 iovec->buf_cnt = 0;
2010                 return 0;
2011         }
2012
2013         if (!start_offset) {
2014                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2015                 seg_size = pkt->data_len;
2016         } else {
2017                 while (start_offset >= pkt->data_len) {
2018                         start_offset -= pkt->data_len;
2019                         pkt = pkt->next;
2020                 }
2021
2022                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2023                 seg_size = pkt->data_len - start_offset;
2024                 if (!seg_size)
2025                         return 1;
2026         }
2027
2028         /* first seg */
2029         iovec->bufs[index].vaddr = seg_data;
2030         iovec->bufs[index].size = seg_size;
2031         index++;
2032         pkt = pkt->next;
2033
2034         while (unlikely(pkt != NULL)) {
2035                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2036                 seg_size = pkt->data_len;
2037                 if (!seg_size)
2038                         break;
2039
2040                 iovec->bufs[index].vaddr = seg_data;
2041                 iovec->bufs[index].size = seg_size;
2042
2043                 index++;
2044
2045                 pkt = pkt->next;
2046         }
2047
2048         iovec->buf_cnt = index;
2049         return 0;
2050 }
2051
2052 static __rte_always_inline void
2053 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2054                              struct roc_se_fc_params *param, uint32_t *flags)
2055 {
2056         uint16_t index = 0;
2057         void *seg_data = NULL;
2058         uint32_t seg_size = 0;
2059         struct roc_se_iov_ptr *iovec;
2060
2061         seg_data = rte_pktmbuf_mtod(pkt, void *);
2062         seg_size = pkt->data_len;
2063
2064         /* first seg */
2065         if (likely(!pkt->next)) {
2066                 uint32_t headroom;
2067
2068                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2069                 headroom = rte_pktmbuf_headroom(pkt);
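                /*
                 * The in-place fast path needs headroom in front of the
                 * packet data for the offset control word and IV
                 * (up to 24B).
                 */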
2070                 if (likely(headroom >= 24))
2071                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2072
2073                 param->bufs[0].vaddr = seg_data;
2074                 param->bufs[0].size = seg_size;
2075                 return;
2076         }
2077         iovec = param->src_iov;
2078         iovec->bufs[index].vaddr = seg_data;
2079         iovec->bufs[index].size = seg_size;
2080         index++;
2081         pkt = pkt->next;
2082
2083         while (unlikely(pkt != NULL)) {
2084                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2085                 seg_size = pkt->data_len;
2086
2087                 if (!seg_size)
2088                         break;
2089
2090                 iovec->bufs[index].vaddr = seg_data;
2091                 iovec->bufs[index].size = seg_size;
2092
2093                 index++;
2094
2095                 pkt = pkt->next;
2096         }
2097
2098         iovec->buf_cnt = index;
2099         return;
2100 }
2101
2102 static __rte_always_inline int
2103 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2104                struct cpt_qp_meta_info *m_info,
2105                struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2106 {
2107         struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2108         uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2109         struct rte_crypto_sym_op *sym_op = cop->sym;
2110         void *mdata = NULL;
2111         uint32_t mc_hash_off;
2112         uint32_t flags = 0;
2113         uint64_t d_offs, d_lens;
2114         struct rte_mbuf *m_src, *m_dst;
2115         uint8_t cpt_op = sess->cpt_op;
2116 #ifdef CPT_ALWAYS_USE_SG_MODE
2117         uint8_t inplace = 0;
2118 #else
2119         uint8_t inplace = 1;
2120 #endif
2121         struct roc_se_fc_params fc_params;
2122         char src[SRC_IOV_SIZE];
2123         char dst[SRC_IOV_SIZE];
2124         uint32_t iv_buf[4];
2125         int ret;
2126
2127         fc_params.cipher_iv_len = sess->iv_length;
2128         fc_params.auth_iv_len = sess->auth_iv_length;
2129
2130         if (likely(sess->iv_length)) {
2131                 flags |= ROC_SE_VALID_IV_BUF;
2132                 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2133                                                              sess->iv_offset);
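                /*
                 * For AES-CTR with a 12-byte nonce, build a 16-byte
                 * counter block with the block counter initialized to 1.
                 */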
2134                 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2135                         memcpy((uint8_t *)iv_buf,
2136                                rte_crypto_op_ctod_offset(cop, uint8_t *,
2137                                                          sess->iv_offset),
2138                                12);
2139                         iv_buf[3] = rte_cpu_to_be_32(0x1);
2140                         fc_params.iv_buf = iv_buf;
2141                 }
2142         }
2143
2144         if (sess->zsk_flag) {
2145                 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2146                         cop, uint8_t *, sess->auth_iv_offset);
2147                 if (sess->zsk_flag != ROC_SE_ZS_EA)
2148                         inplace = 0;
2149         }
2150         m_src = sym_op->m_src;
2151         m_dst = sym_op->m_dst;
2152
2153         if (sess->aes_gcm || sess->chacha_poly) {
2154                 uint8_t *salt;
2155                 uint8_t *aad_data;
2156                 uint16_t aad_len;
2157
2158                 d_offs = sym_op->aead.data.offset;
2159                 d_lens = sym_op->aead.data.length;
2160                 mc_hash_off =
2161                         sym_op->aead.data.offset + sym_op->aead.data.length;
2162
2163                 aad_data = sym_op->aead.aad.data;
2164                 aad_len = sess->aad_length;
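                /*
                 * If the AAD immediately precedes the data in the mbuf,
                 * fold it into the auth range instead of passing a
                 * separate AAD buffer: the lower halves of d_offs/d_lens
                 * carry the auth offset/length, the upper halves the
                 * cipher offset/length.
                 */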
2165                 if (likely((aad_data + aad_len) ==
2166                            rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2167                                                    sym_op->aead.data.offset))) {
2168                         d_offs = (d_offs - aad_len) | (d_offs << 16);
2169                         d_lens = (d_lens + aad_len) | (d_lens << 32);
2170                 } else {
2171                         fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2172                         fc_params.aad_buf.size = aad_len;
2173                         flags |= ROC_SE_VALID_AAD_BUF;
2174                         inplace = 0;
2175                         d_offs = d_offs << 16;
2176                         d_lens = d_lens << 32;
2177                 }
2178
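                /*
                 * The first 4 bytes of the IV carry the salt; refresh the
                 * SE context if it changed and advance iv_buf past it.
                 */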
2179                 salt = fc_params.iv_buf;
2180                 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2181                         cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2182                         sess->salt = *(uint32_t *)salt;
2183                 }
2184                 fc_params.iv_buf = salt + 4;
2185                 if (likely(sess->mac_len)) {
2186                         struct rte_mbuf *m =
2187                                 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2188
2189                         if (!m)
2190                                 m = m_src;
2191
2192                         /* hmac immediately following data is best case */
2193                         if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2194                                              mc_hash_off !=
2195                                      (uint8_t *)sym_op->aead.digest.data)) {
2196                                 flags |= ROC_SE_VALID_MAC_BUF;
2197                                 fc_params.mac_buf.size = sess->mac_len;
2198                                 fc_params.mac_buf.vaddr =
2199                                         sym_op->aead.digest.data;
2200                                 inplace = 0;
2201                         }
2202                 }
2203         } else {
2204                 d_offs = sym_op->cipher.data.offset;
2205                 d_lens = sym_op->cipher.data.length;
2206                 mc_hash_off =
2207                         sym_op->cipher.data.offset + sym_op->cipher.data.length;
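                /*
                 * Pack cipher and auth ranges into the offset/length
                 * words: auth offset/length in the lower halves, cipher
                 * offset/length in the upper halves.
                 */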
2208                 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2209                 d_lens = (d_lens << 32) | sym_op->auth.data.length;
2210
2211                 if (mc_hash_off <
2212                     (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2213                         mc_hash_off = (sym_op->auth.data.offset +
2214                                        sym_op->auth.data.length);
2215                 }
2216                 /* For GMAC, the salt must be updated as in GCM */
2217                 if (unlikely(sess->is_gmac)) {
2218                         uint8_t *salt;
2219                         salt = fc_params.iv_buf;
2220                         if (unlikely(*(uint32_t *)salt != sess->salt)) {
2221                                 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2222                                 sess->salt = *(uint32_t *)salt;
2223                         }
2224                         fc_params.iv_buf = salt + 4;
2225                 }
2226                 if (likely(sess->mac_len)) {
2227                         struct rte_mbuf *m;
2228
2229                         m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2230                         if (!m)
2231                                 m = m_src;
2232
2233                         /* hmac immediately following data is best case */
2234                         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2235                             (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2236                                               mc_hash_off !=
2237                                       (uint8_t *)sym_op->auth.digest.data))) {
2238                                 flags |= ROC_SE_VALID_MAC_BUF;
2239                                 fc_params.mac_buf.size = sess->mac_len;
2240                                 fc_params.mac_buf.vaddr =
2241                                         sym_op->auth.digest.data;
2242                                 inplace = 0;
2243                         }
2244                 }
2245         }
2246         fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2247
2248         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2249             unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2250                 inplace = 0;
2251
2252         if (likely(!m_dst && inplace)) {
2253                 /* In-place case: a single buffer with no separate
2254                  * AAD or MAC buffer, and not a wireless (air)
2255                  * crypto algorithm
2256                  */
2257                 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2258
2259                 prepare_iov_from_pkt_inplace(m_src, &fc_params, &flags);
2260
2261         } else {
2262                 /* Out of place processing */
2263                 fc_params.src_iov = (void *)src;
2264                 fc_params.dst_iov = (void *)dst;
2265
2266                 /* Store SG I/O in the api for reuse */
2267                 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2268                         plt_dp_err("Prepare src iov failed");
2269                         ret = -EINVAL;
2270                         goto err_exit;
2271                 }
2272
2273                 if (unlikely(m_dst != NULL)) {
2274                         uint32_t pkt_len;
2275
2276                         /* Try to make m_dst at least as large as m_src */
2277                         pkt_len = rte_pktmbuf_pkt_len(m_dst);
2278
2279                         if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2280                                 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2281                                 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2282                                         plt_dp_err("Not enough space in "
2283                                                    "m_dst %p, need %u"
2284                                                    " more",
2285                                                    m_dst, pkt_len);
2286                                         ret = -EINVAL;
2287                                         goto err_exit;
2288                                 }
2289                         }
2290
2291                         if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2292                                 plt_dp_err("Prepare dst iov failed for "
2293                                            "m_dst %p",
2294                                            m_dst);
2295                                 ret = -EINVAL;
2296                                 goto err_exit;
2297                         }
2298                 } else {
2299                         fc_params.dst_iov = (void *)src;
2300                 }
2301         }
2302
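        /*
         * A meta buffer is required unless the single-buffer in-place
         * fast path (with sufficient headroom) can be used for FC_GEN
         * or PDCP.
         */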
2303         if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2304                        (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2305                        ((ctx->fc_type == ROC_SE_FC_GEN) ||
2306                         (ctx->fc_type == ROC_SE_PDCP))))) {
2307                 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2308                                       m_info->pool, infl_req);
2309                 if (mdata == NULL) {
2310                         plt_dp_err("Error allocating meta buffer for request");
2311                         return -ENOMEM;
2312                 }
2313         }
2314
2315         /* Finally prepare the instruction */
2316         if (cpt_op & ROC_SE_OP_ENCODE)
2317                 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2318                                            inst);
2319         else
2320                 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2321                                            inst);
2322
2323         if (unlikely(ret)) {
2324                 plt_dp_err("Preparing request failed due to bad input arg");
2325                 goto free_mdata_and_exit;
2326         }
2327
2328         return 0;
2329
2330 free_mdata_and_exit:
2331         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2332                 rte_mempool_put(m_info->pool, infl_req->mdata);
2333 err_exit:
2334         return ret;
2335 }
2336
2337 static __rte_always_inline void
2338 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2339 {
2340         uint8_t *mac;
2341         struct rte_crypto_sym_op *sym_op = op->sym;
2342
2343         if (sym_op->auth.digest.data)
2344                 mac = sym_op->auth.digest.data;
2345         else
2346                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2347                                               sym_op->auth.data.length +
2348                                                       sym_op->auth.data.offset);
2349         if (!mac) {
2350                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2351                 return;
2352         }
2353
2354         if (memcmp(mac, gen_mac, mac_len))
2355                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2356         else
2357                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2358 }
2359
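/*
 * KASUMI F9 input ends with the direction bit, a '1' stop bit and zero
 * padding. Scan backwards from the end of the buffer to locate the stop
 * bit, then recover the message length in bits and the direction bit.
 */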
2360 static __rte_always_inline void
2361 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2362                                    uint32_t *addr_length_in_bits,
2363                                    uint8_t *addr_direction)
2364 {
2365         uint8_t found = 0;
2366         uint32_t pos;
2367         uint8_t last_byte;
2368         while (!found && counter_num_bytes > 0) {
2369                 counter_num_bytes--;
2370                 if (src[counter_num_bytes] == 0x00)
2371                         continue;
2372                 pos = rte_bsf32(src[counter_num_bytes]);
2373                 if (pos == 7) {
2374                         if (likely(counter_num_bytes > 0)) {
2375                                 last_byte = src[counter_num_bytes - 1];
2376                                 *addr_direction = last_byte & 0x1;
2377                                 *addr_length_in_bits =
2378                                         counter_num_bytes * 8 - 1;
2379                         }
2380                 } else {
2381                         last_byte = src[counter_num_bytes];
2382                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2383                         *addr_length_in_bits =
2384                                 counter_num_bytes * 8 + (8 - (pos + 2));
2385                 }
2386                 found = 1;
2387         }
2388 }
2389
2390 /*
2391  * This handles all auth-only cases except AES_GMAC
2392  */
2393 static __rte_always_inline int
2394 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2395                    struct cpt_qp_meta_info *m_info,
2396                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2397 {
2398         uint32_t space = 0;
2399         struct rte_crypto_sym_op *sym_op = cop->sym;
2400         void *mdata;
2401         uint32_t auth_range_off;
2402         uint32_t flags = 0;
2403         uint64_t d_offs = 0, d_lens;
2404         struct rte_mbuf *m_src, *m_dst;
2405         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2406         uint16_t mac_len = sess->mac_len;
2407         struct roc_se_fc_params params;
2408         char src[SRC_IOV_SIZE];
2409         uint8_t iv_buf[16];
2410         int ret;
2411
2412         memset(&params, 0, sizeof(struct roc_se_fc_params));
2413
2414         m_src = sym_op->m_src;
2415
2416         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2417                               infl_req);
2418         if (mdata == NULL) {
2419                 ret = -ENOMEM;
2420                 goto err_exit;
2421         }
2422
2423         auth_range_off = sym_op->auth.data.offset;
2424
2425         flags = ROC_SE_VALID_MAC_BUF;
2426         params.src_iov = (void *)src;
2427         if (unlikely(sess->zsk_flag)) {
2428                 /*
2429                  * Since offsets for ZUC, KASUMI and SNOW3G are in bits,
2430                  * pass the offset through even in the auth-only case
2431                  * and let the microcode handle it
2432                  */
2433                 d_offs = auth_range_off;
2434                 auth_range_off = 0;
2435                 params.auth_iv_len = sess->auth_iv_length;
2436                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2437                         cop, uint8_t *, sess->auth_iv_offset);
2438                 if (sess->zsk_flag == ROC_SE_K_F9) {
2439                         uint32_t length_in_bits, num_bytes;
2440                         uint8_t *src, direction = 0;
2441
2442                         memcpy(iv_buf,
2443                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2444                         /*
2445                          * This is kasumi f9, take direction from
2446                          * source buffer
2447                          */
2448                         length_in_bits = cop->sym->auth.data.length;
2449                         num_bytes = (length_in_bits >> 3);
2450                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2451                         find_kasumif9_direction_and_length(
2452                                 src, num_bytes, &length_in_bits, &direction);
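                        /*
                         * The first 8 bytes (COUNT || FRESH) were copied
                         * into the IV above; drop them from the auth range.
                         */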
2453                         length_in_bits -= 64;
2454                         cop->sym->auth.data.offset += 64;
2455                         d_offs = cop->sym->auth.data.offset;
2456                         auth_range_off = d_offs / 8;
2457                         cop->sym->auth.data.length = length_in_bits;
2458
2459                         /* Store the direction bit at the end of the auth IV */
2460                         iv_buf[8] = direction;
2461                         params.auth_iv_buf = iv_buf;
2462                 }
2463         }
2464
2465         d_lens = sym_op->auth.data.length;
2466
2467         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2468
2469         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2470                 if (sym_op->auth.digest.data) {
2471                         /*
2472                          * Digest to be generated
2473                          * in separate buffer
2474                          */
2475                         params.mac_buf.size = sess->mac_len;
2476                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2477                 } else {
2478                         uint32_t off = sym_op->auth.data.offset +
2479                                        sym_op->auth.data.length;
2480                         int32_t dlen, space;
2481
2482                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2483                         dlen = rte_pktmbuf_pkt_len(m_dst);
2484
2485                         space = off + mac_len - dlen;
2486                         if (space > 0)
2487                                 if (!rte_pktmbuf_append(m_dst, space)) {
2488                                         plt_dp_err("Failed to extend "
2489                                                    "mbuf by %uB",
2490                                                    space);
2491                                         ret = -EINVAL;
2492                                         goto free_mdata_and_exit;
2493                                 }
2494
2495                         params.mac_buf.vaddr =
2496                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2497                         params.mac_buf.size = mac_len;
2498                 }
2499         } else {
2500                 uint64_t *op = mdata;
2501
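                /*
                 * Auth verify: generate the MAC into the meta buffer and
                 * record its address and length at the start of mdata so
                 * that it can later be compared (compl_auth_verify())
                 * against the digest supplied in the op.
                 */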
2502                 /* Need space for storing generated mac */
2503                 space += 2 * sizeof(uint64_t);
2504
2505                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2506                 params.mac_buf.size = mac_len;
2507                 space += RTE_ALIGN_CEIL(mac_len, 8);
2508                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2509                 op[1] = mac_len;
2510                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2511         }
2512
2513         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2514         params.meta_buf.size -= space;
2515
2516         /* Out of place processing */
2517         params.src_iov = (void *)src;
2518
2519         /* Store SG I/O in the api for reuse */
2520         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2521                 plt_dp_err("Prepare src iov failed");
2522                 ret = -EINVAL;
2523                 goto free_mdata_and_exit;
2524         }
2525
2526         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2527         if (ret)
2528                 goto free_mdata_and_exit;
2529
2530         return 0;
2531
2532 free_mdata_and_exit:
2533         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2534                 rte_mempool_put(m_info->pool, infl_req->mdata);
2535 err_exit:
2536         return ret;
2537 }
2538 #endif /*_CNXK_SE_H_ */