1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
12 #define SRC_IOV_SIZE                                                           \
13         (sizeof(struct roc_se_iov_ptr) +                                       \
14          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE                                                           \
16         (sizeof(struct roc_se_iov_ptr) +                                       \
17          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
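/*
 * Per-session private data. The bit flags record which kind of transform
 * was configured (GCM, CTR, CHACHA20-POLY1305, NULL, GMAC, ...), and
 * cpt_inst_w7 holds the pre-computed instruction word 7 for the session
 * so the datapath can reuse it for every operation. Cache aligned since
 * it is read per packet.
 */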
19 struct cnxk_se_sess {
20         uint16_t cpt_op : 4;
21         uint16_t zsk_flag : 4;
22         uint16_t aes_gcm : 1;
23         uint16_t aes_ctr : 1;
24         uint16_t chacha_poly : 1;
25         uint16_t is_null : 1;
26         uint16_t is_gmac : 1;
27         uint16_t rsvd1 : 3;
28         uint16_t aad_length;
29         uint8_t mac_len;
30         uint8_t iv_length;
31         uint8_t auth_iv_length;
32         uint16_t iv_offset;
33         uint16_t auth_iv_offset;
34         uint32_t salt;
35         uint64_t cpt_inst_w7;
36         struct roc_se_ctx roc_se_ctx;
37 } __rte_cache_aligned;
38
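/*
 * Verify that the requested digest length matches the full digest size of
 * the auth algorithm; truncated digests are rejected by this check.
 */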
39 static __rte_always_inline int
40 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
41 {
42         uint16_t mac_len = auth->digest_length;
43         int ret;
44
45         switch (auth->algo) {
46         case RTE_CRYPTO_AUTH_MD5:
47         case RTE_CRYPTO_AUTH_MD5_HMAC:
48                 ret = (mac_len == 16) ? 0 : -1;
49                 break;
50         case RTE_CRYPTO_AUTH_SHA1:
51         case RTE_CRYPTO_AUTH_SHA1_HMAC:
52                 ret = (mac_len == 20) ? 0 : -1;
53                 break;
54         case RTE_CRYPTO_AUTH_SHA224:
55         case RTE_CRYPTO_AUTH_SHA224_HMAC:
56                 ret = (mac_len == 28) ? 0 : -1;
57                 break;
58         case RTE_CRYPTO_AUTH_SHA256:
59         case RTE_CRYPTO_AUTH_SHA256_HMAC:
60                 ret = (mac_len == 32) ? 0 : -1;
61                 break;
62         case RTE_CRYPTO_AUTH_SHA384:
63         case RTE_CRYPTO_AUTH_SHA384_HMAC:
64                 ret = (mac_len == 48) ? 0 : -1;
65                 break;
66         case RTE_CRYPTO_AUTH_SHA512:
67         case RTE_CRYPTO_AUTH_SHA512_HMAC:
68                 ret = (mac_len == 64) ? 0 : -1;
69                 break;
70         case RTE_CRYPTO_AUTH_NULL:
71                 ret = 0;
72                 break;
73         default:
74                 ret = -1;
75         }
76
77         return ret;
78 }
79
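/*
 * Refresh the 4-byte salt kept at the start of the flexi-crypto context
 * IV (where AES-GCM/CTR style ciphers expect it).
 */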
80 static __rte_always_inline void
81 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
82 {
83         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
84         memcpy(fctx->enc.encr_iv, salt, 4);
85 }
86
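/*
 * Append one entry to the microcode SG list. Each roc_se_sglist_comp
 * packs four {length, pointer} slots: i >> 2 selects the component and
 * i % 4 the slot within it. Lengths and addresses are stored big endian
 * as the microcode expects. Returns the incremented entry index.
 */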
87 static __rte_always_inline uint32_t
88 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
89              uint32_t size)
90 {
91         struct roc_se_sglist_comp *to = &list[i >> 2];
92
93         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
94         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
95         i++;
96         return i;
97 }
98
99 static __rte_always_inline uint32_t
100 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
101                       struct roc_se_buf_ptr *from)
102 {
103         struct roc_se_sglist_comp *to = &list[i >> 2];
104
105         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
106         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
107         i++;
108         return i;
109 }
110
111 static __rte_always_inline uint32_t
112 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
113                           struct roc_se_buf_ptr *from, uint32_t *psize)
114 {
115         struct roc_se_sglist_comp *to = &list[i >> 2];
116         uint32_t size = *psize;
117         uint32_t e_len;
118
119         e_len = (size > from->size) ? from->size : size;
120         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
121         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
122         *psize -= e_len;
123         i++;
124         return i;
125 }
126
127 /*
128  * This fills the MC expected SGIO list
129  * from IOV given by user.
130  */
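/*
 * extra_buf, when non-NULL, is spliced into the generated list
 * extra_offset bytes into the source data; the cipher paths use this to
 * insert the AAD between the passthrough and encrypted regions. On
 * return, *psize holds the number of bytes that could not be mapped
 * (0 on success).
 */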
131 static __rte_always_inline uint32_t
132 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
133                       struct roc_se_iov_ptr *from, uint32_t from_offset,
134                       uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
135                       uint32_t extra_offset)
136 {
137         int32_t j;
138         uint32_t extra_len = extra_buf ? extra_buf->size : 0;
139         uint32_t size = *psize;
140         struct roc_se_buf_ptr *bufs;
141
142         bufs = from->bufs;
143         for (j = 0; (j < from->buf_cnt) && size; j++) {
144                 uint64_t e_vaddr;
145                 uint32_t e_len;
146                 struct roc_se_sglist_comp *to = &list[i >> 2];
147
148                 if (unlikely(from_offset)) {
149                         if (from_offset >= bufs[j].size) {
150                                 from_offset -= bufs[j].size;
151                                 continue;
152                         }
153                         e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
154                         e_len = (size > (bufs[j].size - from_offset)) ?
155                                         (bufs[j].size - from_offset) :
156                                         size;
157                         from_offset = 0;
158                 } else {
159                         e_vaddr = (uint64_t)bufs[j].vaddr;
160                         e_len = (size > bufs[j].size) ? bufs[j].size : size;
161                 }
162
163                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
164                 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
165
166                 if (extra_len && (e_len >= extra_offset)) {
167                         /* Break the data at given offset */
168                         uint32_t next_len = e_len - extra_offset;
169                         uint64_t next_vaddr = e_vaddr + extra_offset;
170
171                         if (!extra_offset) {
172                                 i--;
173                         } else {
174                                 e_len = extra_offset;
175                                 size -= e_len;
176                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
177                         }
178
179                         extra_len = RTE_MIN(extra_len, size);
180                         /* Insert extra data ptr */
181                         if (extra_len) {
182                                 i++;
183                                 to = &list[i >> 2];
184                                 to->u.s.len[i % 4] =
185                                         rte_cpu_to_be_16(extra_len);
186                                 to->ptr[i % 4] = rte_cpu_to_be_64(
187                                         (uint64_t)extra_buf->vaddr);
188                                 size -= extra_len;
189                         }
190
191                         next_len = RTE_MIN(next_len, size);
192                         /* insert the rest of the data */
193                         if (next_len) {
194                                 i++;
195                                 to = &list[i >> 2];
196                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
197                                 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
198                                 size -= next_len;
199                         }
200                         extra_len = 0;
201
202                 } else {
203                         size -= e_len;
204                 }
205                 if (extra_offset)
206                         extra_offset -= size;
207                 i++;
208         }
209
210         *psize = size;
211         return (uint32_t)i;
212 }
213
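/*
 * Build a plain HASH/HMAC instruction (auth-only path). Always uses SG
 * mode: the gather list carries the (optionally key-prefixed) message and
 * the scatter list receives the digest, either into a dedicated MAC
 * buffer or back into the source IOV right after the data.
 */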
214 static __rte_always_inline int
215 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
216                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
217 {
218         void *m_vaddr = params->meta_buf.vaddr;
219         uint32_t size, i;
220         uint16_t data_len, mac_len, key_len;
221         roc_se_auth_type hash_type;
222         struct roc_se_ctx *ctx;
223         struct roc_se_sglist_comp *gather_comp;
224         struct roc_se_sglist_comp *scatter_comp;
225         uint8_t *in_buffer;
226         uint32_t g_size_bytes, s_size_bytes;
227         union cpt_inst_w4 cpt_inst_w4;
228
229         ctx = params->ctx_buf.vaddr;
230
231         hash_type = ctx->hash_type;
232         mac_len = ctx->mac_len;
233         key_len = ctx->auth_key_len;
234         data_len = ROC_SE_AUTH_DLEN(d_lens);
235
236         /*GP op header */
237         cpt_inst_w4.s.opcode_minor = 0;
238         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
239         if (ctx->hmac) {
240                 cpt_inst_w4.s.opcode_major =
241                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
242                 cpt_inst_w4.s.param1 = key_len;
243                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
244         } else {
245                 cpt_inst_w4.s.opcode_major =
246                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
247                 cpt_inst_w4.s.param1 = 0;
248                 cpt_inst_w4.s.dlen = data_len;
249         }
250
251         /* Null auth only case enters the if */
252         if (unlikely(!hash_type && !ctx->enc_cipher)) {
253                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
254                 /* Minor op is passthrough */
255                 cpt_inst_w4.s.opcode_minor = 0x03;
256                 /* Send out completion code only */
257                 cpt_inst_w4.s.param2 = 0x1;
258         }
259
260         /* DPTR has SG list */
261         in_buffer = m_vaddr;
262
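        /*
         * 8-byte SG header: words [2] and [3] carry the gather and
         * scatter component counts (big endian) and are filled in once
         * the lists below are complete.
         */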
263         ((uint16_t *)in_buffer)[0] = 0;
264         ((uint16_t *)in_buffer)[1] = 0;
265
266         /* TODO Add error check if space will be sufficient */
267         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
268
269         /*
270          * Input gather list
271          */
272
273         i = 0;
274
275         if (ctx->hmac) {
276                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
277                 /* Key */
278                 i = fill_sg_comp(gather_comp, i, k_vaddr,
279                                  RTE_ALIGN_CEIL(key_len, 8));
280         }
281
282         /* input data */
283         size = data_len;
284         if (size) {
285                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
286                                           &size, NULL, 0);
287                 if (unlikely(size)) {
288                         plt_dp_err("Insufficient src IOV size, short by %dB",
289                                    size);
290                         return -1;
291                 }
292         } else {
293                 /*
294                  * Looks like we need to support zero data
295                  * gather ptr in case of hash & hmac
296                  */
297                 i++;
298         }
299         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
300         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
301
302         /*
303          * Output Gather list
304          */
305
306         i = 0;
307         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
308                                                      g_size_bytes);
309
310         if (flags & ROC_SE_VALID_MAC_BUF) {
311                 if (unlikely(params->mac_buf.size < mac_len)) {
312                         plt_dp_err("Insufficient MAC size");
313                         return -1;
314                 }
315
316                 size = mac_len;
317                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
318                                               &size);
319         } else {
320                 size = mac_len;
321                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
322                                           data_len, &size, NULL, 0);
323                 if (unlikely(size)) {
324                         plt_dp_err("Insufficient dst IOV size, short by %dB",
325                                    size);
326                         return -1;
327                 }
328         }
329
330         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
331         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
332
333         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
334
335         /* This is DPTR len in case of SG mode */
336         cpt_inst_w4.s.dlen = size;
337
338         inst->dptr = (uint64_t)in_buffer;
339         inst->w4.u64 = cpt_inst_w4.u64;
340
341         return 0;
342 }
343
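/*
 * Build a flexi-crypto ENCRYPT (and/or hash) instruction. Handles the
 * AEAD case where AAD is supplied as a separate buffer, the GMAC-only
 * case, and both direct mode (single in-place buffer with headroom) and
 * SG mode.
 */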
344 static __rte_always_inline int
345 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
346                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
347 {
348         uint32_t iv_offset = 0;
349         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
350         struct roc_se_ctx *se_ctx;
351         uint32_t cipher_type, hash_type;
352         uint32_t mac_len, size;
353         uint8_t iv_len = 16;
354         struct roc_se_buf_ptr *aad_buf = NULL;
355         uint32_t encr_offset, auth_offset;
356         uint32_t encr_data_len, auth_data_len, aad_len = 0;
357         uint32_t passthrough_len = 0;
358         union cpt_inst_w4 cpt_inst_w4;
359         void *offset_vaddr;
360         uint8_t op_minor;
361
362         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
363         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
364         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
365         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
366         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
367                 /* We don't support both AAD and auth data separately */
368                 auth_data_len = 0;
369                 auth_offset = 0;
370                 aad_len = fc_params->aad_buf.size;
371                 aad_buf = &fc_params->aad_buf;
372         }
373         se_ctx = fc_params->ctx_buf.vaddr;
374         cipher_type = se_ctx->enc_cipher;
375         hash_type = se_ctx->hash_type;
376         mac_len = se_ctx->mac_len;
377         op_minor = se_ctx->template_w4.s.opcode_minor;
378
379         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
380                 iv_len = 0;
381                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
382         }
383
384         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
385                 /*
386                  * When AAD is given, data above encr_offset is passthrough.
387                  * Since the AAD is passed as a separate pointer and not as
388                  * an offset, this is a special case: the input is split into
389                  * passthrough + encr_data, with the AAD inserted in between.
390                  */
391                 if (hash_type != ROC_SE_GMAC_TYPE) {
392                         passthrough_len = encr_offset;
393                         auth_offset = passthrough_len + iv_len;
394                         encr_offset = passthrough_len + aad_len + iv_len;
395                         auth_data_len = aad_len + encr_data_len;
396                 } else {
397                         passthrough_len = 16 + aad_len;
398                         auth_offset = passthrough_len + iv_len;
399                         auth_data_len = aad_len;
400                 }
401         } else {
402                 encr_offset += iv_len;
403                 auth_offset += iv_len;
404         }
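        /*
         * For the non-GMAC AAD case above, the data following the offset
         * control word is laid out as [IV][passthrough][AAD][payload],
         * with auth covering AAD + payload and encryption covering only
         * the payload.
         */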
405
406         /* Encryption */
407         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
408         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
409         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
410
411         if (hash_type == ROC_SE_GMAC_TYPE) {
412                 encr_offset = 0;
413                 encr_data_len = 0;
414         }
415
416         auth_dlen = auth_offset + auth_data_len;
417         enc_dlen = encr_data_len + encr_offset;
418         if (unlikely(encr_data_len & 0xf)) {
419                 if ((cipher_type == ROC_SE_DES3_CBC) ||
420                     (cipher_type == ROC_SE_DES3_ECB))
421                         enc_dlen =
422                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
423                 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
424                                 (cipher_type == ROC_SE_AES_ECB)))
425                         enc_dlen =
426                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
427         }
428
429         if (unlikely(auth_dlen > enc_dlen)) {
430                 inputlen = auth_dlen;
431                 outputlen = auth_dlen + mac_len;
432         } else {
433                 inputlen = enc_dlen;
434                 outputlen = enc_dlen + mac_len;
435         }
436
437         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
438                 outputlen = enc_dlen;
439
440         /* GP op header */
441         cpt_inst_w4.s.param1 = encr_data_len;
442         cpt_inst_w4.s.param2 = auth_data_len;
443
444         /*
445          * On cn9k and cn10k the IV and offset control word cannot be
446          * carried in the instruction itself and must be placed in the
447          * data buffer, so direct mode is used only when the buffer has
448          * enough headroom for them; otherwise fall back to SG mode.
449          */
450         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
451                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
452                 void *dm_vaddr = fc_params->bufs[0].vaddr;
453
454                 /* Use Direct mode */
455
456                 offset_vaddr =
457                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
458
459                 /* DPTR */
460                 inst->dptr = (uint64_t)offset_vaddr;
461
462                 /* RPTR should just exclude offset control word */
463                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
464
465                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
466
467                 if (likely(iv_len)) {
468                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
469                                                       ROC_SE_OFF_CTRL_LEN);
470                         uint64_t *src = fc_params->iv_buf;
471                         dest[0] = src[0];
472                         dest[1] = src[1];
473                 }
474
475         } else {
476                 void *m_vaddr = fc_params->meta_buf.vaddr;
477                 uint32_t i, g_size_bytes, s_size_bytes;
478                 struct roc_se_sglist_comp *gather_comp;
479                 struct roc_se_sglist_comp *scatter_comp;
480                 uint8_t *in_buffer;
481
482                 /* This falls under strict SG mode */
483                 offset_vaddr = m_vaddr;
484                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
485
486                 m_vaddr = (uint8_t *)m_vaddr + size;
487
488                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
489
490                 if (likely(iv_len)) {
491                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
492                                                       ROC_SE_OFF_CTRL_LEN);
493                         uint64_t *src = fc_params->iv_buf;
494                         dest[0] = src[0];
495                         dest[1] = src[1];
496                 }
497
498                 /* DPTR has SG list */
499                 in_buffer = m_vaddr;
500
501                 ((uint16_t *)in_buffer)[0] = 0;
502                 ((uint16_t *)in_buffer)[1] = 0;
503
504                 /* TODO Add error check if space will be sufficient */
505                 gather_comp =
506                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
507
508                 /*
509                  * Input Gather List
510                  */
511
512                 i = 0;
513
514                 /* Offset control word that includes iv */
515                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
516                                  ROC_SE_OFF_CTRL_LEN + iv_len);
517
518                 /* Add input data */
519                 size = inputlen - iv_len;
520                 if (likely(size)) {
521                         uint32_t aad_offset = aad_len ? passthrough_len : 0;
522
523                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
524                                 i = fill_sg_comp_from_buf_min(
525                                         gather_comp, i, fc_params->bufs, &size);
526                         } else {
527                                 i = fill_sg_comp_from_iov(
528                                         gather_comp, i, fc_params->src_iov, 0,
529                                         &size, aad_buf, aad_offset);
530                         }
531
532                         if (unlikely(size)) {
533                                 plt_dp_err("Insufficient buffer space,"
534                                            " size %d needed",
535                                            size);
536                                 return -1;
537                         }
538                 }
539                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
540                 g_size_bytes =
541                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
542
543                 /*
544                  * Output Scatter list
545                  */
546                 i = 0;
547                 scatter_comp =
548                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
549                                                       g_size_bytes);
550
551                 /* Add IV */
552                 if (likely(iv_len)) {
553                         i = fill_sg_comp(scatter_comp, i,
554                                          (uint64_t)offset_vaddr +
555                                                  ROC_SE_OFF_CTRL_LEN,
556                                          iv_len);
557                 }
558
559                 /* output data or output data + digest*/
560                 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
561                         size = outputlen - iv_len - mac_len;
562                         if (size) {
563                                 uint32_t aad_offset =
564                                         aad_len ? passthrough_len : 0;
565
566                                 if (unlikely(flags &
567                                              ROC_SE_SINGLE_BUF_INPLACE)) {
568                                         i = fill_sg_comp_from_buf_min(
569                                                 scatter_comp, i,
570                                                 fc_params->bufs, &size);
571                                 } else {
572                                         i = fill_sg_comp_from_iov(
573                                                 scatter_comp, i,
574                                                 fc_params->dst_iov, 0, &size,
575                                                 aad_buf, aad_offset);
576                                 }
577                                 if (unlikely(size)) {
578                                         plt_dp_err("Insufficient buffer"
579                                                    " space, size %d needed",
580                                                    size);
581                                         return -1;
582                                 }
583                         }
584                         /* mac_data */
585                         if (mac_len) {
586                                 i = fill_sg_comp_from_buf(scatter_comp, i,
587                                                           &fc_params->mac_buf);
588                         }
589                 } else {
590                         /* Output including mac */
591                         size = outputlen - iv_len;
592                         if (likely(size)) {
593                                 uint32_t aad_offset =
594                                         aad_len ? passthrough_len : 0;
595
596                                 if (unlikely(flags &
597                                              ROC_SE_SINGLE_BUF_INPLACE)) {
598                                         i = fill_sg_comp_from_buf_min(
599                                                 scatter_comp, i,
600                                                 fc_params->bufs, &size);
601                                 } else {
602                                         i = fill_sg_comp_from_iov(
603                                                 scatter_comp, i,
604                                                 fc_params->dst_iov, 0, &size,
605                                                 aad_buf, aad_offset);
606                                 }
607                                 if (unlikely(size)) {
608                                         plt_dp_err("Insufficient buffer"
609                                                    " space, size %d needed",
610                                                    size);
611                                         return -1;
612                                 }
613                         }
614                 }
615                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
616                 s_size_bytes =
617                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
618
619                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
620
621                 /* This is DPTR len in case of SG mode */
622                 cpt_inst_w4.s.dlen = size;
623
624                 inst->dptr = (uint64_t)in_buffer;
625         }
626
627         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
628                      (auth_offset >> 8))) {
629                 plt_dp_err("Offset not supported");
630                 plt_dp_err("enc_offset: %d", encr_offset);
631                 plt_dp_err("iv_offset : %d", iv_offset);
632                 plt_dp_err("auth_offset: %d", auth_offset);
633                 return -1;
634         }
635
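        /*
         * 8-byte offset control word preceding the data: encr_offset in
         * bits <31:16>, iv_offset in <15:8>, auth_offset in <7:0>, stored
         * big endian. The checks above guarantee the fields fit.
         */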
636         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
637                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
638                 ((uint64_t)auth_offset));
639
640         inst->w4.u64 = cpt_inst_w4.u64;
641         return 0;
642 }
643
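/*
 * Build a flexi-crypto DECRYPT (and/or verify) instruction. Mirrors
 * cpt_enc_hmac_prep, except that the MAC is part of the input (gathered
 * for verification) and is not produced in the output.
 */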
644 static __rte_always_inline int
645 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
646                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
647 {
648         uint32_t iv_offset = 0, size;
649         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
650         struct roc_se_ctx *se_ctx;
651         int32_t hash_type, mac_len;
652         uint8_t iv_len = 16;
653         struct roc_se_buf_ptr *aad_buf = NULL;
654         uint32_t encr_offset, auth_offset;
655         uint32_t encr_data_len, auth_data_len, aad_len = 0;
656         uint32_t passthrough_len = 0;
657         union cpt_inst_w4 cpt_inst_w4;
658         void *offset_vaddr;
659         uint8_t op_minor;
660
661         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
662         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
663         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
664         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
665
666         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
667                 /* We don't support both AAD and auth data separately */
668                 auth_data_len = 0;
669                 auth_offset = 0;
670                 aad_len = fc_params->aad_buf.size;
671                 aad_buf = &fc_params->aad_buf;
672         }
673
674         se_ctx = fc_params->ctx_buf.vaddr;
675         hash_type = se_ctx->hash_type;
676         mac_len = se_ctx->mac_len;
677         op_minor = se_ctx->template_w4.s.opcode_minor;
678
679         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
680                 iv_len = 0;
681                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
682         }
683
684         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
685                 /*
686                  * When AAD is given, data above encr_offset is passthrough.
687                  * Since the AAD is passed as a separate pointer and not as
688                  * an offset, this is a special case: the input is split into
689                  * passthrough + encr_data, with the AAD inserted in between.
690                  */
691                 if (hash_type != ROC_SE_GMAC_TYPE) {
692                         passthrough_len = encr_offset;
693                         auth_offset = passthrough_len + iv_len;
694                         encr_offset = passthrough_len + aad_len + iv_len;
695                         auth_data_len = aad_len + encr_data_len;
696                 } else {
697                         passthrough_len = 16 + aad_len;
698                         auth_offset = passthrough_len + iv_len;
699                         auth_data_len = aad_len;
700                 }
701         } else {
702                 encr_offset += iv_len;
703                 auth_offset += iv_len;
704         }
705
706         /* Decryption */
707         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
708         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
709         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
710
711         if (hash_type == ROC_SE_GMAC_TYPE) {
712                 encr_offset = 0;
713                 encr_data_len = 0;
714         }
715
716         enc_dlen = encr_offset + encr_data_len;
717         auth_dlen = auth_offset + auth_data_len;
718
719         if (auth_dlen > enc_dlen) {
720                 inputlen = auth_dlen + mac_len;
721                 outputlen = auth_dlen;
722         } else {
723                 inputlen = enc_dlen + mac_len;
724                 outputlen = enc_dlen;
725         }
726
727         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
728                 outputlen = inputlen = enc_dlen;
729
730         cpt_inst_w4.s.param1 = encr_data_len;
731         cpt_inst_w4.s.param2 = auth_data_len;
732
733         /*
734          * On cn9k and cn10k the IV and offset control word cannot be
735          * carried in the instruction itself and must be placed in the
736          * data buffer, so direct mode is used only when the buffer has
737          * enough headroom for them; otherwise fall back to SG mode.
738          */
739         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
740                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
741                 void *dm_vaddr = fc_params->bufs[0].vaddr;
742
743                 /* Use Direct mode */
744
745                 offset_vaddr =
746                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
747                 inst->dptr = (uint64_t)offset_vaddr;
748
749                 /* RPTR should just exclude offset control word */
750                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
751
752                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
753
754                 if (likely(iv_len)) {
755                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
756                                                       ROC_SE_OFF_CTRL_LEN);
757                         uint64_t *src = fc_params->iv_buf;
758                         dest[0] = src[0];
759                         dest[1] = src[1];
760                 }
761
762         } else {
763                 void *m_vaddr = fc_params->meta_buf.vaddr;
764                 uint32_t g_size_bytes, s_size_bytes;
765                 struct roc_se_sglist_comp *gather_comp;
766                 struct roc_se_sglist_comp *scatter_comp;
767                 uint8_t *in_buffer;
768                 uint8_t i = 0;
769
770                 /* This falls under strict SG mode */
771                 offset_vaddr = m_vaddr;
772                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
773
774                 m_vaddr = (uint8_t *)m_vaddr + size;
775
776                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
777
778                 if (likely(iv_len)) {
779                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
780                                                       ROC_SE_OFF_CTRL_LEN);
781                         uint64_t *src = fc_params->iv_buf;
782                         dest[0] = src[0];
783                         dest[1] = src[1];
784                 }
785
786                 /* DPTR has SG list */
787                 in_buffer = m_vaddr;
788
789                 ((uint16_t *)in_buffer)[0] = 0;
790                 ((uint16_t *)in_buffer)[1] = 0;
791
792                 /* TODO Add error check if space will be sufficient */
793                 gather_comp =
794                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
795
796                 /*
797                  * Input Gather List
798                  */
799                 i = 0;
800
801                 /* Offset control word that includes iv */
802                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
803                                  ROC_SE_OFF_CTRL_LEN + iv_len);
804
805                 /* Add input data */
806                 if (flags & ROC_SE_VALID_MAC_BUF) {
807                         size = inputlen - iv_len - mac_len;
808                         if (size) {
809                                 /* input data only */
810                                 if (unlikely(flags &
811                                              ROC_SE_SINGLE_BUF_INPLACE)) {
812                                         i = fill_sg_comp_from_buf_min(
813                                                 gather_comp, i, fc_params->bufs,
814                                                 &size);
815                                 } else {
816                                         uint32_t aad_offset =
817                                                 aad_len ? passthrough_len : 0;
818
819                                         i = fill_sg_comp_from_iov(
820                                                 gather_comp, i,
821                                                 fc_params->src_iov, 0, &size,
822                                                 aad_buf, aad_offset);
823                                 }
824                                 if (unlikely(size)) {
825                                         plt_dp_err("Insufficient buffer"
826                                                    " space, size %d needed",
827                                                    size);
828                                         return -1;
829                                 }
830                         }
831
832                         /* mac data */
833                         if (mac_len) {
834                                 i = fill_sg_comp_from_buf(gather_comp, i,
835                                                           &fc_params->mac_buf);
836                         }
837                 } else {
838                         /* input data + mac */
839                         size = inputlen - iv_len;
840                         if (size) {
841                                 if (unlikely(flags &
842                                              ROC_SE_SINGLE_BUF_INPLACE)) {
843                                         i = fill_sg_comp_from_buf_min(
844                                                 gather_comp, i, fc_params->bufs,
845                                                 &size);
846                                 } else {
847                                         uint32_t aad_offset =
848                                                 aad_len ? passthrough_len : 0;
849
850                                         if (unlikely(!fc_params->src_iov)) {
851                                                 plt_dp_err("Bad input args");
852                                                 return -1;
853                                         }
854
855                                         i = fill_sg_comp_from_iov(
856                                                 gather_comp, i,
857                                                 fc_params->src_iov, 0, &size,
858                                                 aad_buf, aad_offset);
859                                 }
860
861                                 if (unlikely(size)) {
862                                         plt_dp_err("Insufficient buffer"
863                                                    " space, size %d needed",
864                                                    size);
865                                         return -1;
866                                 }
867                         }
868                 }
869                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
870                 g_size_bytes =
871                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
872
873                 /*
874                  * Output Scatter List
875                  */
876
877                 i = 0;
878                 scatter_comp =
879                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
880                                                       g_size_bytes);
881
882                 /* Add iv */
883                 if (iv_len) {
884                         i = fill_sg_comp(scatter_comp, i,
885                                          (uint64_t)offset_vaddr +
886                                                  ROC_SE_OFF_CTRL_LEN,
887                                          iv_len);
888                 }
889
890                 /* Add output data */
891                 size = outputlen - iv_len;
892                 if (size) {
893                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
894                                 /* handle single buffer here */
895                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
896                                                               fc_params->bufs,
897                                                               &size);
898                         } else {
899                                 uint32_t aad_offset =
900                                         aad_len ? passthrough_len : 0;
901
902                                 if (unlikely(!fc_params->dst_iov)) {
903                                         plt_dp_err("Bad input args");
904                                         return -1;
905                                 }
906
907                                 i = fill_sg_comp_from_iov(
908                                         scatter_comp, i, fc_params->dst_iov, 0,
909                                         &size, aad_buf, aad_offset);
910                         }
911
912                         if (unlikely(size)) {
913                                 plt_dp_err("Insufficient buffer space,"
914                                            " size %d needed",
915                                            size);
916                                 return -1;
917                         }
918                 }
919
920                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
921                 s_size_bytes =
922                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
923
924                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
925
926                 /* This is DPTR len in case of SG mode */
927                 cpt_inst_w4.s.dlen = size;
928
929                 inst->dptr = (uint64_t)in_buffer;
930         }
931
932         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
933                      (auth_offset >> 8))) {
934                 plt_dp_err("Offset not supported");
935                 plt_dp_err("enc_offset: %d", encr_offset);
936                 plt_dp_err("iv_offset : %d", iv_offset);
937                 plt_dp_err("auth_offset: %d", auth_offset);
938                 return -1;
939         }
940
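        /* Same 8-byte offset control word layout as in the encrypt path. */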
941         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
942                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
943                 ((uint64_t)auth_offset));
944
945         inst->w4.u64 = cpt_inst_w4.u64;
946         return 0;
947 }
948
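/*
 * Build a ZUC/SNOW3G (EEA3/UEA2 cipher or EIA3/UIA2 auth) instruction.
 * se_ctx->zsk_flags selects the operation: 0x1 means authentication
 * (digest generation), anything else means encryption. Offsets arrive in
 * bits and are converted to bytes, while the data lengths stay in bits
 * as the microcode expects.
 */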
949 static __rte_always_inline int
950 cpt_zuc_snow3g_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
951                         struct roc_se_fc_params *params,
952                         struct cpt_inst_s *inst)
953 {
954         uint32_t size;
955         int32_t inputlen, outputlen;
956         struct roc_se_ctx *se_ctx;
957         uint32_t mac_len = 0;
958         uint8_t pdcp_alg_type, j;
959         uint32_t encr_offset = 0, auth_offset = 0;
960         uint32_t encr_data_len = 0, auth_data_len = 0;
961         int flags, iv_len = 16;
962         uint64_t offset_ctrl;
963         uint64_t *offset_vaddr;
964         uint32_t *iv_s, iv[4];
965         union cpt_inst_w4 cpt_inst_w4;
966
967         se_ctx = params->ctx_buf.vaddr;
968         flags = se_ctx->zsk_flags;
969         mac_len = se_ctx->mac_len;
970         pdcp_alg_type = se_ctx->pdcp_alg_type;
971
972         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
973
974         /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
975
976         cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
977                                       (0 << 4) | (0 << 3) | (flags & 0x7));
978
979         if (flags == 0x1) {
980                 /*
981                  * Microcode expects offsets in bytes
982                  * TODO: Rounding off
983                  */
984                 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
985
986                 /* EIA3 or UIA2 */
987                 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
988                 auth_offset = auth_offset / 8;
989
990                 /* consider iv len */
991                 auth_offset += iv_len;
992
993                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
994                 outputlen = mac_len;
995
996                 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
997
998         } else {
999                 /* EEA3 or UEA2 */
1000                 /*
1001                  * Microcode expects offsets in bytes
1002                  * TODO: Rounding off
1003                  */
1004                 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1005
1006                 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
1007                 encr_offset = encr_offset / 8;
1008                 /* consider iv len */
1009                 encr_offset += iv_len;
1010
1011                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1012                 outputlen = inputlen;
1013
1014                 /* iv offset is 0 */
1015                 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1016         }
1017
1018         if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
1019                 plt_dp_err("Offset not supported");
1020                 plt_dp_err("enc_offset: %d", encr_offset);
1021                 plt_dp_err("auth_offset: %d", auth_offset);
1022                 return -1;
1023         }
1024
1025         /* IV */
1026         iv_s = (flags == 0x1) ? params->auth_iv_buf : params->iv_buf;
1027
1028         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1029                 /*
1030                  * DPDK provides the IV as IV3 IV2 IV1 IV0 (big endian);
1031                  * the microcode expects IV0 IV1 IV2 IV3, so swap the words.
1032                  */
1033
1034                 for (j = 0; j < 4; j++)
1035                         iv[j] = iv_s[3 - j];
1036         } else {
1037                 /* ZUC doesn't need a swap */
1038                 for (j = 0; j < 4; j++)
1039                         iv[j] = iv_s[j];
1040         }
1041
1042         /*
1043          * GP op header, lengths are expected in bits.
1044          */
1045         cpt_inst_w4.s.param1 = encr_data_len;
1046         cpt_inst_w4.s.param2 = auth_data_len;
1047
1048         /*
1049          * On cn9k and cn10k the IV and offset control word cannot be
1050          * carried in the instruction itself and must be placed in the
1051          * data buffer, so direct mode is used only when the buffer has
1052          * enough headroom for them; otherwise fall back to SG mode.
1053          */
1054         if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1055                    (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1056                 void *dm_vaddr = params->bufs[0].vaddr;
1057
1058                 /* Use Direct mode */
1059
1060                 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1061                                             ROC_SE_OFF_CTRL_LEN - iv_len);
1062
1063                 /* DPTR */
1064                 inst->dptr = (uint64_t)offset_vaddr;
1065                 /* RPTR should just exclude offset control word */
1066                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1067
1068                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1069
1070                 if (likely(iv_len)) {
1071                         uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1072                                                       ROC_SE_OFF_CTRL_LEN);
1073                         memcpy(iv_d, iv, 16);
1074                 }
1075
1076                 *offset_vaddr = offset_ctrl;
1077         } else {
1078                 void *m_vaddr = params->meta_buf.vaddr;
1079                 uint32_t i, g_size_bytes, s_size_bytes;
1080                 struct roc_se_sglist_comp *gather_comp;
1081                 struct roc_se_sglist_comp *scatter_comp;
1082                 uint8_t *in_buffer;
1083                 uint32_t *iv_d;
1084
1085                 /* save space for iv */
1086                 offset_vaddr = m_vaddr;
1087
1088                 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1089
1090                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1091
1092                 /* DPTR has SG list */
1093                 in_buffer = m_vaddr;
1094
1095                 ((uint16_t *)in_buffer)[0] = 0;
1096                 ((uint16_t *)in_buffer)[1] = 0;
1097
1098                 /* TODO Add error check if space will be sufficient */
1099                 gather_comp =
1100                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1101
1102                 /*
1103                  * Input Gather List
1104                  */
1105                 i = 0;
1106
1107                 /* Offset control word followed by iv */
1108
1109                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1110                                  ROC_SE_OFF_CTRL_LEN + iv_len);
1111
1112                 /* iv offset is 0 */
1113                 *offset_vaddr = offset_ctrl;
1114
1115                 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1116                                     ROC_SE_OFF_CTRL_LEN);
1117                 memcpy(iv_d, iv, 16);
1118
1119                 /* input data */
1120                 size = inputlen - iv_len;
1121                 if (size) {
1122                         i = fill_sg_comp_from_iov(gather_comp, i,
1123                                                   params->src_iov, 0, &size,
1124                                                   NULL, 0);
1125                         if (unlikely(size)) {
1126                                 plt_dp_err("Insufficient buffer space,"
1127                                            " size %d needed",
1128                                            size);
1129                                 return -1;
1130                         }
1131                 }
1132                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1133                 g_size_bytes =
1134                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1135
1136                 /*
1137                  * Output Scatter List
1138                  */
1139
1140                 i = 0;
1141                 scatter_comp =
1142                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1143                                                       g_size_bytes);
1144
1145                 if (flags == 0x1) {
1146                         /* IV in scatter list only for EEA3/UEA2, not for auth */
1147                         iv_len = 0;
1148                 }
1149
1150                 if (iv_len) {
1151                         i = fill_sg_comp(scatter_comp, i,
1152                                          (uint64_t)offset_vaddr +
1153                                                  ROC_SE_OFF_CTRL_LEN,
1154                                          iv_len);
1155                 }
1156
1157                 /* Add output data */
1158                 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1159                         size = outputlen - iv_len - mac_len;
1160                         if (size) {
1161                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1162                                                           params->dst_iov, 0,
1163                                                           &size, NULL, 0);
1164
1165                                 if (unlikely(size)) {
1166                                         plt_dp_err("Insufficient buffer space,"
1167                                                    " size %d needed",
1168                                                    size);
1169                                         return -1;
1170                                 }
1171                         }
1172
1173                         /* mac data */
1174                         if (mac_len) {
1175                                 i = fill_sg_comp_from_buf(scatter_comp, i,
1176                                                           &params->mac_buf);
1177                         }
1178                 } else {
1179                         /* Output including mac */
1180                         size = outputlen - iv_len;
1181                         if (size) {
1182                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1183                                                           params->dst_iov, 0,
1184                                                           &size, NULL, 0);
1185
1186                                 if (unlikely(size)) {
1187                                         plt_dp_err("Insufficient buffer space,"
1188                                                    " size %d needed",
1189                                                    size);
1190                                         return -1;
1191                                 }
1192                         }
1193                 }
1194                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1195                 s_size_bytes =
1196                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1197
1198                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1199
1200                 /* This is DPTR len in case of SG mode */
1201                 cpt_inst_w4.s.dlen = size;
1202
1203                 inst->dptr = (uint64_t)in_buffer;
1204         }
1205
1206         inst->w4.u64 = cpt_inst_w4.u64;
1207
1208         return 0;
1209 }
1210
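/*
 * Build a ZUC/SNOW3G decrypt instruction. Only the cipher (EEA3/UEA2)
 * direction is handled here; decryption is the same keystream XOR as
 * encryption, so input and output lengths are identical and no MAC is
 * involved.
 */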
1211 static __rte_always_inline int
1212 cpt_zuc_snow3g_dec_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1213                         struct roc_se_fc_params *params,
1214                         struct cpt_inst_s *inst)
1215 {
1216         uint32_t size;
1217         int32_t inputlen = 0, outputlen;
1218         struct roc_se_ctx *se_ctx;
1219         uint8_t pdcp_alg_type, iv_len = 16;
1220         uint32_t encr_offset;
1221         uint32_t encr_data_len;
1222         int flags;
1223         uint64_t *offset_vaddr;
1224         uint32_t *iv_s, iv[4], j;
1225         union cpt_inst_w4 cpt_inst_w4;
1226
1227         /*
1228          * Microcode expects offsets in bytes
1229          * TODO: Rounding off
1230          */
1231         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1232         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1233
1234         se_ctx = params->ctx_buf.vaddr;
1235         flags = se_ctx->zsk_flags;
1236         pdcp_alg_type = se_ctx->pdcp_alg_type;
1237
1238         cpt_inst_w4.u64 = 0;
1239         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
1240
1241         /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1242
1243         cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
1244                                       (0 << 4) | (0 << 3) | (flags & 0x7));
1245
1246         /* consider iv len */
1247         encr_offset += iv_len;
1248
1249         inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1250         outputlen = inputlen;
1251
1252         /* IV */
1253         iv_s = params->iv_buf;
1254         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1255                 /*
1256                  * DPDK provides the IV as IV3 IV2 IV1 IV0 (big endian);
1257                  * the microcode expects IV0 IV1 IV2 IV3, so swap the words.
1258                  */
1259
1260                 for (j = 0; j < 4; j++)
1261                         iv[j] = iv_s[3 - j];
1262         } else {
1263                 /* ZUC doesn't need a swap */
1264                 for (j = 0; j < 4; j++)
1265                         iv[j] = iv_s[j];
1266         }
1267
1268         /*
1269          * GP op header, lengths are expected in bits.
1270          */
1271         cpt_inst_w4.s.param1 = encr_data_len;
1272
1273         /*
1274          * On cn9k and cn10k the IV and offset control word cannot be
1275          * carried in the instruction itself and must be placed in the
1276          * data buffer, so direct mode is used only when the buffer has
1277          * enough headroom for them; otherwise fall back to SG mode.
1278          */
1279         if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1280                    (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1281                 void *dm_vaddr = params->bufs[0].vaddr;
1282
1283                 /* Use Direct mode */
1284
1285                 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1286                                             ROC_SE_OFF_CTRL_LEN - iv_len);
1287
1288                 /* DPTR */
1289                 inst->dptr = (uint64_t)offset_vaddr;
1290
1291                 /* RPTR should just exclude offset control word */
1292                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1293
1294                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1295
1296                 if (likely(iv_len)) {
1297                         uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1298                                                       ROC_SE_OFF_CTRL_LEN);
1299                         memcpy(iv_d, iv, 16);
1300                 }
1301
1302                 /* iv offset is 0 */
1303                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1304         } else {
1305                 void *m_vaddr = params->meta_buf.vaddr;
1306                 uint32_t i, g_size_bytes, s_size_bytes;
1307                 struct roc_se_sglist_comp *gather_comp;
1308                 struct roc_se_sglist_comp *scatter_comp;
1309                 uint8_t *in_buffer;
1310                 uint32_t *iv_d;
1311
1312                 /* save space for offset and iv... */
1313                 offset_vaddr = m_vaddr;
1314
1315                 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1316
1317                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1318
1319                 /* DPTR has SG list */
1320                 in_buffer = m_vaddr;
1321
1322                 ((uint16_t *)in_buffer)[0] = 0;
1323                 ((uint16_t *)in_buffer)[1] = 0;
1324
1325                 /* TODO Add error check if space will be sufficient */
1326                 gather_comp =
1327                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1328
1329                 /*
1330                  * Input Gather List
1331                  */
1332                 i = 0;
1333
1334                 /* Offset control word */
1335
1336                 /* iv offset is 0 */
1337                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1338
1339                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1340                                  ROC_SE_OFF_CTRL_LEN + iv_len);
1341
1342                 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1343                                     ROC_SE_OFF_CTRL_LEN);
1344                 memcpy(iv_d, iv, 16);
1345
1346                 /* Add input data */
1347                 size = inputlen - iv_len;
1348                 if (size) {
1349                         i = fill_sg_comp_from_iov(gather_comp, i,
1350                                                   params->src_iov, 0, &size,
1351                                                   NULL, 0);
1352                         if (unlikely(size)) {
1353                                 plt_dp_err("Insufficient buffer space,"
1354                                            " size %d needed",
1355                                            size);
1356                                 return -1;
1357                         }
1358                 }
1359                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1360                 g_size_bytes =
1361                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
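                /*
                 * Each roc_se_sglist_comp packs up to four pointer/length
                 * entries, hence the round-up by four when sizing the
                 * gather list.
                 */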
1362
1363                 /*
1364                  * Output Scatter List
1365                  */
1366
1367                 i = 0;
1368                 scatter_comp =
1369                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1370                                                       g_size_bytes);
1371
1372                 /* IV */
1373                 i = fill_sg_comp(scatter_comp, i,
1374                                  (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1375                                  iv_len);
1376
1377                 /* Add output data */
1378                 size = outputlen - iv_len;
1379                 if (size) {
1380                         i = fill_sg_comp_from_iov(scatter_comp, i,
1381                                                   params->dst_iov, 0, &size,
1382                                                   NULL, 0);
1383
1384                         if (unlikely(size)) {
1385                                 plt_dp_err("Insufficient buffer space,"
1386                                            " size %d needed",
1387                                            size);
1388                                 return -1;
1389                         }
1390                 }
1391                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1392                 s_size_bytes =
1393                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1394
1395                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1396
1397                 /* This is DPTR len in case of SG mode */
1398                 cpt_inst_w4.s.dlen = size;
1399
1400                 inst->dptr = (uint64_t)in_buffer;
1401         }
1402
1403         if (unlikely((encr_offset >> 16))) {
1404                 plt_dp_err("Offset not supported");
1405                 plt_dp_err("enc_offset: %d", encr_offset);
1406                 return -1;
1407         }
1408
1409         inst->w4.u64 = cpt_inst_w4.u64;
1410
1411         return 0;
1412 }
1413
1414 static __rte_always_inline int
1415 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1416                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1417 {
1418         void *m_vaddr = params->meta_buf.vaddr;
1419         uint32_t size;
1420         int32_t inputlen = 0, outputlen = 0;
1421         struct roc_se_ctx *se_ctx;
1422         uint32_t mac_len = 0;
1423         uint8_t i = 0;
1424         uint32_t encr_offset, auth_offset;
1425         uint32_t encr_data_len, auth_data_len;
1426         int flags;
1427         uint8_t *iv_s, *iv_d, iv_len = 8;
1428         uint8_t dir = 0;
1429         uint64_t *offset_vaddr;
1430         union cpt_inst_w4 cpt_inst_w4;
1431         uint8_t *in_buffer;
1432         uint32_t g_size_bytes, s_size_bytes;
1433         struct roc_se_sglist_comp *gather_comp;
1434         struct roc_se_sglist_comp *scatter_comp;
1435
1436         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1437         auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1438         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1439         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1440
1441         se_ctx = params->ctx_buf.vaddr;
1442         flags = se_ctx->zsk_flags;
1443         mac_len = se_ctx->mac_len;
1444
1445         if (flags == 0x0)
1446                 iv_s = params->iv_buf;
1447         else
1448                 iv_s = params->auth_iv_buf;
1449
1450         dir = iv_s[8] & 0x1;
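        /*
         * The direction bit is carried at byte 8 of the IV buffer. For F9,
         * fill_digest_params() recovers it from the end of the source data
         * and stores it there.
         */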
1451
1452         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1453
1454         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1455         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1456                                       (dir << 4) | (0 << 3) | (flags & 0x7));
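        /*
         * Reading of the expression above: bit 5 carries k_ecb (ECB vs
         * CBC), bit 4 the direction, bits 2:0 the zsk flags; bits 6 and 3
         * select the context/IV sources noted in the comment above.
         */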
1457
1458         /*
1459          * GP op header, lengths are expected in bits.
1460          */
1461         cpt_inst_w4.s.param1 = encr_data_len;
1462         cpt_inst_w4.s.param2 = auth_data_len;
1463
1464         /* consider iv len */
1465         if (flags == 0x0) {
1466                 encr_offset += iv_len;
1467                 auth_offset += iv_len;
1468         }
1469
1470         /* save space for offset ctrl and iv */
1471         offset_vaddr = m_vaddr;
1472
1473         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1474
1475         /* DPTR has SG list */
1476         in_buffer = m_vaddr;
1477
1478         ((uint16_t *)in_buffer)[0] = 0;
1479         ((uint16_t *)in_buffer)[1] = 0;
1480
1481         /* TODO Add error check if space will be sufficient */
1482         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1483
1484         /*
1485          * Input Gather List
1486          */
1487         i = 0;
1488
1489         /* Offset control word followed by iv */
1490
1491         if (flags == 0x0) {
1492                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1493                 outputlen = inputlen;
1494                 /* iv offset is 0 */
1495                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1496                 if (unlikely((encr_offset >> 16))) {
1497                         plt_dp_err("Offset not supported");
1498                         plt_dp_err("enc_offset: %d", encr_offset);
1499                         return -1;
1500                 }
1501         } else {
1502                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1503                 outputlen = mac_len;
1504                 /* iv offset is 0 */
1505                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1506                 if (unlikely((auth_offset >> 8))) {
1507                         plt_dp_err("Offset not supported");
1508                         plt_dp_err("auth_offset: %d", auth_offset);
1509                         return -1;
1510                 }
1511         }
1512
1513         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1514                          ROC_SE_OFF_CTRL_LEN + iv_len);
1515
1516         /* IV */
1517         iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1518         memcpy(iv_d, iv_s, iv_len);
1519
1520         /* input data */
1521         size = inputlen - iv_len;
1522         if (size) {
1523                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1524                                           &size, NULL, 0);
1525
1526                 if (unlikely(size)) {
1527                         plt_dp_err("Insufficient buffer space,"
1528                                    " size %d needed",
1529                                    size);
1530                         return -1;
1531                 }
1532         }
1533         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1534         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1535
1536         /*
1537          * Output Scatter List
1538          */
1539
1540         i = 0;
1541         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1542                                                      g_size_bytes);
1543
1544         if (flags == 0x1) {
1545                 /* The IV is placed in the scatter list only for F8 */
1546                 iv_len = 0;
1547         }
1548
1549         /* IV */
1550         if (iv_len) {
1551                 i = fill_sg_comp(scatter_comp, i,
1552                                  (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1553                                  iv_len);
1554         }
1555
1556         /* Add output data */
1557         if (req_flags & ROC_SE_VALID_MAC_BUF) {
1558                 size = outputlen - iv_len - mac_len;
1559                 if (size) {
1560                         i = fill_sg_comp_from_iov(scatter_comp, i,
1561                                                   params->dst_iov, 0, &size,
1562                                                   NULL, 0);
1563
1564                         if (unlikely(size)) {
1565                                 plt_dp_err("Insufficient buffer space,"
1566                                            " size %d needed",
1567                                            size);
1568                                 return -1;
1569                         }
1570                 }
1571
1572                 /* mac data */
1573                 if (mac_len) {
1574                         i = fill_sg_comp_from_buf(scatter_comp, i,
1575                                                   &params->mac_buf);
1576                 }
1577         } else {
1578                 /* Output including mac */
1579                 size = outputlen - iv_len;
1580                 if (size) {
1581                         i = fill_sg_comp_from_iov(scatter_comp, i,
1582                                                   params->dst_iov, 0, &size,
1583                                                   NULL, 0);
1584
1585                         if (unlikely(size)) {
1586                                 plt_dp_err("Insufficient buffer space,"
1587                                            " size %d needed",
1588                                            size);
1589                                 return -1;
1590                         }
1591                 }
1592         }
1593         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1594         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1595
1596         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1597
1598         /* This is DPTR len in case of SG mode */
1599         cpt_inst_w4.s.dlen = size;
1600
1601         inst->dptr = (uint64_t)in_buffer;
1602         inst->w4.u64 = cpt_inst_w4.u64;
1603
1604         return 0;
1605 }
1606
1607 static __rte_always_inline int
1608 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1609                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1610 {
1611         void *m_vaddr = params->meta_buf.vaddr;
1612         uint32_t size;
1613         int32_t inputlen = 0, outputlen;
1614         struct roc_se_ctx *se_ctx;
1615         uint8_t i = 0, iv_len = 8;
1616         uint32_t encr_offset;
1617         uint32_t encr_data_len;
1618         int flags;
1619         uint8_t dir = 0;
1620         uint64_t *offset_vaddr;
1621         union cpt_inst_w4 cpt_inst_w4;
1622         uint8_t *in_buffer;
1623         uint32_t g_size_bytes, s_size_bytes;
1624         struct roc_se_sglist_comp *gather_comp;
1625         struct roc_se_sglist_comp *scatter_comp;
1626
1627         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1628         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1629
1630         se_ctx = params->ctx_buf.vaddr;
1631         flags = se_ctx->zsk_flags;
1632
1633         cpt_inst_w4.u64 = 0;
1634         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1635
1636         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1637         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1638                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1639
1640         /*
1641          * GP op header, lengths are expected in bits.
1642          */
1643         cpt_inst_w4.s.param1 = encr_data_len;
1644
1645         /* consider iv len */
1646         encr_offset += iv_len;
1647
1648         inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
1649         outputlen = inputlen;
1650
1651         /* save space for offset ctrl & iv */
1652         offset_vaddr = m_vaddr;
1653
1654         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1655
1656         /* DPTR has SG list */
1657         in_buffer = m_vaddr;
1658
1659         ((uint16_t *)in_buffer)[0] = 0;
1660         ((uint16_t *)in_buffer)[1] = 0;
1661
1662         /* TODO Add error check if space will be sufficient */
1663         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1664
1665         /*
1666          * Input Gather List
1667          */
1668         i = 0;
1669
1670         /* Offset control word followed by iv */
1671         *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1672         if (unlikely((encr_offset >> 16))) {
1673                 plt_dp_err("Offset not supported");
1674                 plt_dp_err("enc_offset: %d", encr_offset);
1675                 return -1;
1676         }
1677
1678         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1679                          ROC_SE_OFF_CTRL_LEN + iv_len);
1680
1681         /* IV */
1682         memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1683                iv_len);
1684
1685         /* Add input data */
1686         size = inputlen - iv_len;
1687         if (size) {
1688                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1689                                           &size, NULL, 0);
1690                 if (unlikely(size)) {
1691                         plt_dp_err("Insufficient buffer space,"
1692                                    " size %d needed",
1693                                    size);
1694                         return -1;
1695                 }
1696         }
1697         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1698         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1699
1700         /*
1701          * Output Scatter List
1702          */
1703
1704         i = 0;
1705         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1706                                                      g_size_bytes);
1707
1708         /* IV */
1709         i = fill_sg_comp(scatter_comp, i,
1710                          (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1711
1712         /* Add output data */
1713         size = outputlen - iv_len;
1714         if (size) {
1715                 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1716                                           &size, NULL, 0);
1717                 if (unlikely(size)) {
1718                         plt_dp_err("Insufficient buffer space,"
1719                                    " size %d needed",
1720                                    size);
1721                         return -1;
1722                 }
1723         }
1724         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1725         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1726
1727         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1728
1729         /* This is DPTR len in case of SG mode */
1730         cpt_inst_w4.s.dlen = size;
1731
1732         inst->dptr = (uint64_t)in_buffer;
1733         inst->w4.u64 = cpt_inst_w4.u64;
1734
1735         return 0;
1736 }
1737
1738 static __rte_always_inline int
1739 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1740                      struct roc_se_fc_params *fc_params,
1741                      struct cpt_inst_s *inst)
1742 {
1743         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1744         uint8_t fc_type;
1745         int ret = -1;
1746
1747         fc_type = ctx->fc_type;
1748
1749         if (likely(fc_type == ROC_SE_FC_GEN)) {
1750                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1751         } else if (fc_type == ROC_SE_PDCP) {
1752                 ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params,
1753                                               inst);
1754         } else if (fc_type == ROC_SE_KASUMI) {
1755                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1756         }
1757
1758         /*
1759          * For the AUTH_ONLY case,
1760          * MC supports only digest generation; verification
1761          * has to be done in software by memcmp()
1762          */
1763
1764         return ret;
1765 }
1766
1767 static __rte_always_inline int
1768 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1769                      struct roc_se_fc_params *fc_params,
1770                      struct cpt_inst_s *inst)
1771 {
1772         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1773         uint8_t fc_type;
1774         int ret = -1;
1775
1776         fc_type = ctx->fc_type;
1777
1778         if (likely(fc_type == ROC_SE_FC_GEN)) {
1779                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1780         } else if (fc_type == ROC_SE_PDCP) {
1781                 ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params,
1782                                               inst);
1783         } else if (fc_type == ROC_SE_KASUMI) {
1784                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1785                                           inst);
1786         } else if (fc_type == ROC_SE_HASH_HMAC) {
1787                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1788         }
1789
1790         return ret;
1791 }
1792
1793 static __rte_always_inline int
1794 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1795 {
1796         struct rte_crypto_aead_xform *aead_form;
1797         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1798         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1799         uint32_t cipher_key_len = 0;
1800         uint8_t aes_gcm = 0;
1801         aead_form = &xform->aead;
1802
1803         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1804                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1805                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1806         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1807                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1808                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1809         } else {
1810                 plt_dp_err("Unknown aead operation");
1811                 return -1;
1812         }
1813         switch (aead_form->algo) {
1814         case RTE_CRYPTO_AEAD_AES_GCM:
1815                 enc_type = ROC_SE_AES_GCM;
1816                 cipher_key_len = 16;
1817                 aes_gcm = 1;
1818                 break;
1819         case RTE_CRYPTO_AEAD_AES_CCM:
1820                 plt_dp_err("Crypto: Unsupported cipher algo %u",
1821                            aead_form->algo);
1822                 return -1;
1823         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1824                 enc_type = ROC_SE_CHACHA20;
1825                 auth_type = ROC_SE_POLY1305;
1826                 cipher_key_len = 32;
1827                 sess->chacha_poly = 1;
1828                 break;
1829         default:
1830                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1831                            aead_form->algo);
1832                 return -1;
1833         }
1834         if (aead_form->key.length < cipher_key_len) {
1835                 plt_dp_err("Invalid cipher params keylen %u",
1836                            aead_form->key.length);
1837                 return -1;
1838         }
1839         sess->zsk_flag = 0;
1840         sess->aes_gcm = aes_gcm;
1841         sess->mac_len = aead_form->digest_length;
1842         sess->iv_offset = aead_form->iv.offset;
1843         sess->iv_length = aead_form->iv.length;
1844         sess->aad_length = aead_form->aad_length;
1845
1846         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1847                                          aead_form->key.data,
1848                                          aead_form->key.length, NULL)))
1849                 return -1;
1850
1851         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1852                                          aead_form->digest_length)))
1853                 return -1;
1854
1855         return 0;
1856 }
1857
1858 static __rte_always_inline int
1859 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1860 {
1861         struct rte_crypto_cipher_xform *c_form;
1862         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1863         uint32_t cipher_key_len = 0;
1864         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1865
1866         c_form = &xform->cipher;
1867
1868         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1869                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1870         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1871                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1872                 if (xform->next != NULL &&
1873                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1874                         /* Perform decryption followed by auth verify */
1875                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1876                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1877                 }
1878         } else {
1879                 plt_dp_err("Unknown cipher operation");
1880                 return -1;
1881         }
1882
1883         switch (c_form->algo) {
1884         case RTE_CRYPTO_CIPHER_AES_CBC:
1885                 enc_type = ROC_SE_AES_CBC;
1886                 cipher_key_len = 16;
1887                 break;
1888         case RTE_CRYPTO_CIPHER_3DES_CBC:
1889                 enc_type = ROC_SE_DES3_CBC;
1890                 cipher_key_len = 24;
1891                 break;
1892         case RTE_CRYPTO_CIPHER_DES_CBC:
1893                 /* DES is implemented using 3DES in hardware */
1894                 enc_type = ROC_SE_DES3_CBC;
1895                 cipher_key_len = 8;
1896                 break;
1897         case RTE_CRYPTO_CIPHER_AES_CTR:
1898                 enc_type = ROC_SE_AES_CTR;
1899                 cipher_key_len = 16;
1900                 aes_ctr = 1;
1901                 break;
1902         case RTE_CRYPTO_CIPHER_NULL:
1903                 enc_type = 0;
1904                 is_null = 1;
1905                 break;
1906         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1907                 enc_type = ROC_SE_KASUMI_F8_ECB;
1908                 cipher_key_len = 16;
1909                 zsk_flag = ROC_SE_K_F8;
1910                 break;
1911         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1912                 enc_type = ROC_SE_SNOW3G_UEA2;
1913                 cipher_key_len = 16;
1914                 zsk_flag = ROC_SE_ZS_EA;
1915                 break;
1916         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1917                 enc_type = ROC_SE_ZUC_EEA3;
1918                 cipher_key_len = 16;
1919                 zsk_flag = ROC_SE_ZS_EA;
1920                 break;
1921         case RTE_CRYPTO_CIPHER_AES_XTS:
1922                 enc_type = ROC_SE_AES_XTS;
1923                 cipher_key_len = 16;
1924                 break;
1925         case RTE_CRYPTO_CIPHER_3DES_ECB:
1926                 enc_type = ROC_SE_DES3_ECB;
1927                 cipher_key_len = 24;
1928                 break;
1929         case RTE_CRYPTO_CIPHER_AES_ECB:
1930                 enc_type = ROC_SE_AES_ECB;
1931                 cipher_key_len = 16;
1932                 break;
1933         case RTE_CRYPTO_CIPHER_3DES_CTR:
1934         case RTE_CRYPTO_CIPHER_AES_F8:
1935         case RTE_CRYPTO_CIPHER_ARC4:
1936                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1937                 return -1;
1938         default:
1939                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1940                            c_form->algo);
1941                 return -1;
1942         }
1943
1944         if (c_form->key.length < cipher_key_len) {
1945                 plt_dp_err("Invalid cipher params keylen %u",
1946                            c_form->key.length);
1947                 return -1;
1948         }
1949
1950         sess->zsk_flag = zsk_flag;
1951         sess->aes_gcm = 0;
1952         sess->aes_ctr = aes_ctr;
1953         sess->iv_offset = c_form->iv.offset;
1954         sess->iv_length = c_form->iv.length;
1955         sess->is_null = is_null;
1956
1957         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1958                                          c_form->key.data, c_form->key.length,
1959                                          NULL)))
1960                 return -1;
1961
1962         return 0;
1963 }
1964
1965 static __rte_always_inline int
1966 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1967 {
1968         struct rte_crypto_auth_xform *a_form;
1969         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1970         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1971
1972         if (xform->next != NULL &&
1973             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1974             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1975                 /* Perform auth followed by encryption */
1976                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1977                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1978         }
1979
1980         a_form = &xform->auth;
1981
1982         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1983                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1984         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1985                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1986         else {
1987                 plt_dp_err("Unknown auth operation");
1988                 return -1;
1989         }
1990
1991         switch (a_form->algo) {
1992         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1993                 /* Fall through */
1994         case RTE_CRYPTO_AUTH_SHA1:
1995                 auth_type = ROC_SE_SHA1_TYPE;
1996                 break;
1997         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1998         case RTE_CRYPTO_AUTH_SHA256:
1999                 auth_type = ROC_SE_SHA2_SHA256;
2000                 break;
2001         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2002         case RTE_CRYPTO_AUTH_SHA512:
2003                 auth_type = ROC_SE_SHA2_SHA512;
2004                 break;
2005         case RTE_CRYPTO_AUTH_AES_GMAC:
2006                 auth_type = ROC_SE_GMAC_TYPE;
2007                 aes_gcm = 1;
2008                 break;
2009         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2010         case RTE_CRYPTO_AUTH_SHA224:
2011                 auth_type = ROC_SE_SHA2_SHA224;
2012                 break;
2013         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2014         case RTE_CRYPTO_AUTH_SHA384:
2015                 auth_type = ROC_SE_SHA2_SHA384;
2016                 break;
2017         case RTE_CRYPTO_AUTH_MD5_HMAC:
2018         case RTE_CRYPTO_AUTH_MD5:
2019                 auth_type = ROC_SE_MD5_TYPE;
2020                 break;
2021         case RTE_CRYPTO_AUTH_KASUMI_F9:
2022                 auth_type = ROC_SE_KASUMI_F9_ECB;
2023                 /*
2024                  * Indicate that the direction bit has to be extracted
2025                  * from the end of the source buffer
2026                  */
2027                 zsk_flag = ROC_SE_K_F9;
2028                 break;
2029         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2030                 auth_type = ROC_SE_SNOW3G_UIA2;
2031                 zsk_flag = ROC_SE_ZS_IA;
2032                 break;
2033         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2034                 auth_type = ROC_SE_ZUC_EIA3;
2035                 zsk_flag = ROC_SE_ZS_IA;
2036                 break;
2037         case RTE_CRYPTO_AUTH_NULL:
2038                 auth_type = 0;
2039                 is_null = 1;
2040                 break;
2041         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2042         case RTE_CRYPTO_AUTH_AES_CMAC:
2043         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2044                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
2045                 return -1;
2046         default:
2047                 plt_dp_err("Crypto: Undefined Hash algo %u specified",
2048                            a_form->algo);
2049                 return -1;
2050         }
2051
2052         sess->zsk_flag = zsk_flag;
2053         sess->aes_gcm = aes_gcm;
2054         sess->mac_len = a_form->digest_length;
2055         sess->is_null = is_null;
2056         if (zsk_flag) {
2057                 sess->auth_iv_offset = a_form->iv.offset;
2058                 sess->auth_iv_length = a_form->iv.length;
2059         }
2060         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
2061                                          a_form->key.data, a_form->key.length,
2062                                          a_form->digest_length)))
2063                 return -1;
2064
2065         return 0;
2066 }
2067
2068 static __rte_always_inline int
2069 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
2070 {
2071         struct rte_crypto_auth_xform *a_form;
2072         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
2073         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
2074
2075         a_form = &xform->auth;
2076
2077         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2078                 sess->cpt_op |= ROC_SE_OP_ENCODE;
2079         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2080                 sess->cpt_op |= ROC_SE_OP_DECODE;
2081         else {
2082                 plt_dp_err("Unknown auth operation");
2083                 return -1;
2084         }
2085
2086         switch (a_form->algo) {
2087         case RTE_CRYPTO_AUTH_AES_GMAC:
2088                 enc_type = ROC_SE_AES_GCM;
2089                 auth_type = ROC_SE_GMAC_TYPE;
2090                 break;
2091         default:
2092                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
2093                            a_form->algo);
2094                 return -1;
2095         }
2096
2097         sess->zsk_flag = 0;
2098         sess->aes_gcm = 0;
2099         sess->is_gmac = 1;
2100         sess->iv_offset = a_form->iv.offset;
2101         sess->iv_length = a_form->iv.length;
2102         sess->mac_len = a_form->digest_length;
2103
2104         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
2105                                          a_form->key.data, a_form->key.length,
2106                                          NULL)))
2107                 return -1;
2108
2109         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
2110                                          a_form->digest_length)))
2111                 return -1;
2112
2113         return 0;
2114 }
2115
2116 static __rte_always_inline void *
2117 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
2118               struct rte_mempool *cpt_meta_pool,
2119               struct cpt_inflight_req *infl_req)
2120 {
2121         uint8_t *mdata;
2122
2123         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
2124                 return NULL;
2125
2126         buf->vaddr = mdata;
2127         buf->size = len;
2128
2129         infl_req->mdata = mdata;
2130         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
2131
2132         return mdata;
2133 }
2134
2135 static __rte_always_inline uint32_t
2136 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
2137                      uint32_t start_offset)
2138 {
2139         uint16_t index = 0;
2140         void *seg_data = NULL;
2141         int32_t seg_size = 0;
2142
2143         if (!pkt) {
2144                 iovec->buf_cnt = 0;
2145                 return 0;
2146         }
2147
2148         if (!start_offset) {
2149                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2150                 seg_size = pkt->data_len;
2151         } else {
2152                 while (start_offset >= pkt->data_len) {
2153                         start_offset -= pkt->data_len;
2154                         pkt = pkt->next;
2155                 }
2156
2157                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2158                 seg_size = pkt->data_len - start_offset;
2159                 if (!seg_size)
2160                         return 1;
2161         }
2162
2163         /* first seg */
2164         iovec->bufs[index].vaddr = seg_data;
2165         iovec->bufs[index].size = seg_size;
2166         index++;
2167         pkt = pkt->next;
2168
2169         while (unlikely(pkt != NULL)) {
2170                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2171                 seg_size = pkt->data_len;
2172                 if (!seg_size)
2173                         break;
2174
2175                 iovec->bufs[index].vaddr = seg_data;
2176                 iovec->bufs[index].size = seg_size;
2177
2178                 index++;
2179
2180                 pkt = pkt->next;
2181         }
2182
2183         iovec->buf_cnt = index;
2184         return 0;
2185 }
2186
2187 static __rte_always_inline uint32_t
2188 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2189                              struct roc_se_fc_params *param, uint32_t *flags)
2190 {
2191         uint16_t index = 0;
2192         void *seg_data = NULL;
2193         uint32_t seg_size = 0;
2194         struct roc_se_iov_ptr *iovec;
2195
2196         seg_data = rte_pktmbuf_mtod(pkt, void *);
2197         seg_size = pkt->data_len;
2198
2199         /* first seg */
2200         if (likely(!pkt->next)) {
2201                 uint32_t headroom;
2202
2203                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2204                 headroom = rte_pktmbuf_headroom(pkt);
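                /*
                 * 24 bytes of headroom cover the offset control word plus a
                 * 16-byte IV, which direct mode places in front of the
                 * packet data (assuming ROC_SE_OFF_CTRL_LEN is 8 bytes).
                 */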
2205                 if (likely(headroom >= 24))
2206                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2207
2208                 param->bufs[0].vaddr = seg_data;
2209                 param->bufs[0].size = seg_size;
2210                 return 0;
2211         }
2212         iovec = param->src_iov;
2213         iovec->bufs[index].vaddr = seg_data;
2214         iovec->bufs[index].size = seg_size;
2215         index++;
2216         pkt = pkt->next;
2217
2218         while (unlikely(pkt != NULL)) {
2219                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2220                 seg_size = pkt->data_len;
2221
2222                 if (!seg_size)
2223                         break;
2224
2225                 iovec->bufs[index].vaddr = seg_data;
2226                 iovec->bufs[index].size = seg_size;
2227
2228                 index++;
2229
2230                 pkt = pkt->next;
2231         }
2232
2233         iovec->buf_cnt = index;
2234         return 0;
2235 }
2236
2237 static __rte_always_inline int
2238 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2239                struct cpt_qp_meta_info *m_info,
2240                struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2241 {
2242         struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2243         uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2244         struct rte_crypto_sym_op *sym_op = cop->sym;
2245         void *mdata = NULL;
2246         uint32_t mc_hash_off;
2247         uint32_t flags = 0;
2248         uint64_t d_offs, d_lens;
2249         struct rte_mbuf *m_src, *m_dst;
2250         uint8_t cpt_op = sess->cpt_op;
2251 #ifdef CPT_ALWAYS_USE_SG_MODE
2252         uint8_t inplace = 0;
2253 #else
2254         uint8_t inplace = 1;
2255 #endif
2256         struct roc_se_fc_params fc_params;
2257         char src[SRC_IOV_SIZE];
2258         char dst[SRC_IOV_SIZE];
2259         uint32_t iv_buf[4];
2260         int ret;
2261
2262         if (likely(sess->iv_length)) {
2263                 flags |= ROC_SE_VALID_IV_BUF;
2264                 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2265                                                              sess->iv_offset);
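                /*
                 * For AES-CTR, when the session IV is not a full 16-byte
                 * counter block (i.e. a 12-byte nonce was supplied), the
                 * block below builds one: the nonce in the first 12 bytes
                 * and an initial counter of 1 (big-endian) in the last 4,
                 * much like GCM's J0 construction.
                 */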
2266                 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2267                         memcpy((uint8_t *)iv_buf,
2268                                rte_crypto_op_ctod_offset(cop, uint8_t *,
2269                                                          sess->iv_offset),
2270                                12);
2271                         iv_buf[3] = rte_cpu_to_be_32(0x1);
2272                         fc_params.iv_buf = iv_buf;
2273                 }
2274         }
2275
2276         if (sess->zsk_flag) {
2277                 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2278                         cop, uint8_t *, sess->auth_iv_offset);
2279                 if (sess->zsk_flag != ROC_SE_ZS_EA)
2280                         inplace = 0;
2281         }
2282         m_src = sym_op->m_src;
2283         m_dst = sym_op->m_dst;
2284
2285         if (sess->aes_gcm || sess->chacha_poly) {
2286                 uint8_t *salt;
2287                 uint8_t *aad_data;
2288                 uint16_t aad_len;
2289
2290                 d_offs = sym_op->aead.data.offset;
2291                 d_lens = sym_op->aead.data.length;
2292                 mc_hash_off =
2293                         sym_op->aead.data.offset + sym_op->aead.data.length;
2294
2295                 aad_data = sym_op->aead.aad.data;
2296                 aad_len = sess->aad_length;
2297                 if (likely((aad_data + aad_len) ==
2298                            rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2299                                                    sym_op->aead.data.offset))) {
2300                         d_offs = (d_offs - aad_len) | (d_offs << 16);
2301                         d_lens = (d_lens + aad_len) | (d_lens << 32);
2302                 } else {
2303                         fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2304                         fc_params.aad_buf.size = aad_len;
2305                         flags |= ROC_SE_VALID_AAD_BUF;
2306                         inplace = 0;
2307                         d_offs = d_offs << 16;
2308                         d_lens = d_lens << 32;
2309                 }
2310
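                /*
                 * For AEAD the first 4 IV bytes act as the salt cached in
                 * the session context; the remaining bytes form the per-op
                 * IV, so iv_buf is advanced past the salt below.
                 */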
2311                 salt = fc_params.iv_buf;
2312                 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2313                         cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2314                         sess->salt = *(uint32_t *)salt;
2315                 }
2316                 fc_params.iv_buf = salt + 4;
2317                 if (likely(sess->mac_len)) {
2318                         struct rte_mbuf *m =
2319                                 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2320
2321                         if (!m)
2322                                 m = m_src;
2323
2324                         /* hmac immediately following data is best case */
2325                         if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2326                                              mc_hash_off !=
2327                                      (uint8_t *)sym_op->aead.digest.data)) {
2328                                 flags |= ROC_SE_VALID_MAC_BUF;
2329                                 fc_params.mac_buf.size = sess->mac_len;
2330                                 fc_params.mac_buf.vaddr =
2331                                         sym_op->aead.digest.data;
2332                                 inplace = 0;
2333                         }
2334                 }
2335         } else {
2336                 d_offs = sym_op->cipher.data.offset;
2337                 d_lens = sym_op->cipher.data.length;
2338                 mc_hash_off =
2339                         sym_op->cipher.data.offset + sym_op->cipher.data.length;
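                /*
                 * Pack cipher and auth ranges into single words: the cipher
                 * offset goes in bits [31:16] of d_offs and the auth offset
                 * in bits [15:0]; the cipher length goes in the upper 32
                 * bits of d_lens and the auth length in the lower 32 bits.
                 */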
2340                 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2341                 d_lens = (d_lens << 32) | sym_op->auth.data.length;
2342
2343                 if (mc_hash_off <
2344                     (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2345                         mc_hash_off = (sym_op->auth.data.offset +
2346                                        sym_op->auth.data.length);
2347                 }
2348                 /* For GMAC, update the salt the same way as for GCM */
2349                 if (unlikely(sess->is_gmac)) {
2350                         uint8_t *salt;
2351                         salt = fc_params.iv_buf;
2352                         if (unlikely(*(uint32_t *)salt != sess->salt)) {
2353                                 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2354                                 sess->salt = *(uint32_t *)salt;
2355                         }
2356                         fc_params.iv_buf = salt + 4;
2357                 }
2358                 if (likely(sess->mac_len)) {
2359                         struct rte_mbuf *m;
2360
2361                         m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2362                         if (!m)
2363                                 m = m_src;
2364
2365                         /* hmac immediately following data is best case */
2366                         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2367                             (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2368                                               mc_hash_off !=
2369                                       (uint8_t *)sym_op->auth.digest.data))) {
2370                                 flags |= ROC_SE_VALID_MAC_BUF;
2371                                 fc_params.mac_buf.size = sess->mac_len;
2372                                 fc_params.mac_buf.vaddr =
2373                                         sym_op->auth.digest.data;
2374                                 inplace = 0;
2375                         }
2376                 }
2377         }
2378         fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2379
2380         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2381             unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2382                 inplace = 0;
2383
2384         if (likely(!m_dst && inplace)) {
2385                 /* Single in-place buffer with no separate AAD buffer
2386                  * or MAC buffer, and not a wireless (air interface)
2387                  * algorithm
2388                  */
2389                 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2390
2391                 if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
2392                                                           &flags))) {
2393                         plt_dp_err("Prepare inplace src iov failed");
2394                         ret = -EINVAL;
2395                         goto err_exit;
2396                 }
2397
2398         } else {
2399                 /* Out of place processing */
2400                 fc_params.src_iov = (void *)src;
2401                 fc_params.dst_iov = (void *)dst;
2402
2403                 /* Build the source SG list from the mbuf chain */
2404                 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2405                         plt_dp_err("Prepare src iov failed");
2406                         ret = -EINVAL;
2407                         goto err_exit;
2408                 }
2409
2410                 if (unlikely(m_dst != NULL)) {
2411                         uint32_t pkt_len;
2412
2413                         /* Grow m_dst so it has at least as much room as m_src */
2414                         pkt_len = rte_pktmbuf_pkt_len(m_dst);
2415
2416                         if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2417                                 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2418                                 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2419                                         plt_dp_err("Not enough space in "
2420                                                    "m_dst %p, need %u"
2421                                                    " more",
2422                                                    m_dst, pkt_len);
2423                                         ret = -EINVAL;
2424                                         goto err_exit;
2425                                 }
2426                         }
2427
2428                         if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2429                                 plt_dp_err("Prepare dst iov failed for "
2430                                            "m_dst %p",
2431                                            m_dst);
2432                                 ret = -EINVAL;
2433                                 goto err_exit;
2434                         }
2435                 } else {
2436                         fc_params.dst_iov = (void *)src;
2437                 }
2438         }
2439
2440         if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2441                        (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2442                        ((ctx->fc_type == ROC_SE_FC_GEN) ||
2443                         (ctx->fc_type == ROC_SE_PDCP))))) {
2444                 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2445                                       m_info->pool, infl_req);
2446                 if (mdata == NULL) {
2447                         plt_dp_err("Error allocating meta buffer for request");
2448                         return -ENOMEM;
2449                 }
2450         }
2451
2452         /* Finally prepare the instruction */
2453         if (cpt_op & ROC_SE_OP_ENCODE)
2454                 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2455                                            inst);
2456         else
2457                 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2458                                            inst);
2459
2460         if (unlikely(ret)) {
2461                 plt_dp_err("Preparing request failed due to bad input arg");
2462                 goto free_mdata_and_exit;
2463         }
2464
2465         return 0;
2466
2467 free_mdata_and_exit:
2468         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2469                 rte_mempool_put(m_info->pool, infl_req->mdata);
2470 err_exit:
2471         return ret;
2472 }
2473
2474 static __rte_always_inline void
2475 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2476 {
2477         uint8_t *mac;
2478         struct rte_crypto_sym_op *sym_op = op->sym;
2479
2480         if (sym_op->auth.digest.data)
2481                 mac = sym_op->auth.digest.data;
2482         else
2483                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2484                                               sym_op->auth.data.length +
2485                                                       sym_op->auth.data.offset);
2486         if (!mac) {
2487                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2488                 return;
2489         }
2490
2491         if (memcmp(mac, gen_mac, mac_len))
2492                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2493         else
2494                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2495 }
2496
2497 static __rte_always_inline void
2498 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2499                                    uint32_t *addr_length_in_bits,
2500                                    uint8_t *addr_direction)
2501 {
2502         uint8_t found = 0;
2503         uint32_t pos;
2504         uint8_t last_byte;
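        /*
         * The KASUMI F9 input ends with the direction bit, a single '1' bit
         * and zero padding. Scan backwards for the last set bit to locate
         * that '1' marker, then read the direction bit just before it and
         * derive the true message length in bits.
         */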
2505         while (!found && counter_num_bytes > 0) {
2506                 counter_num_bytes--;
2507                 if (src[counter_num_bytes] == 0x00)
2508                         continue;
2509                 pos = rte_bsf32(src[counter_num_bytes]);
2510                 if (pos == 7) {
2511                         if (likely(counter_num_bytes > 0)) {
2512                                 last_byte = src[counter_num_bytes - 1];
2513                                 *addr_direction = last_byte & 0x1;
2514                                 *addr_length_in_bits =
2515                                         counter_num_bytes * 8 - 1;
2516                         }
2517                 } else {
2518                         last_byte = src[counter_num_bytes];
2519                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2520                         *addr_length_in_bits =
2521                                 counter_num_bytes * 8 + (8 - (pos + 2));
2522                 }
2523                 found = 1;
2524         }
2525 }
2526
2527 /*
2528  * This handles all auth-only cases except AES_GMAC
2529  */
2530 static __rte_always_inline int
2531 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2532                    struct cpt_qp_meta_info *m_info,
2533                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2534 {
2535         uint32_t space = 0;
2536         struct rte_crypto_sym_op *sym_op = cop->sym;
2537         void *mdata;
2538         uint32_t auth_range_off;
2539         uint32_t flags = 0;
2540         uint64_t d_offs = 0, d_lens;
2541         struct rte_mbuf *m_src, *m_dst;
2542         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2543         uint16_t mac_len = sess->mac_len;
2544         struct roc_se_fc_params params;
2545         char src[SRC_IOV_SIZE];
2546         uint8_t iv_buf[16];
2547         int ret;
2548
2549         memset(&params, 0, sizeof(struct roc_se_fc_params));
2550
2551         m_src = sym_op->m_src;
2552
2553         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2554                               infl_req);
2555         if (mdata == NULL) {
2556                 ret = -ENOMEM;
2557                 goto err_exit;
2558         }
2559
2560         auth_range_off = sym_op->auth.data.offset;
2561
2562         flags = ROC_SE_VALID_MAC_BUF;
2563         params.src_iov = (void *)src;
2564         if (unlikely(sess->zsk_flag)) {
2565                 /*
2566                  * For ZUC, KASUMI and SNOW3G the offsets are in bits, so
2567                  * pass the full range through even in the auth-only case
2568                  * and let MC handle it
2569                  */
2570                 d_offs = auth_range_off;
2571                 auth_range_off = 0;
2572                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2573                         cop, uint8_t *, sess->auth_iv_offset);
2574                 if (sess->zsk_flag == ROC_SE_K_F9) {
2575                         uint32_t length_in_bits, num_bytes;
2576                         uint8_t *src, direction = 0;
2577
2578                         memcpy(iv_buf,
2579                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2580                         /*
2581                          * This is KASUMI F9; take the direction bit
2582                          * from the source buffer
2583                          */
2584                         length_in_bits = cop->sym->auth.data.length;
2585                         num_bytes = (length_in_bits >> 3);
2586                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2587                         find_kasumif9_direction_and_length(
2588                                 src, num_bytes, &length_in_bits, &direction);
2589                         length_in_bits -= 64;
2590                         cop->sym->auth.data.offset += 64;
2591                         d_offs = cop->sym->auth.data.offset;
2592                         auth_range_off = d_offs / 8;
2593                         cop->sym->auth.data.length = length_in_bits;
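                        /*
                         * The leading 8 bytes (COUNT || FRESH in the F9
                         * construction) were copied into iv_buf above, so
                         * drop them from the authenticated data range here.
                         */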
2594
2595                         /* Store it at end of auth iv */
2596                         iv_buf[8] = direction;
2597                         params.auth_iv_buf = iv_buf;
2598                 }
2599         }
2600
2601         d_lens = sym_op->auth.data.length;
2602
2603         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2604
2605         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2606                 if (sym_op->auth.digest.data) {
2607                         /*
2608                          * Digest to be generated
2609                          * in separate buffer
2610                          */
2611                         params.mac_buf.size = sess->mac_len;
2612                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2613                 } else {
2614                         uint32_t off = sym_op->auth.data.offset +
2615                                        sym_op->auth.data.length;
2616                         int32_t dlen, space;
2617
2618                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2619                         dlen = rte_pktmbuf_pkt_len(m_dst);
2620
2621                         space = off + mac_len - dlen;
2622                         if (space > 0)
2623                                 if (!rte_pktmbuf_append(m_dst, space)) {
2624                                         plt_dp_err("Failed to extend "
2625                                                    "mbuf by %uB",
2626                                                    space);
2627                                         ret = -EINVAL;
2628                                         goto free_mdata_and_exit;
2629                                 }
2630
2631                         params.mac_buf.vaddr =
2632                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2633                         params.mac_buf.size = mac_len;
2634                 }
2635         } else {
2636                 uint64_t *op = mdata;
2637
2638                 /* Need space for storing generated mac */
2639                 space += 2 * sizeof(uint64_t);
2640
2641                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2642                 params.mac_buf.size = mac_len;
2643                 space += RTE_ALIGN_CEIL(mac_len, 8);
2644                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2645                 op[1] = mac_len;
2646                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
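                /*
                 * The generated MAC is staged in the meta buffer; op[0] and
                 * op[1] record its address and length so that the
                 * completion path can later compare it against the
                 * application's digest (see compl_auth_verify()).
                 */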
2647         }
2648
2649         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2650         params.meta_buf.size -= space;
2651
2652         /* Out of place processing */
2653         params.src_iov = (void *)src;
2654
2655         /* Build the source SG list from the mbuf chain */
2656         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2657                 plt_dp_err("Prepare src iov failed");
2658                 ret = -EINVAL;
2659                 goto free_mdata_and_exit;
2660         }
2661
2662         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2663         if (ret)
2664                 goto free_mdata_and_exit;
2665
2666         return 0;
2667
2668 free_mdata_and_exit:
2669         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2670                 rte_mempool_put(m_info->pool, infl_req->mdata);
2671 err_exit:
2672         return ret;
2673 }
2674 #endif /*_CNXK_SE_H_ */