crypto/cnxk: add security capabilities
[dpdk.git] / drivers / crypto / cnxk / cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
12 #define SRC_IOV_SIZE                                                           \
13         (sizeof(struct roc_se_iov_ptr) +                                       \
14          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE                                                           \
16         (sizeof(struct roc_se_iov_ptr) +                                       \
17          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
19 struct cnxk_se_sess {
20         uint16_t cpt_op : 4;
21         uint16_t zsk_flag : 4;
22         uint16_t aes_gcm : 1;
23         uint16_t aes_ctr : 1;
24         uint16_t chacha_poly : 1;
25         uint16_t is_null : 1;
26         uint16_t is_gmac : 1;
27         uint16_t rsvd1 : 3;
28         uint16_t aad_length;
29         uint8_t mac_len;
30         uint8_t iv_length;
31         uint8_t auth_iv_length;
32         uint16_t iv_offset;
33         uint16_t auth_iv_offset;
34         uint32_t salt;
35         uint64_t cpt_inst_w7;
36         struct roc_se_ctx roc_se_ctx;
37 } __rte_cache_aligned;
38
39 static __rte_always_inline int
40 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
41 {
42         uint16_t mac_len = auth->digest_length;
43         int ret;
44
45         switch (auth->algo) {
46         case RTE_CRYPTO_AUTH_MD5:
47         case RTE_CRYPTO_AUTH_MD5_HMAC:
48                 ret = (mac_len == 16) ? 0 : -1;
49                 break;
50         case RTE_CRYPTO_AUTH_SHA1:
51         case RTE_CRYPTO_AUTH_SHA1_HMAC:
52                 ret = (mac_len == 20) ? 0 : -1;
53                 break;
54         case RTE_CRYPTO_AUTH_SHA224:
55         case RTE_CRYPTO_AUTH_SHA224_HMAC:
56                 ret = (mac_len == 28) ? 0 : -1;
57                 break;
58         case RTE_CRYPTO_AUTH_SHA256:
59         case RTE_CRYPTO_AUTH_SHA256_HMAC:
60                 ret = (mac_len == 32) ? 0 : -1;
61                 break;
62         case RTE_CRYPTO_AUTH_SHA384:
63         case RTE_CRYPTO_AUTH_SHA384_HMAC:
64                 ret = (mac_len == 48) ? 0 : -1;
65                 break;
66         case RTE_CRYPTO_AUTH_SHA512:
67         case RTE_CRYPTO_AUTH_SHA512_HMAC:
68                 ret = (mac_len == 64) ? 0 : -1;
69                 break;
70         case RTE_CRYPTO_AUTH_NULL:
71                 ret = 0;
72                 break;
73         default:
74                 ret = -1;
75         }
76
77         return ret;
78 }
79
80 static __rte_always_inline void
81 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
82 {
83         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
84         memcpy(fctx->enc.encr_iv, salt, 4);
85 }
86
87 static __rte_always_inline uint32_t
88 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
89              uint32_t size)
90 {
91         struct roc_se_sglist_comp *to = &list[i >> 2];
92
93         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
94         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
95         i++;
96         return i;
97 }
98
99 static __rte_always_inline uint32_t
100 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
101                       struct roc_se_buf_ptr *from)
102 {
103         struct roc_se_sglist_comp *to = &list[i >> 2];
104
105         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
106         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
107         i++;
108         return i;
109 }
110
111 static __rte_always_inline uint32_t
112 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
113                           struct roc_se_buf_ptr *from, uint32_t *psize)
114 {
115         struct roc_se_sglist_comp *to = &list[i >> 2];
116         uint32_t size = *psize;
117         uint32_t e_len;
118
119         e_len = (size > from->size) ? from->size : size;
120         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
121         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
122         *psize -= e_len;
123         i++;
124         return i;
125 }
126
127 /*
128  * This fills the MC expected SGIO list
129  * from IOV given by user.
130  */
131 static __rte_always_inline uint32_t
132 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
133                       struct roc_se_iov_ptr *from, uint32_t from_offset,
134                       uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
135                       uint32_t extra_offset)
136 {
137         int32_t j;
138         uint32_t extra_len = extra_buf ? extra_buf->size : 0;
139         uint32_t size = *psize;
140         struct roc_se_buf_ptr *bufs;
141
142         bufs = from->bufs;
143         for (j = 0; (j < from->buf_cnt) && size; j++) {
144                 uint64_t e_vaddr;
145                 uint32_t e_len;
146                 struct roc_se_sglist_comp *to = &list[i >> 2];
147
148                 if (unlikely(from_offset)) {
149                         if (from_offset >= bufs[j].size) {
150                                 from_offset -= bufs[j].size;
151                                 continue;
152                         }
153                         e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
154                         e_len = (size > (bufs[j].size - from_offset)) ?
155                                         (bufs[j].size - from_offset) :
156                                         size;
157                         from_offset = 0;
158                 } else {
159                         e_vaddr = (uint64_t)bufs[j].vaddr;
160                         e_len = (size > bufs[j].size) ? bufs[j].size : size;
161                 }
162
163                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
164                 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
165
166                 if (extra_len && (e_len >= extra_offset)) {
167                         /* Break the data at given offset */
168                         uint32_t next_len = e_len - extra_offset;
169                         uint64_t next_vaddr = e_vaddr + extra_offset;
170
171                         if (!extra_offset) {
172                                 i--;
173                         } else {
174                                 e_len = extra_offset;
175                                 size -= e_len;
176                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
177                         }
178
179                         extra_len = RTE_MIN(extra_len, size);
180                         /* Insert extra data ptr */
181                         if (extra_len) {
182                                 i++;
183                                 to = &list[i >> 2];
184                                 to->u.s.len[i % 4] =
185                                         rte_cpu_to_be_16(extra_len);
186                                 to->ptr[i % 4] = rte_cpu_to_be_64(
187                                         (uint64_t)extra_buf->vaddr);
188                                 size -= extra_len;
189                         }
190
191                         next_len = RTE_MIN(next_len, size);
192                         /* insert the rest of the data */
193                         if (next_len) {
194                                 i++;
195                                 to = &list[i >> 2];
196                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
197                                 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
198                                 size -= next_len;
199                         }
200                         extra_len = 0;
201
202                 } else {
203                         size -= e_len;
204                 }
205                 if (extra_offset)
206                         extra_offset -= size;
207                 i++;
208         }
209
210         *psize = size;
211         return (uint32_t)i;
212 }
213
214 static __rte_always_inline int
215 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
216                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
217 {
218         void *m_vaddr = params->meta_buf.vaddr;
219         uint32_t size, i;
220         uint16_t data_len, mac_len, key_len;
221         roc_se_auth_type hash_type;
222         struct roc_se_ctx *ctx;
223         struct roc_se_sglist_comp *gather_comp;
224         struct roc_se_sglist_comp *scatter_comp;
225         uint8_t *in_buffer;
226         uint32_t g_size_bytes, s_size_bytes;
227         union cpt_inst_w4 cpt_inst_w4;
228
229         ctx = params->ctx_buf.vaddr;
230
231         hash_type = ctx->hash_type;
232         mac_len = ctx->mac_len;
233         key_len = ctx->auth_key_len;
234         data_len = ROC_SE_AUTH_DLEN(d_lens);
235
236         /*GP op header */
237         cpt_inst_w4.s.opcode_minor = 0;
238         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
239         if (ctx->hmac) {
240                 cpt_inst_w4.s.opcode_major =
241                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
242                 cpt_inst_w4.s.param1 = key_len;
243                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
244         } else {
245                 cpt_inst_w4.s.opcode_major =
246                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
247                 cpt_inst_w4.s.param1 = 0;
248                 cpt_inst_w4.s.dlen = data_len;
249         }
250
251         /* Null auth only case enters the if */
252         if (unlikely(!hash_type && !ctx->enc_cipher)) {
253                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
254                 /* Minor op is passthrough */
255                 cpt_inst_w4.s.opcode_minor = 0x03;
256                 /* Send out completion code only */
257                 cpt_inst_w4.s.param2 = 0x1;
258         }
259
260         /* DPTR has SG list */
261         in_buffer = m_vaddr;
262
263         ((uint16_t *)in_buffer)[0] = 0;
264         ((uint16_t *)in_buffer)[1] = 0;
265
266         /* TODO Add error check if space will be sufficient */
267         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
268
269         /*
270          * Input gather list
271          */
272
273         i = 0;
274
275         if (ctx->hmac) {
276                 uint64_t k_vaddr = (uint64_t)params->ctx_buf.vaddr +
277                                    offsetof(struct roc_se_ctx, auth_key);
278                 /* Key */
279                 i = fill_sg_comp(gather_comp, i, k_vaddr,
280                                  RTE_ALIGN_CEIL(key_len, 8));
281         }
282
283         /* input data */
284         size = data_len;
285         if (size) {
286                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
287                                           &size, NULL, 0);
288                 if (unlikely(size)) {
289                         plt_dp_err("Insufficient dst IOV size, short by %dB",
290                                    size);
291                         return -1;
292                 }
293         } else {
294                 /*
295                  * Looks like we need to support zero data
296                  * gather ptr in case of hash & hmac
297                  */
298                 i++;
299         }
300         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
301         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
302
303         /*
304          * Output Gather list
305          */
306
307         i = 0;
308         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
309                                                      g_size_bytes);
310
311         if (flags & ROC_SE_VALID_MAC_BUF) {
312                 if (unlikely(params->mac_buf.size < mac_len)) {
313                         plt_dp_err("Insufficient MAC size");
314                         return -1;
315                 }
316
317                 size = mac_len;
318                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
319                                               &size);
320         } else {
321                 size = mac_len;
322                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
323                                           data_len, &size, NULL, 0);
324                 if (unlikely(size)) {
325                         plt_dp_err("Insufficient dst IOV size, short by %dB",
326                                    size);
327                         return -1;
328                 }
329         }
330
331         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
332         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
333
334         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
335
336         /* This is DPTR len in case of SG mode */
337         cpt_inst_w4.s.dlen = size;
338
339         inst->dptr = (uint64_t)in_buffer;
340         inst->w4.u64 = cpt_inst_w4.u64;
341
342         return 0;
343 }
344
345 static __rte_always_inline int
346 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
347                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
348 {
349         uint32_t iv_offset = 0;
350         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
351         struct roc_se_ctx *se_ctx;
352         uint32_t cipher_type, hash_type;
353         uint32_t mac_len, size;
354         uint8_t iv_len = 16;
355         struct roc_se_buf_ptr *aad_buf = NULL;
356         uint32_t encr_offset, auth_offset;
357         uint32_t encr_data_len, auth_data_len, aad_len = 0;
358         uint32_t passthrough_len = 0;
359         union cpt_inst_w4 cpt_inst_w4;
360         void *offset_vaddr;
361         uint8_t op_minor;
362
363         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
364         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
365         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
366         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
367         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
368                 /* We don't support both AAD and auth data separately */
369                 auth_data_len = 0;
370                 auth_offset = 0;
371                 aad_len = fc_params->aad_buf.size;
372                 aad_buf = &fc_params->aad_buf;
373         }
374         se_ctx = fc_params->ctx_buf.vaddr;
375         cipher_type = se_ctx->enc_cipher;
376         hash_type = se_ctx->hash_type;
377         mac_len = se_ctx->mac_len;
378         op_minor = se_ctx->template_w4.s.opcode_minor;
379
380         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
381                 iv_len = 0;
382                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
383         }
384
385         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
386                 /*
387                  * When AAD is given, data above encr_offset is pass through
388                  * Since AAD is given as separate pointer and not as offset,
389                  * this is a special case as we need to fragment input data
390                  * into passthrough + encr_data and then insert AAD in between.
391                  */
392                 if (hash_type != ROC_SE_GMAC_TYPE) {
393                         passthrough_len = encr_offset;
394                         auth_offset = passthrough_len + iv_len;
395                         encr_offset = passthrough_len + aad_len + iv_len;
396                         auth_data_len = aad_len + encr_data_len;
397                 } else {
398                         passthrough_len = 16 + aad_len;
399                         auth_offset = passthrough_len + iv_len;
400                         auth_data_len = aad_len;
401                 }
402         } else {
403                 encr_offset += iv_len;
404                 auth_offset += iv_len;
405         }
406
407         /* Encryption */
408         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
409         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
410         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
411
412         if (hash_type == ROC_SE_GMAC_TYPE) {
413                 encr_offset = 0;
414                 encr_data_len = 0;
415         }
416
417         auth_dlen = auth_offset + auth_data_len;
418         enc_dlen = encr_data_len + encr_offset;
419         if (unlikely(encr_data_len & 0xf)) {
420                 if ((cipher_type == ROC_SE_DES3_CBC) ||
421                     (cipher_type == ROC_SE_DES3_ECB))
422                         enc_dlen =
423                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
424                 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
425                                 (cipher_type == ROC_SE_AES_ECB)))
426                         enc_dlen =
427                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
428         }
429
430         if (unlikely(auth_dlen > enc_dlen)) {
431                 inputlen = auth_dlen;
432                 outputlen = auth_dlen + mac_len;
433         } else {
434                 inputlen = enc_dlen;
435                 outputlen = enc_dlen + mac_len;
436         }
437
438         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
439                 outputlen = enc_dlen;
440
441         /* GP op header */
442         cpt_inst_w4.s.param1 = encr_data_len;
443         cpt_inst_w4.s.param2 = auth_data_len;
444
445         /*
446          * In cn9k, cn10k since we have a limitation of
447          * IV & Offset control word not part of instruction
448          * and need to be part of Data Buffer, we check if
449          * head room is there and then only do the Direct mode processing
450          */
451         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
452                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
453                 void *dm_vaddr = fc_params->bufs[0].vaddr;
454
455                 /* Use Direct mode */
456
457                 offset_vaddr =
458                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
459
460                 /* DPTR */
461                 inst->dptr = (uint64_t)offset_vaddr;
462
463                 /* RPTR should just exclude offset control word */
464                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
465
466                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
467
468                 if (likely(iv_len)) {
469                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
470                                                       ROC_SE_OFF_CTRL_LEN);
471                         uint64_t *src = fc_params->iv_buf;
472                         dest[0] = src[0];
473                         dest[1] = src[1];
474                 }
475
476         } else {
477                 void *m_vaddr = fc_params->meta_buf.vaddr;
478                 uint32_t i, g_size_bytes, s_size_bytes;
479                 struct roc_se_sglist_comp *gather_comp;
480                 struct roc_se_sglist_comp *scatter_comp;
481                 uint8_t *in_buffer;
482
483                 /* This falls under strict SG mode */
484                 offset_vaddr = m_vaddr;
485                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
486
487                 m_vaddr = (uint8_t *)m_vaddr + size;
488
489                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
490
491                 if (likely(iv_len)) {
492                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
493                                                       ROC_SE_OFF_CTRL_LEN);
494                         uint64_t *src = fc_params->iv_buf;
495                         dest[0] = src[0];
496                         dest[1] = src[1];
497                 }
498
499                 /* DPTR has SG list */
500                 in_buffer = m_vaddr;
501
502                 ((uint16_t *)in_buffer)[0] = 0;
503                 ((uint16_t *)in_buffer)[1] = 0;
504
505                 /* TODO Add error check if space will be sufficient */
506                 gather_comp =
507                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
508
509                 /*
510                  * Input Gather List
511                  */
512
513                 i = 0;
514
515                 /* Offset control word that includes iv */
516                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
517                                  ROC_SE_OFF_CTRL_LEN + iv_len);
518
519                 /* Add input data */
520                 size = inputlen - iv_len;
521                 if (likely(size)) {
522                         uint32_t aad_offset = aad_len ? passthrough_len : 0;
523
524                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
525                                 i = fill_sg_comp_from_buf_min(
526                                         gather_comp, i, fc_params->bufs, &size);
527                         } else {
528                                 i = fill_sg_comp_from_iov(
529                                         gather_comp, i, fc_params->src_iov, 0,
530                                         &size, aad_buf, aad_offset);
531                         }
532
533                         if (unlikely(size)) {
534                                 plt_dp_err("Insufficient buffer space,"
535                                            " size %d needed",
536                                            size);
537                                 return -1;
538                         }
539                 }
540                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
541                 g_size_bytes =
542                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
543
544                 /*
545                  * Output Scatter list
546                  */
547                 i = 0;
548                 scatter_comp =
549                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
550                                                       g_size_bytes);
551
552                 /* Add IV */
553                 if (likely(iv_len)) {
554                         i = fill_sg_comp(scatter_comp, i,
555                                          (uint64_t)offset_vaddr +
556                                                  ROC_SE_OFF_CTRL_LEN,
557                                          iv_len);
558                 }
559
560                 /* output data or output data + digest*/
561                 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
562                         size = outputlen - iv_len - mac_len;
563                         if (size) {
564                                 uint32_t aad_offset =
565                                         aad_len ? passthrough_len : 0;
566
567                                 if (unlikely(flags &
568                                              ROC_SE_SINGLE_BUF_INPLACE)) {
569                                         i = fill_sg_comp_from_buf_min(
570                                                 scatter_comp, i,
571                                                 fc_params->bufs, &size);
572                                 } else {
573                                         i = fill_sg_comp_from_iov(
574                                                 scatter_comp, i,
575                                                 fc_params->dst_iov, 0, &size,
576                                                 aad_buf, aad_offset);
577                                 }
578                                 if (unlikely(size)) {
579                                         plt_dp_err("Insufficient buffer"
580                                                    " space, size %d needed",
581                                                    size);
582                                         return -1;
583                                 }
584                         }
585                         /* mac_data */
586                         if (mac_len) {
587                                 i = fill_sg_comp_from_buf(scatter_comp, i,
588                                                           &fc_params->mac_buf);
589                         }
590                 } else {
591                         /* Output including mac */
592                         size = outputlen - iv_len;
593                         if (likely(size)) {
594                                 uint32_t aad_offset =
595                                         aad_len ? passthrough_len : 0;
596
597                                 if (unlikely(flags &
598                                              ROC_SE_SINGLE_BUF_INPLACE)) {
599                                         i = fill_sg_comp_from_buf_min(
600                                                 scatter_comp, i,
601                                                 fc_params->bufs, &size);
602                                 } else {
603                                         i = fill_sg_comp_from_iov(
604                                                 scatter_comp, i,
605                                                 fc_params->dst_iov, 0, &size,
606                                                 aad_buf, aad_offset);
607                                 }
608                                 if (unlikely(size)) {
609                                         plt_dp_err("Insufficient buffer"
610                                                    " space, size %d needed",
611                                                    size);
612                                         return -1;
613                                 }
614                         }
615                 }
616                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
617                 s_size_bytes =
618                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
619
620                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
621
622                 /* This is DPTR len in case of SG mode */
623                 cpt_inst_w4.s.dlen = size;
624
625                 inst->dptr = (uint64_t)in_buffer;
626         }
627
628         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
629                      (auth_offset >> 8))) {
630                 plt_dp_err("Offset not supported");
631                 plt_dp_err("enc_offset: %d", encr_offset);
632                 plt_dp_err("iv_offset : %d", iv_offset);
633                 plt_dp_err("auth_offset: %d", auth_offset);
634                 return -1;
635         }
636
637         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
638                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
639                 ((uint64_t)auth_offset));
640
641         inst->w4.u64 = cpt_inst_w4.u64;
642         return 0;
643 }
644
645 static __rte_always_inline int
646 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
647                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
648 {
649         uint32_t iv_offset = 0, size;
650         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
651         struct roc_se_ctx *se_ctx;
652         int32_t hash_type, mac_len;
653         uint8_t iv_len = 16;
654         struct roc_se_buf_ptr *aad_buf = NULL;
655         uint32_t encr_offset, auth_offset;
656         uint32_t encr_data_len, auth_data_len, aad_len = 0;
657         uint32_t passthrough_len = 0;
658         union cpt_inst_w4 cpt_inst_w4;
659         void *offset_vaddr;
660         uint8_t op_minor;
661
662         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
663         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
664         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
665         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
666
667         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
668                 /* We don't support both AAD and auth data separately */
669                 auth_data_len = 0;
670                 auth_offset = 0;
671                 aad_len = fc_params->aad_buf.size;
672                 aad_buf = &fc_params->aad_buf;
673         }
674
675         se_ctx = fc_params->ctx_buf.vaddr;
676         hash_type = se_ctx->hash_type;
677         mac_len = se_ctx->mac_len;
678         op_minor = se_ctx->template_w4.s.opcode_minor;
679
680         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
681                 iv_len = 0;
682                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
683         }
684
685         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
686                 /*
687                  * When AAD is given, data above encr_offset is pass through
688                  * Since AAD is given as separate pointer and not as offset,
689                  * this is a special case as we need to fragment input data
690                  * into passthrough + encr_data and then insert AAD in between.
691                  */
692                 if (hash_type != ROC_SE_GMAC_TYPE) {
693                         passthrough_len = encr_offset;
694                         auth_offset = passthrough_len + iv_len;
695                         encr_offset = passthrough_len + aad_len + iv_len;
696                         auth_data_len = aad_len + encr_data_len;
697                 } else {
698                         passthrough_len = 16 + aad_len;
699                         auth_offset = passthrough_len + iv_len;
700                         auth_data_len = aad_len;
701                 }
702         } else {
703                 encr_offset += iv_len;
704                 auth_offset += iv_len;
705         }
706
707         /* Decryption */
708         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
709         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
710         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
711
712         if (hash_type == ROC_SE_GMAC_TYPE) {
713                 encr_offset = 0;
714                 encr_data_len = 0;
715         }
716
717         enc_dlen = encr_offset + encr_data_len;
718         auth_dlen = auth_offset + auth_data_len;
719
720         if (auth_dlen > enc_dlen) {
721                 inputlen = auth_dlen + mac_len;
722                 outputlen = auth_dlen;
723         } else {
724                 inputlen = enc_dlen + mac_len;
725                 outputlen = enc_dlen;
726         }
727
728         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
729                 outputlen = inputlen = enc_dlen;
730
731         cpt_inst_w4.s.param1 = encr_data_len;
732         cpt_inst_w4.s.param2 = auth_data_len;
733
734         /*
735          * In cn9k, cn10k since we have a limitation of
736          * IV & Offset control word not part of instruction
737          * and need to be part of Data Buffer, we check if
738          * head room is there and then only do the Direct mode processing
739          */
740         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
741                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
742                 void *dm_vaddr = fc_params->bufs[0].vaddr;
743
744                 /* Use Direct mode */
745
746                 offset_vaddr =
747                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
748                 inst->dptr = (uint64_t)offset_vaddr;
749
750                 /* RPTR should just exclude offset control word */
751                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
752
753                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
754
755                 if (likely(iv_len)) {
756                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
757                                                       ROC_SE_OFF_CTRL_LEN);
758                         uint64_t *src = fc_params->iv_buf;
759                         dest[0] = src[0];
760                         dest[1] = src[1];
761                 }
762
763         } else {
764                 void *m_vaddr = fc_params->meta_buf.vaddr;
765                 uint32_t g_size_bytes, s_size_bytes;
766                 struct roc_se_sglist_comp *gather_comp;
767                 struct roc_se_sglist_comp *scatter_comp;
768                 uint8_t *in_buffer;
769                 uint8_t i = 0;
770
771                 /* This falls under strict SG mode */
772                 offset_vaddr = m_vaddr;
773                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
774
775                 m_vaddr = (uint8_t *)m_vaddr + size;
776
777                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
778
779                 if (likely(iv_len)) {
780                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
781                                                       ROC_SE_OFF_CTRL_LEN);
782                         uint64_t *src = fc_params->iv_buf;
783                         dest[0] = src[0];
784                         dest[1] = src[1];
785                 }
786
787                 /* DPTR has SG list */
788                 in_buffer = m_vaddr;
789
790                 ((uint16_t *)in_buffer)[0] = 0;
791                 ((uint16_t *)in_buffer)[1] = 0;
792
793                 /* TODO Add error check if space will be sufficient */
794                 gather_comp =
795                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
796
797                 /*
798                  * Input Gather List
799                  */
800                 i = 0;
801
802                 /* Offset control word that includes iv */
803                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
804                                  ROC_SE_OFF_CTRL_LEN + iv_len);
805
806                 /* Add input data */
807                 if (flags & ROC_SE_VALID_MAC_BUF) {
808                         size = inputlen - iv_len - mac_len;
809                         if (size) {
810                                 /* input data only */
811                                 if (unlikely(flags &
812                                              ROC_SE_SINGLE_BUF_INPLACE)) {
813                                         i = fill_sg_comp_from_buf_min(
814                                                 gather_comp, i, fc_params->bufs,
815                                                 &size);
816                                 } else {
817                                         uint32_t aad_offset =
818                                                 aad_len ? passthrough_len : 0;
819
820                                         i = fill_sg_comp_from_iov(
821                                                 gather_comp, i,
822                                                 fc_params->src_iov, 0, &size,
823                                                 aad_buf, aad_offset);
824                                 }
825                                 if (unlikely(size)) {
826                                         plt_dp_err("Insufficient buffer"
827                                                    " space, size %d needed",
828                                                    size);
829                                         return -1;
830                                 }
831                         }
832
833                         /* mac data */
834                         if (mac_len) {
835                                 i = fill_sg_comp_from_buf(gather_comp, i,
836                                                           &fc_params->mac_buf);
837                         }
838                 } else {
839                         /* input data + mac */
840                         size = inputlen - iv_len;
841                         if (size) {
842                                 if (unlikely(flags &
843                                              ROC_SE_SINGLE_BUF_INPLACE)) {
844                                         i = fill_sg_comp_from_buf_min(
845                                                 gather_comp, i, fc_params->bufs,
846                                                 &size);
847                                 } else {
848                                         uint32_t aad_offset =
849                                                 aad_len ? passthrough_len : 0;
850
851                                         if (unlikely(!fc_params->src_iov)) {
852                                                 plt_dp_err("Bad input args");
853                                                 return -1;
854                                         }
855
856                                         i = fill_sg_comp_from_iov(
857                                                 gather_comp, i,
858                                                 fc_params->src_iov, 0, &size,
859                                                 aad_buf, aad_offset);
860                                 }
861
862                                 if (unlikely(size)) {
863                                         plt_dp_err("Insufficient buffer"
864                                                    " space, size %d needed",
865                                                    size);
866                                         return -1;
867                                 }
868                         }
869                 }
870                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
871                 g_size_bytes =
872                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
873
874                 /*
875                  * Output Scatter List
876                  */
877
878                 i = 0;
879                 scatter_comp =
880                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
881                                                       g_size_bytes);
882
883                 /* Add iv */
884                 if (iv_len) {
885                         i = fill_sg_comp(scatter_comp, i,
886                                          (uint64_t)offset_vaddr +
887                                                  ROC_SE_OFF_CTRL_LEN,
888                                          iv_len);
889                 }
890
891                 /* Add output data */
892                 size = outputlen - iv_len;
893                 if (size) {
894                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
895                                 /* handle single buffer here */
896                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
897                                                               fc_params->bufs,
898                                                               &size);
899                         } else {
900                                 uint32_t aad_offset =
901                                         aad_len ? passthrough_len : 0;
902
903                                 if (unlikely(!fc_params->dst_iov)) {
904                                         plt_dp_err("Bad input args");
905                                         return -1;
906                                 }
907
908                                 i = fill_sg_comp_from_iov(
909                                         scatter_comp, i, fc_params->dst_iov, 0,
910                                         &size, aad_buf, aad_offset);
911                         }
912
913                         if (unlikely(size)) {
914                                 plt_dp_err("Insufficient buffer space,"
915                                            " size %d needed",
916                                            size);
917                                 return -1;
918                         }
919                 }
920
921                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
922                 s_size_bytes =
923                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
924
925                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
926
927                 /* This is DPTR len in case of SG mode */
928                 cpt_inst_w4.s.dlen = size;
929
930                 inst->dptr = (uint64_t)in_buffer;
931         }
932
933         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
934                      (auth_offset >> 8))) {
935                 plt_dp_err("Offset not supported");
936                 plt_dp_err("enc_offset: %d", encr_offset);
937                 plt_dp_err("iv_offset : %d", iv_offset);
938                 plt_dp_err("auth_offset: %d", auth_offset);
939                 return -1;
940         }
941
942         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
943                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
944                 ((uint64_t)auth_offset));
945
946         inst->w4.u64 = cpt_inst_w4.u64;
947         return 0;
948 }
949
950 static __rte_always_inline int
951 cpt_zuc_snow3g_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
952                         struct roc_se_fc_params *params,
953                         struct cpt_inst_s *inst)
954 {
955         uint32_t size;
956         int32_t inputlen, outputlen;
957         struct roc_se_ctx *se_ctx;
958         uint32_t mac_len = 0;
959         uint8_t pdcp_alg_type, j;
960         uint32_t encr_offset = 0, auth_offset = 0;
961         uint32_t encr_data_len = 0, auth_data_len = 0;
962         int flags, iv_len = 16;
963         uint64_t offset_ctrl;
964         uint64_t *offset_vaddr;
965         uint32_t *iv_s, iv[4];
966         union cpt_inst_w4 cpt_inst_w4;
967
968         se_ctx = params->ctx_buf.vaddr;
969         flags = se_ctx->zsk_flags;
970         mac_len = se_ctx->mac_len;
971         pdcp_alg_type = se_ctx->pdcp_alg_type;
972
973         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
974
975         /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
976
977         cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
978                                       (0 << 4) | (0 << 3) | (flags & 0x7));
979
980         if (flags == 0x1) {
981                 /*
982                  * Microcode expects offsets in bytes
983                  * TODO: Rounding off
984                  */
985                 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
986
987                 /* EIA3 or UIA2 */
988                 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
989                 auth_offset = auth_offset / 8;
990
991                 /* consider iv len */
992                 auth_offset += iv_len;
993
994                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
995                 outputlen = mac_len;
996
997                 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
998
999         } else {
1000                 /* EEA3 or UEA2 */
1001                 /*
1002                  * Microcode expects offsets in bytes
1003                  * TODO: Rounding off
1004                  */
1005                 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1006
1007                 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
1008                 encr_offset = encr_offset / 8;
1009                 /* consider iv len */
1010                 encr_offset += iv_len;
1011
1012                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1013                 outputlen = inputlen;
1014
1015                 /* iv offset is 0 */
1016                 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1017         }
1018
1019         if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
1020                 plt_dp_err("Offset not supported");
1021                 plt_dp_err("enc_offset: %d", encr_offset);
1022                 plt_dp_err("auth_offset: %d", auth_offset);
1023                 return -1;
1024         }
1025
1026         /* IV */
1027         iv_s = (flags == 0x1) ? params->auth_iv_buf : params->iv_buf;
1028
1029         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1030                 /*
1031                  * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
1032                  * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
1033                  */
1034
1035                 for (j = 0; j < 4; j++)
1036                         iv[j] = iv_s[3 - j];
1037         } else {
1038                 /* ZUC doesn't need a swap */
1039                 for (j = 0; j < 4; j++)
1040                         iv[j] = iv_s[j];
1041         }
1042
1043         /*
1044          * GP op header, lengths are expected in bits.
1045          */
1046         cpt_inst_w4.s.param1 = encr_data_len;
1047         cpt_inst_w4.s.param2 = auth_data_len;
1048
1049         /*
1050          * In cn9k, cn10k since we have a limitation of
1051          * IV & Offset control word not part of instruction
1052          * and need to be part of Data Buffer, we check if
1053          * head room is there and then only do the Direct mode processing
1054          */
1055         if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1056                    (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1057                 void *dm_vaddr = params->bufs[0].vaddr;
1058
1059                 /* Use Direct mode */
1060
1061                 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1062                                             ROC_SE_OFF_CTRL_LEN - iv_len);
1063
1064                 /* DPTR */
1065                 inst->dptr = (uint64_t)offset_vaddr;
1066                 /* RPTR should just exclude offset control word */
1067                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1068
1069                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1070
1071                 if (likely(iv_len)) {
1072                         uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1073                                                       ROC_SE_OFF_CTRL_LEN);
1074                         memcpy(iv_d, iv, 16);
1075                 }
1076
1077                 *offset_vaddr = offset_ctrl;
1078         } else {
1079                 void *m_vaddr = params->meta_buf.vaddr;
1080                 uint32_t i, g_size_bytes, s_size_bytes;
1081                 struct roc_se_sglist_comp *gather_comp;
1082                 struct roc_se_sglist_comp *scatter_comp;
1083                 uint8_t *in_buffer;
1084                 uint32_t *iv_d;
1085
1086                 /* save space for iv */
1087                 offset_vaddr = m_vaddr;
1088
1089                 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1090
1091                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1092
1093                 /* DPTR has SG list */
1094                 in_buffer = m_vaddr;
1095
1096                 ((uint16_t *)in_buffer)[0] = 0;
1097                 ((uint16_t *)in_buffer)[1] = 0;
1098
1099                 /* TODO Add error check if space will be sufficient */
1100                 gather_comp =
1101                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1102
1103                 /*
1104                  * Input Gather List
1105                  */
1106                 i = 0;
1107
1108                 /* Offset control word followed by iv */
1109
1110                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1111                                  ROC_SE_OFF_CTRL_LEN + iv_len);
1112
1113                 /* iv offset is 0 */
1114                 *offset_vaddr = offset_ctrl;
1115
1116                 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1117                                     ROC_SE_OFF_CTRL_LEN);
1118                 memcpy(iv_d, iv, 16);
1119
1120                 /* input data */
1121                 size = inputlen - iv_len;
1122                 if (size) {
1123                         i = fill_sg_comp_from_iov(gather_comp, i,
1124                                                   params->src_iov, 0, &size,
1125                                                   NULL, 0);
1126                         if (unlikely(size)) {
1127                                 plt_dp_err("Insufficient buffer space,"
1128                                            " size %d needed",
1129                                            size);
1130                                 return -1;
1131                         }
1132                 }
1133                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1134                 g_size_bytes =
1135                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1136
1137                 /*
1138                  * Output Scatter List
1139                  */
1140
1141                 i = 0;
1142                 scatter_comp =
1143                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1144                                                       g_size_bytes);
1145
1146                 if (flags == 0x1) {
1147                         /* IV in SLIST only for EEA3 & UEA2 */
1148                         iv_len = 0;
1149                 }
1150
1151                 if (iv_len) {
1152                         i = fill_sg_comp(scatter_comp, i,
1153                                          (uint64_t)offset_vaddr +
1154                                                  ROC_SE_OFF_CTRL_LEN,
1155                                          iv_len);
1156                 }
1157
1158                 /* Add output data */
1159                 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1160                         size = outputlen - iv_len - mac_len;
1161                         if (size) {
1162                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1163                                                           params->dst_iov, 0,
1164                                                           &size, NULL, 0);
1165
1166                                 if (unlikely(size)) {
1167                                         plt_dp_err("Insufficient buffer space,"
1168                                                    " size %d needed",
1169                                                    size);
1170                                         return -1;
1171                                 }
1172                         }
1173
1174                         /* mac data */
1175                         if (mac_len) {
1176                                 i = fill_sg_comp_from_buf(scatter_comp, i,
1177                                                           &params->mac_buf);
1178                         }
1179                 } else {
1180                         /* Output including mac */
1181                         size = outputlen - iv_len;
1182                         if (size) {
1183                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1184                                                           params->dst_iov, 0,
1185                                                           &size, NULL, 0);
1186
1187                                 if (unlikely(size)) {
1188                                         plt_dp_err("Insufficient buffer space,"
1189                                                    " size %d needed",
1190                                                    size);
1191                                         return -1;
1192                                 }
1193                         }
1194                 }
1195                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1196                 s_size_bytes =
1197                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1198
1199                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1200
1201                 /* This is DPTR len in case of SG mode */
1202                 cpt_inst_w4.s.dlen = size;
1203
1204                 inst->dptr = (uint64_t)in_buffer;
1205         }
1206
1207         inst->w4.u64 = cpt_inst_w4.u64;
1208
1209         return 0;
1210 }
1211
1212 static __rte_always_inline int
1213 cpt_zuc_snow3g_dec_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1214                         struct roc_se_fc_params *params,
1215                         struct cpt_inst_s *inst)
1216 {
1217         uint32_t size;
1218         int32_t inputlen = 0, outputlen;
1219         struct roc_se_ctx *se_ctx;
1220         uint8_t pdcp_alg_type, iv_len = 16;
1221         uint32_t encr_offset;
1222         uint32_t encr_data_len;
1223         int flags;
1224         uint64_t *offset_vaddr;
1225         uint32_t *iv_s, iv[4], j;
1226         union cpt_inst_w4 cpt_inst_w4;
1227
1228         /*
1229          * Microcode expects offsets in bytes
1230          * TODO: Rounding off
1231          */
1232         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1233         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1234
1235         se_ctx = params->ctx_buf.vaddr;
1236         flags = se_ctx->zsk_flags;
1237         pdcp_alg_type = se_ctx->pdcp_alg_type;
1238
1239         cpt_inst_w4.u64 = 0;
1240         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
1241
1242         /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1243
1244         cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
1245                                       (0 << 4) | (0 << 3) | (flags & 0x7));
1246
1247         /* consider iv len */
1248         encr_offset += iv_len;
1249
1250         inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1251         outputlen = inputlen;
1252
1253         /* IV */
1254         iv_s = params->iv_buf;
1255         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1256                 /*
1257                  * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
1258                  * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
1259                  */
1260
1261                 for (j = 0; j < 4; j++)
1262                         iv[j] = iv_s[3 - j];
1263         } else {
1264                 /* ZUC doesn't need a swap */
1265                 for (j = 0; j < 4; j++)
1266                         iv[j] = iv_s[j];
1267         }
1268
1269         /*
1270          * GP op header, lengths are expected in bits.
1271          */
1272         cpt_inst_w4.s.param1 = encr_data_len;
1273
1274         /*
1275          * In cn9k, cn10k since we have a limitation of
1276          * IV & Offset control word not part of instruction
1277          * and need to be part of Data Buffer, we check if
1278          * head room is there and then only do the Direct mode processing
1279          */
1280         if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1281                    (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1282                 void *dm_vaddr = params->bufs[0].vaddr;
1283
1284                 /* Use Direct mode */
1285
1286                 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1287                                             ROC_SE_OFF_CTRL_LEN - iv_len);
1288
1289                 /* DPTR */
1290                 inst->dptr = (uint64_t)offset_vaddr;
1291
1292                 /* RPTR should just exclude offset control word */
1293                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1294
1295                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1296
1297                 if (likely(iv_len)) {
1298                         uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1299                                                       ROC_SE_OFF_CTRL_LEN);
1300                         memcpy(iv_d, iv, 16);
1301                 }
1302
1303                 /* iv offset is 0 */
1304                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1305         } else {
1306                 void *m_vaddr = params->meta_buf.vaddr;
1307                 uint32_t i, g_size_bytes, s_size_bytes;
1308                 struct roc_se_sglist_comp *gather_comp;
1309                 struct roc_se_sglist_comp *scatter_comp;
1310                 uint8_t *in_buffer;
1311                 uint32_t *iv_d;
1312
1313                 /* save space for offset and iv... */
1314                 offset_vaddr = m_vaddr;
1315
1316                 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1317
1318                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1319
1320                 /* DPTR has SG list */
1321                 in_buffer = m_vaddr;
1322
1323                 ((uint16_t *)in_buffer)[0] = 0;
1324                 ((uint16_t *)in_buffer)[1] = 0;
1325
1326                 /* TODO Add error check if space will be sufficient */
1327                 gather_comp =
1328                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1329
1330                 /*
1331                  * Input Gather List
1332                  */
1333                 i = 0;
1334
1335                 /* Offset control word */
1336
1337                 /* iv offset is 0 */
1338                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
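                /*
                 * Offset control word sketch (as built here, assuming the
                 * field layout implied by the shift): the encrypt offset is
                 * placed above bit 16 of the 64-bit word, the IV offset
                 * field stays 0, and the word is stored big endian.
                 */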
1339
1340                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1341                                  ROC_SE_OFF_CTRL_LEN + iv_len);
1342
1343                 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1344                                     ROC_SE_OFF_CTRL_LEN);
1345                 memcpy(iv_d, iv, 16);
1346
1347                 /* Add input data */
1348                 size = inputlen - iv_len;
1349                 if (size) {
1350                         i = fill_sg_comp_from_iov(gather_comp, i,
1351                                                   params->src_iov, 0, &size,
1352                                                   NULL, 0);
1353                         if (unlikely(size)) {
1354                                 plt_dp_err("Insufficient buffer space,"
1355                                            " size %d needed",
1356                                            size);
1357                                 return -1;
1358                         }
1359                 }
1360                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1361                 g_size_bytes =
1362                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1363
1364                 /*
1365                  * Output Scatter List
1366                  */
1367
1368                 i = 0;
1369                 scatter_comp =
1370                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1371                                                       g_size_bytes);
1372
1373                 /* IV */
1374                 i = fill_sg_comp(scatter_comp, i,
1375                                  (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1376                                  iv_len);
1377
1378                 /* Add output data */
1379                 size = outputlen - iv_len;
1380                 if (size) {
1381                         i = fill_sg_comp_from_iov(scatter_comp, i,
1382                                                   params->dst_iov, 0, &size,
1383                                                   NULL, 0);
1384
1385                         if (unlikely(size)) {
1386                                 plt_dp_err("Insufficient buffer space,"
1387                                            " size %d needed",
1388                                            size);
1389                                 return -1;
1390                         }
1391                 }
1392                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1393                 s_size_bytes =
1394                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1395
1396                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1397
1398                 /* This is DPTR len in case of SG mode */
1399                 cpt_inst_w4.s.dlen = size;
1400
1401                 inst->dptr = (uint64_t)in_buffer;
1402         }
1403
1404         if (unlikely((encr_offset >> 16))) {
1405                 plt_dp_err("Offset not supported");
1406                 plt_dp_err("enc_offset: %d", encr_offset);
1407                 return -1;
1408         }
1409
1410         inst->w4.u64 = cpt_inst_w4.u64;
1411
1412         return 0;
1413 }
1414
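/*
 * Build a KASUMI instruction in SG mode. zsk_flags == 0 selects the F8
 * (cipher) path, where the 8-byte IV precedes the data and the output is
 * the processed data; otherwise the F9 (auth) path is taken and the output
 * is the MAC. The direction bit is read from byte 8 of the IV buffer.
 */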
1415 static __rte_always_inline int
1416 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1417                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1418 {
1419         void *m_vaddr = params->meta_buf.vaddr;
1420         uint32_t size;
1421         int32_t inputlen = 0, outputlen = 0;
1422         struct roc_se_ctx *se_ctx;
1423         uint32_t mac_len = 0;
1424         uint8_t i = 0;
1425         uint32_t encr_offset, auth_offset;
1426         uint32_t encr_data_len, auth_data_len;
1427         int flags;
1428         uint8_t *iv_s, *iv_d, iv_len = 8;
1429         uint8_t dir = 0;
1430         uint64_t *offset_vaddr;
1431         union cpt_inst_w4 cpt_inst_w4;
1432         uint8_t *in_buffer;
1433         uint32_t g_size_bytes, s_size_bytes;
1434         struct roc_se_sglist_comp *gather_comp;
1435         struct roc_se_sglist_comp *scatter_comp;
1436
1437         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1438         auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1439         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1440         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1441
1442         se_ctx = params->ctx_buf.vaddr;
1443         flags = se_ctx->zsk_flags;
1444         mac_len = se_ctx->mac_len;
1445
1446         if (flags == 0x0)
1447                 iv_s = params->iv_buf;
1448         else
1449                 iv_s = params->auth_iv_buf;
1450
1451         dir = iv_s[8] & 0x1;
1452
1453         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1454
1455         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1456         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1457                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1458
1459         /*
1460          * GP op header, lengths are expected in bits.
1461          */
1462         cpt_inst_w4.s.param1 = encr_data_len;
1463         cpt_inst_w4.s.param2 = auth_data_len;
1464
1465         /* consider iv len */
1466         if (flags == 0x0) {
1467                 encr_offset += iv_len;
1468                 auth_offset += iv_len;
1469         }
1470
1471         /* save space for offset ctrl and iv */
1472         offset_vaddr = m_vaddr;
1473
1474         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1475
1476         /* DPTR has SG list */
1477         in_buffer = m_vaddr;
1478
1479         ((uint16_t *)in_buffer)[0] = 0;
1480         ((uint16_t *)in_buffer)[1] = 0;
1481
1482         /* TODO Add error check if space will be sufficient */
1483         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1484
1485         /*
1486          * Input Gather List
1487          */
1488         i = 0;
1489
1490         /* Offset control word followed by iv */
1491
1492         if (flags == 0x0) {
1493                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1494                 outputlen = inputlen;
1495                 /* iv offset is 0 */
1496                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1497                 if (unlikely((encr_offset >> 16))) {
1498                         plt_dp_err("Offset not supported");
1499                         plt_dp_err("enc_offset: %d", encr_offset);
1500                         return -1;
1501                 }
1502         } else {
1503                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1504                 outputlen = mac_len;
1505                 /* iv offset is 0 */
1506                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1507                 if (unlikely((auth_offset >> 8))) {
1508                         plt_dp_err("Offset not supported");
1509                         plt_dp_err("auth_offset: %d", auth_offset);
1510                         return -1;
1511                 }
1512         }
1513
1514         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1515                          ROC_SE_OFF_CTRL_LEN + iv_len);
1516
1517         /* IV */
1518         iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1519         memcpy(iv_d, iv_s, iv_len);
1520
1521         /* input data */
1522         size = inputlen - iv_len;
1523         if (size) {
1524                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1525                                           &size, NULL, 0);
1526
1527                 if (unlikely(size)) {
1528                         plt_dp_err("Insufficient buffer space,"
1529                                    " size %d needed",
1530                                    size);
1531                         return -1;
1532                 }
1533         }
1534         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1535         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1536
1537         /*
1538          * Output Scatter List
1539          */
1540
1541         i = 0;
1542         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1543                                                      g_size_bytes);
1544
1545         if (flags == 0x1) {
                /* The IV goes into the scatter list only for F8; skip it for F9 */
1547                 iv_len = 0;
1548         }
1549
1550         /* IV */
1551         if (iv_len) {
1552                 i = fill_sg_comp(scatter_comp, i,
1553                                  (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1554                                  iv_len);
1555         }
1556
1557         /* Add output data */
1558         if (req_flags & ROC_SE_VALID_MAC_BUF) {
1559                 size = outputlen - iv_len - mac_len;
1560                 if (size) {
1561                         i = fill_sg_comp_from_iov(scatter_comp, i,
1562                                                   params->dst_iov, 0, &size,
1563                                                   NULL, 0);
1564
1565                         if (unlikely(size)) {
1566                                 plt_dp_err("Insufficient buffer space,"
1567                                            " size %d needed",
1568                                            size);
1569                                 return -1;
1570                         }
1571                 }
1572
1573                 /* mac data */
1574                 if (mac_len) {
1575                         i = fill_sg_comp_from_buf(scatter_comp, i,
1576                                                   &params->mac_buf);
1577                 }
1578         } else {
1579                 /* Output including mac */
1580                 size = outputlen - iv_len;
1581                 if (size) {
1582                         i = fill_sg_comp_from_iov(scatter_comp, i,
1583                                                   params->dst_iov, 0, &size,
1584                                                   NULL, 0);
1585
1586                         if (unlikely(size)) {
1587                                 plt_dp_err("Insufficient buffer space,"
1588                                            " size %d needed",
1589                                            size);
1590                                 return -1;
1591                         }
1592                 }
1593         }
1594         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1595         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1596
1597         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1598
1599         /* This is DPTR len in case of SG mode */
1600         cpt_inst_w4.s.dlen = size;
1601
1602         inst->dptr = (uint64_t)in_buffer;
1603         inst->w4.u64 = cpt_inst_w4.u64;
1604
1605         return 0;
1606 }
1607
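/*
 * Build a KASUMI F8 decrypt instruction in SG mode: the 8-byte IV is placed
 * after the offset control word in the gather list and mirrored in the
 * scatter list ahead of the output data.
 */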
1608 static __rte_always_inline int
1609 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1610                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1611 {
1612         void *m_vaddr = params->meta_buf.vaddr;
1613         uint32_t size;
1614         int32_t inputlen = 0, outputlen;
1615         struct roc_se_ctx *se_ctx;
1616         uint8_t i = 0, iv_len = 8;
1617         uint32_t encr_offset;
1618         uint32_t encr_data_len;
1619         int flags;
1620         uint8_t dir = 0;
1621         uint64_t *offset_vaddr;
1622         union cpt_inst_w4 cpt_inst_w4;
1623         uint8_t *in_buffer;
1624         uint32_t g_size_bytes, s_size_bytes;
1625         struct roc_se_sglist_comp *gather_comp;
1626         struct roc_se_sglist_comp *scatter_comp;
1627
1628         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1629         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1630
1631         se_ctx = params->ctx_buf.vaddr;
1632         flags = se_ctx->zsk_flags;
1633
1634         cpt_inst_w4.u64 = 0;
1635         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1636
1637         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1638         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1639                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1640
1641         /*
1642          * GP op header, lengths are expected in bits.
1643          */
1644         cpt_inst_w4.s.param1 = encr_data_len;
1645
1646         /* consider iv len */
1647         encr_offset += iv_len;
1648
1649         inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
1650         outputlen = inputlen;
1651
1652         /* save space for offset ctrl & iv */
1653         offset_vaddr = m_vaddr;
1654
1655         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1656
1657         /* DPTR has SG list */
1658         in_buffer = m_vaddr;
1659
1660         ((uint16_t *)in_buffer)[0] = 0;
1661         ((uint16_t *)in_buffer)[1] = 0;
1662
1663         /* TODO Add error check if space will be sufficient */
1664         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1665
1666         /*
1667          * Input Gather List
1668          */
1669         i = 0;
1670
1671         /* Offset control word followed by iv */
1672         *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1673         if (unlikely((encr_offset >> 16))) {
1674                 plt_dp_err("Offset not supported");
1675                 plt_dp_err("enc_offset: %d", encr_offset);
1676                 return -1;
1677         }
1678
1679         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1680                          ROC_SE_OFF_CTRL_LEN + iv_len);
1681
1682         /* IV */
1683         memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1684                iv_len);
1685
1686         /* Add input data */
1687         size = inputlen - iv_len;
1688         if (size) {
1689                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1690                                           &size, NULL, 0);
1691                 if (unlikely(size)) {
1692                         plt_dp_err("Insufficient buffer space,"
1693                                    " size %d needed",
1694                                    size);
1695                         return -1;
1696                 }
1697         }
1698         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1699         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1700
1701         /*
1702          * Output Scatter List
1703          */
1704
1705         i = 0;
1706         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1707                                                      g_size_bytes);
1708
1709         /* IV */
1710         i = fill_sg_comp(scatter_comp, i,
1711                          (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1712
1713         /* Add output data */
1714         size = outputlen - iv_len;
1715         if (size) {
1716                 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1717                                           &size, NULL, 0);
1718                 if (unlikely(size)) {
1719                         plt_dp_err("Insufficient buffer space,"
1720                                    " size %d needed",
1721                                    size);
1722                         return -1;
1723                 }
1724         }
1725         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1726         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1727
1728         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1729
1730         /* This is DPTR len in case of SG mode */
1731         cpt_inst_w4.s.dlen = size;
1732
1733         inst->dptr = (uint64_t)in_buffer;
1734         inst->w4.u64 = cpt_inst_w4.u64;
1735
1736         return 0;
1737 }
1738
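/*
 * Dispatch a decrypt/auth-verify request to the prep routine matching the
 * context's fc_type: FC_GEN, PDCP (ZUC/SNOW3G) or KASUMI.
 */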
1739 static __rte_always_inline int
1740 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1741                      struct roc_se_fc_params *fc_params,
1742                      struct cpt_inst_s *inst)
1743 {
1744         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1745         uint8_t fc_type;
1746         int ret = -1;
1747
1748         fc_type = ctx->fc_type;
1749
1750         if (likely(fc_type == ROC_SE_FC_GEN)) {
1751                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1752         } else if (fc_type == ROC_SE_PDCP) {
1753                 ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params,
1754                                               inst);
1755         } else if (fc_type == ROC_SE_KASUMI) {
1756                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1757         }
1758
        /*
         * For the AUTH_ONLY case the microcode only supports digest
         * generation; verification is done in software with memcmp()
         * against the generated digest.
         */
1764
1765         return ret;
1766 }
1767
1768 static __rte_always_inline int
1769 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1770                      struct roc_se_fc_params *fc_params,
1771                      struct cpt_inst_s *inst)
1772 {
1773         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1774         uint8_t fc_type;
1775         int ret = -1;
1776
1777         fc_type = ctx->fc_type;
1778
1779         if (likely(fc_type == ROC_SE_FC_GEN)) {
1780                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1781         } else if (fc_type == ROC_SE_PDCP) {
1782                 ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params,
1783                                               inst);
1784         } else if (fc_type == ROC_SE_KASUMI) {
1785                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1786                                           inst);
1787         } else if (fc_type == ROC_SE_HASH_HMAC) {
1788                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1789         }
1790
1791         return ret;
1792 }
1793
1794 static __rte_always_inline int
1795 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1796 {
1797         struct rte_crypto_aead_xform *aead_form;
1798         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1799         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1800         uint32_t cipher_key_len = 0;
1801         uint8_t aes_gcm = 0;
1802         aead_form = &xform->aead;
1803
1804         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1805                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1806                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1807         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1808                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1809                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1810         } else {
1811                 plt_dp_err("Unknown aead operation\n");
1812                 return -1;
1813         }
1814         switch (aead_form->algo) {
1815         case RTE_CRYPTO_AEAD_AES_GCM:
1816                 enc_type = ROC_SE_AES_GCM;
1817                 cipher_key_len = 16;
1818                 aes_gcm = 1;
1819                 break;
1820         case RTE_CRYPTO_AEAD_AES_CCM:
1821                 plt_dp_err("Crypto: Unsupported cipher algo %u",
1822                            aead_form->algo);
1823                 return -1;
1824         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1825                 enc_type = ROC_SE_CHACHA20;
1826                 auth_type = ROC_SE_POLY1305;
1827                 cipher_key_len = 32;
1828                 sess->chacha_poly = 1;
1829                 break;
1830         default:
1831                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1832                            aead_form->algo);
1833                 return -1;
1834         }
1835         if (aead_form->key.length < cipher_key_len) {
1836                 plt_dp_err("Invalid cipher params keylen %u",
1837                            aead_form->key.length);
1838                 return -1;
1839         }
1840         sess->zsk_flag = 0;
1841         sess->aes_gcm = aes_gcm;
1842         sess->mac_len = aead_form->digest_length;
1843         sess->iv_offset = aead_form->iv.offset;
1844         sess->iv_length = aead_form->iv.length;
1845         sess->aad_length = aead_form->aad_length;
1846
1847         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1848                                          aead_form->key.data,
1849                                          aead_form->key.length, NULL)))
1850                 return -1;
1851
1852         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1853                                          aead_form->digest_length)))
1854                 return -1;
1855
1856         return 0;
1857 }
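/*
 * Illustrative only (not part of the driver): a minimal AEAD transform of
 * the shape this helper accepts; key_data, iv_offset and sess are
 * placeholders chosen for the sketch.
 *
 *      struct rte_crypto_sym_xform xf = {
 *              .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *              .aead = {
 *                      .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *                      .algo = RTE_CRYPTO_AEAD_AES_GCM,
 *                      .key = { .data = key_data, .length = 16 },
 *                      .iv = { .offset = iv_offset, .length = 12 },
 *                      .digest_length = 16,
 *                      .aad_length = 8,
 *              },
 *      };
 *      ret = fill_sess_aead(&xf, sess);
 */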
1858
1859 static __rte_always_inline int
1860 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1861 {
1862         struct rte_crypto_cipher_xform *c_form;
1863         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1864         uint32_t cipher_key_len = 0;
1865         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1866
1867         c_form = &xform->cipher;
1868
1869         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1870                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1871         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1872                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1873                 if (xform->next != NULL &&
1874                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1875                         /* Perform decryption followed by auth verify */
1876                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1877                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1878                 }
1879         } else {
1880                 plt_dp_err("Unknown cipher operation\n");
1881                 return -1;
1882         }
1883
1884         switch (c_form->algo) {
1885         case RTE_CRYPTO_CIPHER_AES_CBC:
1886                 enc_type = ROC_SE_AES_CBC;
1887                 cipher_key_len = 16;
1888                 break;
1889         case RTE_CRYPTO_CIPHER_3DES_CBC:
1890                 enc_type = ROC_SE_DES3_CBC;
1891                 cipher_key_len = 24;
1892                 break;
1893         case RTE_CRYPTO_CIPHER_DES_CBC:
1894                 /* DES is implemented using 3DES in hardware */
1895                 enc_type = ROC_SE_DES3_CBC;
1896                 cipher_key_len = 8;
1897                 break;
1898         case RTE_CRYPTO_CIPHER_AES_CTR:
1899                 enc_type = ROC_SE_AES_CTR;
1900                 cipher_key_len = 16;
1901                 aes_ctr = 1;
1902                 break;
1903         case RTE_CRYPTO_CIPHER_NULL:
1904                 enc_type = 0;
1905                 is_null = 1;
1906                 break;
1907         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1908                 enc_type = ROC_SE_KASUMI_F8_ECB;
1909                 cipher_key_len = 16;
1910                 zsk_flag = ROC_SE_K_F8;
1911                 break;
1912         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1913                 enc_type = ROC_SE_SNOW3G_UEA2;
1914                 cipher_key_len = 16;
1915                 zsk_flag = ROC_SE_ZS_EA;
1916                 break;
1917         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1918                 enc_type = ROC_SE_ZUC_EEA3;
1919                 cipher_key_len = 16;
1920                 zsk_flag = ROC_SE_ZS_EA;
1921                 break;
1922         case RTE_CRYPTO_CIPHER_AES_XTS:
1923                 enc_type = ROC_SE_AES_XTS;
1924                 cipher_key_len = 16;
1925                 break;
1926         case RTE_CRYPTO_CIPHER_3DES_ECB:
1927                 enc_type = ROC_SE_DES3_ECB;
1928                 cipher_key_len = 24;
1929                 break;
1930         case RTE_CRYPTO_CIPHER_AES_ECB:
1931                 enc_type = ROC_SE_AES_ECB;
1932                 cipher_key_len = 16;
1933                 break;
1934         case RTE_CRYPTO_CIPHER_3DES_CTR:
1935         case RTE_CRYPTO_CIPHER_AES_F8:
1936         case RTE_CRYPTO_CIPHER_ARC4:
1937                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1938                 return -1;
1939         default:
1940                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1941                            c_form->algo);
1942                 return -1;
1943         }
1944
1945         if (c_form->key.length < cipher_key_len) {
1946                 plt_dp_err("Invalid cipher params keylen %u",
1947                            c_form->key.length);
1948                 return -1;
1949         }
1950
1951         sess->zsk_flag = zsk_flag;
1952         sess->aes_gcm = 0;
1953         sess->aes_ctr = aes_ctr;
1954         sess->iv_offset = c_form->iv.offset;
1955         sess->iv_length = c_form->iv.length;
1956         sess->is_null = is_null;
1957
1958         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1959                                          c_form->key.data, c_form->key.length,
1960                                          NULL)))
1961                 return -1;
1962
1963         return 0;
1964 }
1965
1966 static __rte_always_inline int
1967 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1968 {
1969         struct rte_crypto_auth_xform *a_form;
1970         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1971         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1972
1973         if (xform->next != NULL &&
1974             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1975             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1976                 /* Perform auth followed by encryption */
1977                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1978                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1979         }
1980
1981         a_form = &xform->auth;
1982
1983         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1984                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1985         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1986                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1987         else {
1988                 plt_dp_err("Unknown auth operation");
1989                 return -1;
1990         }
1991
1992         switch (a_form->algo) {
1993         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1994                 /* Fall through */
1995         case RTE_CRYPTO_AUTH_SHA1:
1996                 auth_type = ROC_SE_SHA1_TYPE;
1997                 break;
1998         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1999         case RTE_CRYPTO_AUTH_SHA256:
2000                 auth_type = ROC_SE_SHA2_SHA256;
2001                 break;
2002         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2003         case RTE_CRYPTO_AUTH_SHA512:
2004                 auth_type = ROC_SE_SHA2_SHA512;
2005                 break;
2006         case RTE_CRYPTO_AUTH_AES_GMAC:
2007                 auth_type = ROC_SE_GMAC_TYPE;
2008                 aes_gcm = 1;
2009                 break;
2010         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2011         case RTE_CRYPTO_AUTH_SHA224:
2012                 auth_type = ROC_SE_SHA2_SHA224;
2013                 break;
2014         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2015         case RTE_CRYPTO_AUTH_SHA384:
2016                 auth_type = ROC_SE_SHA2_SHA384;
2017                 break;
2018         case RTE_CRYPTO_AUTH_MD5_HMAC:
2019         case RTE_CRYPTO_AUTH_MD5:
2020                 auth_type = ROC_SE_MD5_TYPE;
2021                 break;
2022         case RTE_CRYPTO_AUTH_KASUMI_F9:
2023                 auth_type = ROC_SE_KASUMI_F9_ECB;
2024                 /*
2025                  * Indicate that direction needs to be taken out
2026                  * from end of src
2027                  */
2028                 zsk_flag = ROC_SE_K_F9;
2029                 break;
2030         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2031                 auth_type = ROC_SE_SNOW3G_UIA2;
2032                 zsk_flag = ROC_SE_ZS_IA;
2033                 break;
2034         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2035                 auth_type = ROC_SE_ZUC_EIA3;
2036                 zsk_flag = ROC_SE_ZS_IA;
2037                 break;
2038         case RTE_CRYPTO_AUTH_NULL:
2039                 auth_type = 0;
2040                 is_null = 1;
2041                 break;
2042         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2043         case RTE_CRYPTO_AUTH_AES_CMAC:
2044         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2045                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
2046                 return -1;
2047         default:
2048                 plt_dp_err("Crypto: Undefined Hash algo %u specified",
2049                            a_form->algo);
2050                 return -1;
2051         }
2052
2053         sess->zsk_flag = zsk_flag;
2054         sess->aes_gcm = aes_gcm;
2055         sess->mac_len = a_form->digest_length;
2056         sess->is_null = is_null;
2057         if (zsk_flag) {
2058                 sess->auth_iv_offset = a_form->iv.offset;
2059                 sess->auth_iv_length = a_form->iv.length;
2060         }
2061         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
2062                                          a_form->key.data, a_form->key.length,
2063                                          a_form->digest_length)))
2064                 return -1;
2065
2066         return 0;
2067 }
2068
2069 static __rte_always_inline int
2070 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
2071 {
2072         struct rte_crypto_auth_xform *a_form;
2073         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
2074         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
2075
2076         a_form = &xform->auth;
2077
2078         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2079                 sess->cpt_op |= ROC_SE_OP_ENCODE;
2080         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2081                 sess->cpt_op |= ROC_SE_OP_DECODE;
2082         else {
2083                 plt_dp_err("Unknown auth operation");
2084                 return -1;
2085         }
2086
2087         switch (a_form->algo) {
2088         case RTE_CRYPTO_AUTH_AES_GMAC:
2089                 enc_type = ROC_SE_AES_GCM;
2090                 auth_type = ROC_SE_GMAC_TYPE;
2091                 break;
2092         default:
2093                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
2094                            a_form->algo);
2095                 return -1;
2096         }
2097
2098         sess->zsk_flag = 0;
2099         sess->aes_gcm = 0;
2100         sess->is_gmac = 1;
2101         sess->iv_offset = a_form->iv.offset;
2102         sess->iv_length = a_form->iv.length;
2103         sess->mac_len = a_form->digest_length;
2104
2105         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
2106                                          a_form->key.data, a_form->key.length,
2107                                          NULL)))
2108                 return -1;
2109
2110         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
2111                                          a_form->digest_length)))
2112                 return -1;
2113
2114         return 0;
2115 }
2116
2117 static __rte_always_inline void *
2118 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
2119               struct rte_mempool *cpt_meta_pool,
2120               struct cpt_inflight_req *infl_req)
2121 {
2122         uint8_t *mdata;
2123
2124         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
2125                 return NULL;
2126
2127         buf->vaddr = mdata;
2128         buf->size = len;
2129
2130         infl_req->mdata = mdata;
2131         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
2132
2133         return mdata;
2134 }
2135
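/*
 * Walk the mbuf chain starting at start_offset and record each segment's
 * address and length in the iovec. Returns 1 when the offset leaves no
 * data in the segment it lands in, 0 otherwise.
 */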
2136 static __rte_always_inline uint32_t
2137 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
2138                      uint32_t start_offset)
2139 {
2140         uint16_t index = 0;
2141         void *seg_data = NULL;
2142         int32_t seg_size = 0;
2143
2144         if (!pkt) {
2145                 iovec->buf_cnt = 0;
2146                 return 0;
2147         }
2148
2149         if (!start_offset) {
2150                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2151                 seg_size = pkt->data_len;
2152         } else {
2153                 while (start_offset >= pkt->data_len) {
2154                         start_offset -= pkt->data_len;
2155                         pkt = pkt->next;
2156                 }
2157
2158                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2159                 seg_size = pkt->data_len - start_offset;
2160                 if (!seg_size)
2161                         return 1;
2162         }
2163
2164         /* first seg */
2165         iovec->bufs[index].vaddr = seg_data;
2166         iovec->bufs[index].size = seg_size;
2167         index++;
2168         pkt = pkt->next;
2169
2170         while (unlikely(pkt != NULL)) {
2171                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2172                 seg_size = pkt->data_len;
2173                 if (!seg_size)
2174                         break;
2175
2176                 iovec->bufs[index].vaddr = seg_data;
2177                 iovec->bufs[index].size = seg_size;
2178
2179                 index++;
2180
2181                 pkt = pkt->next;
2182         }
2183
2184         iovec->buf_cnt = index;
2185         return 0;
2186 }
2187
2188 static __rte_always_inline uint32_t
2189 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2190                              struct roc_se_fc_params *param, uint32_t *flags)
2191 {
2192         uint16_t index = 0;
2193         void *seg_data = NULL;
2194         uint32_t seg_size = 0;
2195         struct roc_se_iov_ptr *iovec;
2196
2197         seg_data = rte_pktmbuf_mtod(pkt, void *);
2198         seg_size = pkt->data_len;
2199
        /* Single-segment mbuf: take the in-place fast path */
2201         if (likely(!pkt->next)) {
2202                 uint32_t headroom;
2203
2204                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2205                 headroom = rte_pktmbuf_headroom(pkt);
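                /*
                 * 24 bytes of headroom presumably covers the offset control
                 * word (written as a 64-bit value) plus the 16-byte IV used
                 * by the direct-mode path; this is an assumption based on
                 * the layout used there, not a documented constant.
                 */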
2206                 if (likely(headroom >= 24))
2207                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2208
2209                 param->bufs[0].vaddr = seg_data;
2210                 param->bufs[0].size = seg_size;
2211                 return 0;
2212         }
2213         iovec = param->src_iov;
2214         iovec->bufs[index].vaddr = seg_data;
2215         iovec->bufs[index].size = seg_size;
2216         index++;
2217         pkt = pkt->next;
2218
2219         while (unlikely(pkt != NULL)) {
2220                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2221                 seg_size = pkt->data_len;
2222
2223                 if (!seg_size)
2224                         break;
2225
2226                 iovec->bufs[index].vaddr = seg_data;
2227                 iovec->bufs[index].size = seg_size;
2228
2229                 index++;
2230
2231                 pkt = pkt->next;
2232         }
2233
2234         iovec->buf_cnt = index;
2235         return 0;
2236 }
2237
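/*
 * Build the flexi-crypto request for a symmetric crypto op: collect the
 * IV/AAD/digest pointers, pack the data offsets and lengths, pick in-place
 * or out-of-place SG processing, allocate meta data when needed and invoke
 * the encode or decode prep routine.
 */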
2238 static __rte_always_inline int
2239 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2240                struct cpt_qp_meta_info *m_info,
2241                struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2242 {
2243         struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2244         uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2245         struct rte_crypto_sym_op *sym_op = cop->sym;
2246         void *mdata = NULL;
2247         uint32_t mc_hash_off;
2248         uint32_t flags = 0;
2249         uint64_t d_offs, d_lens;
2250         struct rte_mbuf *m_src, *m_dst;
2251         uint8_t cpt_op = sess->cpt_op;
2252 #ifdef CPT_ALWAYS_USE_SG_MODE
2253         uint8_t inplace = 0;
2254 #else
2255         uint8_t inplace = 1;
2256 #endif
2257         struct roc_se_fc_params fc_params;
2258         char src[SRC_IOV_SIZE];
2259         char dst[SRC_IOV_SIZE];
2260         uint32_t iv_buf[4];
2261         int ret;
2262
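        /*
         * For AES-CTR with a non-16-byte IV, a full 16-byte counter block
         * is assembled below: the 12-byte nonce/IV from the op followed by
         * a counter word initialised to 1.
         */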
2263         if (likely(sess->iv_length)) {
2264                 flags |= ROC_SE_VALID_IV_BUF;
2265                 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2266                                                              sess->iv_offset);
2267                 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2268                         memcpy((uint8_t *)iv_buf,
2269                                rte_crypto_op_ctod_offset(cop, uint8_t *,
2270                                                          sess->iv_offset),
2271                                12);
2272                         iv_buf[3] = rte_cpu_to_be_32(0x1);
2273                         fc_params.iv_buf = iv_buf;
2274                 }
2275         }
2276
2277         if (sess->zsk_flag) {
2278                 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2279                         cop, uint8_t *, sess->auth_iv_offset);
2280                 if (sess->zsk_flag != ROC_SE_ZS_EA)
2281                         inplace = 0;
2282         }
2283         m_src = sym_op->m_src;
2284         m_dst = sym_op->m_dst;
2285
2286         if (sess->aes_gcm || sess->chacha_poly) {
2287                 uint8_t *salt;
2288                 uint8_t *aad_data;
2289                 uint16_t aad_len;
2290
2291                 d_offs = sym_op->aead.data.offset;
2292                 d_lens = sym_op->aead.data.length;
2293                 mc_hash_off =
2294                         sym_op->aead.data.offset + sym_op->aead.data.length;
2295
2296                 aad_data = sym_op->aead.aad.data;
2297                 aad_len = sess->aad_length;
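                /*
                 * If the AAD immediately precedes the cipher data in the
                 * mbuf, fold it into the data range by widening the packed
                 * offset/length words; otherwise pass it as a separate AAD
                 * buffer and give up in-place processing.
                 */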
2298                 if (likely((aad_data + aad_len) ==
2299                            rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2300                                                    sym_op->aead.data.offset))) {
2301                         d_offs = (d_offs - aad_len) | (d_offs << 16);
2302                         d_lens = (d_lens + aad_len) | (d_lens << 32);
2303                 } else {
2304                         fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2305                         fc_params.aad_buf.size = aad_len;
2306                         flags |= ROC_SE_VALID_AAD_BUF;
2307                         inplace = 0;
2308                         d_offs = d_offs << 16;
2309                         d_lens = d_lens << 32;
2310                 }
2311
2312                 salt = fc_params.iv_buf;
2313                 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2314                         cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2315                         sess->salt = *(uint32_t *)salt;
2316                 }
2317                 fc_params.iv_buf = salt + 4;
2318                 if (likely(sess->mac_len)) {
2319                         struct rte_mbuf *m =
2320                                 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2321
2322                         if (!m)
2323                                 m = m_src;
2324
2325                         /* hmac immediately following data is best case */
2326                         if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2327                                              mc_hash_off !=
2328                                      (uint8_t *)sym_op->aead.digest.data)) {
2329                                 flags |= ROC_SE_VALID_MAC_BUF;
2330                                 fc_params.mac_buf.size = sess->mac_len;
2331                                 fc_params.mac_buf.vaddr =
2332                                         sym_op->aead.digest.data;
2333                                 inplace = 0;
2334                         }
2335                 }
2336         } else {
2337                 d_offs = sym_op->cipher.data.offset;
2338                 d_lens = sym_op->cipher.data.length;
2339                 mc_hash_off =
2340                         sym_op->cipher.data.offset + sym_op->cipher.data.length;
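                /*
                 * Pack both ranges into single 64-bit words: the cipher
                 * offset goes above bit 16 with the auth offset in the low
                 * bits, and the cipher length occupies the upper 32 bits
                 * with the auth length in the lower 32 bits.
                 */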
2341                 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2342                 d_lens = (d_lens << 32) | sym_op->auth.data.length;
2343
2344                 if (mc_hash_off <
2345                     (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2346                         mc_hash_off = (sym_op->auth.data.offset +
2347                                        sym_op->auth.data.length);
2348                 }
2349                 /* for gmac, salt should be updated like in gcm */
2350                 if (unlikely(sess->is_gmac)) {
2351                         uint8_t *salt;
2352                         salt = fc_params.iv_buf;
2353                         if (unlikely(*(uint32_t *)salt != sess->salt)) {
2354                                 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2355                                 sess->salt = *(uint32_t *)salt;
2356                         }
2357                         fc_params.iv_buf = salt + 4;
2358                 }
2359                 if (likely(sess->mac_len)) {
2360                         struct rte_mbuf *m;
2361
2362                         m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2363                         if (!m)
2364                                 m = m_src;
2365
2366                         /* hmac immediately following data is best case */
2367                         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2368                             (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2369                                               mc_hash_off !=
2370                                       (uint8_t *)sym_op->auth.digest.data))) {
2371                                 flags |= ROC_SE_VALID_MAC_BUF;
2372                                 fc_params.mac_buf.size = sess->mac_len;
2373                                 fc_params.mac_buf.vaddr =
2374                                         sym_op->auth.digest.data;
2375                                 inplace = 0;
2376                         }
2377                 }
2378         }
2379         fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2380
2381         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2382             unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2383                 inplace = 0;
2384
2385         if (likely(!m_dst && inplace)) {
                /* In-place case: a single buffer with no separate AAD or
                 * MAC buffer and not a wireless (air interface) algorithm.
                 */
2390                 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2391
2392                 if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
2393                                                           &flags))) {
2394                         plt_dp_err("Prepare inplace src iov failed");
2395                         ret = -EINVAL;
2396                         goto err_exit;
2397                 }
2398
2399         } else {
2400                 /* Out of place processing */
2401                 fc_params.src_iov = (void *)src;
2402                 fc_params.dst_iov = (void *)dst;
2403
                /* Build the source SG I/O list from the mbuf chain */
2405                 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2406                         plt_dp_err("Prepare src iov failed");
2407                         ret = -EINVAL;
2408                         goto err_exit;
2409                 }
2410
2411                 if (unlikely(m_dst != NULL)) {
2412                         uint32_t pkt_len;
2413
                        /* Ensure the destination has at least as much room as the source */
2415                         pkt_len = rte_pktmbuf_pkt_len(m_dst);
2416
2417                         if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2418                                 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2419                                 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2420                                         plt_dp_err("Not enough space in "
2421                                                    "m_dst %p, need %u"
2422                                                    " more",
2423                                                    m_dst, pkt_len);
2424                                         ret = -EINVAL;
2425                                         goto err_exit;
2426                                 }
2427                         }
2428
2429                         if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2430                                 plt_dp_err("Prepare dst iov failed for "
2431                                            "m_dst %p",
2432                                            m_dst);
2433                                 ret = -EINVAL;
2434                                 goto err_exit;
2435                         }
2436                 } else {
2437                         fc_params.dst_iov = (void *)src;
2438                 }
2439         }
2440
2441         if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2442                        (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2443                        ((ctx->fc_type == ROC_SE_FC_GEN) ||
2444                         (ctx->fc_type == ROC_SE_PDCP))))) {
2445                 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2446                                       m_info->pool, infl_req);
2447                 if (mdata == NULL) {
2448                         plt_dp_err("Error allocating meta buffer for request");
2449                         return -ENOMEM;
2450                 }
2451         }
2452
2453         /* Finally prepare the instruction */
2454         if (cpt_op & ROC_SE_OP_ENCODE)
2455                 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2456                                            inst);
2457         else
2458                 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2459                                            inst);
2460
2461         if (unlikely(ret)) {
2462                 plt_dp_err("Preparing request failed due to bad input arg");
2463                 goto free_mdata_and_exit;
2464         }
2465
2466         return 0;
2467
2468 free_mdata_and_exit:
2469         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2470                 rte_mempool_put(m_info->pool, infl_req->mdata);
2471 err_exit:
2472         return ret;
2473 }
2474
2475 static __rte_always_inline void
2476 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2477 {
2478         uint8_t *mac;
2479         struct rte_crypto_sym_op *sym_op = op->sym;
2480
2481         if (sym_op->auth.digest.data)
2482                 mac = sym_op->auth.digest.data;
2483         else
2484                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2485                                               sym_op->auth.data.length +
2486                                                       sym_op->auth.data.offset);
2487         if (!mac) {
2488                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2489                 return;
2490         }
2491
2492         if (memcmp(mac, gen_mac, mac_len))
2493                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2494         else
2495                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2496 }
2497
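/*
 * Recover the KASUMI F9 message length and direction bit from the padded
 * input: scan backwards for the last non-zero byte, locate the trailing
 * '1' padding bit, take the bit before it as the direction and everything
 * before that as the message length in bits.
 */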
2498 static __rte_always_inline void
2499 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2500                                    uint32_t *addr_length_in_bits,
2501                                    uint8_t *addr_direction)
2502 {
2503         uint8_t found = 0;
2504         uint32_t pos;
2505         uint8_t last_byte;
2506         while (!found && counter_num_bytes > 0) {
2507                 counter_num_bytes--;
2508                 if (src[counter_num_bytes] == 0x00)
2509                         continue;
2510                 pos = rte_bsf32(src[counter_num_bytes]);
2511                 if (pos == 7) {
2512                         if (likely(counter_num_bytes > 0)) {
2513                                 last_byte = src[counter_num_bytes - 1];
2514                                 *addr_direction = last_byte & 0x1;
2515                                 *addr_length_in_bits =
2516                                         counter_num_bytes * 8 - 1;
2517                         }
2518                 } else {
2519                         last_byte = src[counter_num_bytes];
2520                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2521                         *addr_length_in_bits =
2522                                 counter_num_bytes * 8 + (8 - (pos + 2));
2523                 }
2524                 found = 1;
2525         }
2526 }
2527
/*
 * This handles all auth-only requests except AES-GMAC.
 */
2531 static __rte_always_inline int
2532 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2533                    struct cpt_qp_meta_info *m_info,
2534                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2535 {
2536         uint32_t space = 0;
2537         struct rte_crypto_sym_op *sym_op = cop->sym;
2538         void *mdata;
2539         uint32_t auth_range_off;
2540         uint32_t flags = 0;
2541         uint64_t d_offs = 0, d_lens;
2542         struct rte_mbuf *m_src, *m_dst;
2543         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2544         uint16_t mac_len = sess->mac_len;
2545         struct roc_se_fc_params params;
2546         char src[SRC_IOV_SIZE];
2547         uint8_t iv_buf[16];
2548         int ret;
2549
2550         memset(&params, 0, sizeof(struct roc_se_fc_params));
2551
2552         m_src = sym_op->m_src;
2553
2554         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2555                               infl_req);
2556         if (mdata == NULL) {
2557                 ret = -ENOMEM;
2558                 goto err_exit;
2559         }
2560
2561         auth_range_off = sym_op->auth.data.offset;
2562
2563         flags = ROC_SE_VALID_MAC_BUF;
2564         params.src_iov = (void *)src;
2565         if (unlikely(sess->zsk_flag)) {
                /*
                 * For ZUC, KASUMI and SNOW3G the offsets are in bits, so the
                 * offset is passed through to the microcode even in the
                 * auth-only case and the bit-level handling is left to it.
                 */
2571                 d_offs = auth_range_off;
2572                 auth_range_off = 0;
2573                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2574                         cop, uint8_t *, sess->auth_iv_offset);
2575                 if (sess->zsk_flag == ROC_SE_K_F9) {
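                        /*
                         * The first 8 bytes of the source are the F9 header
                         * (COUNT and FRESH) and are used as the auth IV; the
                         * true length and direction bit are recovered from
                         * the padded message below, the data range is
                         * advanced past the 64-bit header and the direction
                         * is stored at the end of the auth IV.
                         */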
2576                         uint32_t length_in_bits, num_bytes;
2577                         uint8_t *src, direction = 0;
2578
2579                         memcpy(iv_buf,
2580                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2581                         /*
2582                          * This is kasumi f9, take direction from
2583                          * source buffer
2584                          */
2585                         length_in_bits = cop->sym->auth.data.length;
2586                         num_bytes = (length_in_bits >> 3);
2587                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2588                         find_kasumif9_direction_and_length(
2589                                 src, num_bytes, &length_in_bits, &direction);
2590                         length_in_bits -= 64;
2591                         cop->sym->auth.data.offset += 64;
2592                         d_offs = cop->sym->auth.data.offset;
2593                         auth_range_off = d_offs / 8;
2594                         cop->sym->auth.data.length = length_in_bits;
2595
2596                         /* Store it at end of auth iv */
2597                         iv_buf[8] = direction;
2598                         params.auth_iv_buf = iv_buf;
2599                 }
2600         }
2601
2602         d_lens = sym_op->auth.data.length;
2603
2604         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2605
2606         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2607                 if (sym_op->auth.digest.data) {
2608                         /*
2609                          * Digest to be generated
2610                          * in separate buffer
2611                          */
2612                         params.mac_buf.size = sess->mac_len;
2613                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2614                 } else {
2615                         uint32_t off = sym_op->auth.data.offset +
2616                                        sym_op->auth.data.length;
2617                         int32_t dlen, space;
2618
2619                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2620                         dlen = rte_pktmbuf_pkt_len(m_dst);
2621
2622                         space = off + mac_len - dlen;
2623                         if (space > 0)
2624                                 if (!rte_pktmbuf_append(m_dst, space)) {
2625                                         plt_dp_err("Failed to extend "
2626                                                    "mbuf by %uB",
2627                                                    space);
2628                                         ret = -EINVAL;
2629                                         goto free_mdata_and_exit;
2630                                 }
2631
2632                         params.mac_buf.vaddr =
2633                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2634                         params.mac_buf.size = mac_len;
2635                 }
2636         } else {
2637                 uint64_t *op = mdata;
2638
2639                 /* Need space for storing generated mac */
2640                 space += 2 * sizeof(uint64_t);
2641
2642                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2643                 params.mac_buf.size = mac_len;
2644                 space += RTE_ALIGN_CEIL(mac_len, 8);
2645                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2646                 op[1] = mac_len;
2647                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2648         }
2649
2650         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2651         params.meta_buf.size -= space;
2652
2653         /* Out of place processing */
2654         params.src_iov = (void *)src;
2655
        /* Build the source SG I/O list from the mbuf chain */
2657         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2658                 plt_dp_err("Prepare src iov failed");
2659                 ret = -EINVAL;
2660                 goto free_mdata_and_exit;
2661         }
2662
2663         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2664         if (ret)
2665                 goto free_mdata_and_exit;
2666
2667         return 0;
2668
2669 free_mdata_and_exit:
2670         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2671                 rte_mempool_put(m_info->pool, infl_req->mdata);
2672 err_exit:
2673         return ret;
2674 }
2675 #endif /*_CNXK_SE_H_ */