[dpdk.git] drivers/crypto/cnxk/cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
12 #define SRC_IOV_SIZE                                                           \
13         (sizeof(struct roc_se_iov_ptr) +                                       \
14          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE                                                           \
16         (sizeof(struct roc_se_iov_ptr) +                                       \
17          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
18
19 struct cnxk_se_sess {
20         uint16_t cpt_op : 4;
21         uint16_t zsk_flag : 4;
22         uint16_t aes_gcm : 1;
23         uint16_t aes_ctr : 1;
24         uint16_t chacha_poly : 1;
25         uint16_t is_null : 1;
26         uint16_t is_gmac : 1;
27         uint16_t rsvd1 : 3;
28         uint16_t aad_length;
29         uint8_t mac_len;
30         uint8_t iv_length;
31         uint8_t auth_iv_length;
32         uint16_t iv_offset;
33         uint16_t auth_iv_offset;
34         uint32_t salt;
35         uint64_t cpt_inst_w7;
36         struct roc_se_ctx roc_se_ctx;
37 } __rte_cache_aligned;
38
39 static __rte_always_inline int
40 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess);
41
42 static inline void
43 cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst)
44 {
45         iv_dst[16] = iv_src[16];
46         /* Pack the last 8 bytes of the IV into 6 bytes,
47          * discarding the 2 MSBs of each byte.
48          */
49         iv_dst[17] = (((iv_src[17] & 0x3f) << 2) | ((iv_src[18] >> 4) & 0x3));
50         iv_dst[18] = (((iv_src[18] & 0xf) << 4) | ((iv_src[19] >> 2) & 0xf));
51         iv_dst[19] = (((iv_src[19] & 0x3) << 6) | (iv_src[20] & 0x3f));
52
53         iv_dst[20] = (((iv_src[21] & 0x3f) << 2) | ((iv_src[22] >> 4) & 0x3));
54         iv_dst[21] = (((iv_src[22] & 0xf) << 4) | ((iv_src[23] >> 2) & 0xf));
55         iv_dst[22] = (((iv_src[23] & 0x3) << 6) | (iv_src[24] & 0x3f));
56 }
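/*
 * Packing layout (informal sketch inferred from the shifts above; ':' is bit
 * concatenation, high to low): each of the 8 trailing IV bytes carries only
 * 6 significant bits, so they collapse into 6 bytes and a 25-byte IV becomes
 * 23 bytes:
 *
 *   dst[17] = src[17]<5:0> : src[18]<5:4>
 *   dst[18] = src[18]<3:0> : src[19]<5:2>
 *   dst[19] = src[19]<1:0> : src[20]<5:0>
 *   dst[20] = src[21]<5:0> : src[22]<5:4>
 *   dst[21] = src[22]<3:0> : src[23]<5:2>
 *   dst[22] = src[23]<1:0> : src[24]<5:0>
 */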
57
58 static inline void
59 pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type,
60              uint8_t pack_iv)
61 {
62         uint32_t *iv_s_temp, iv_temp[4];
63         int j;
64
65         if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
66                 /*
67                  * DPDK provides the IV as big-endian words in the order
68                  * IV3 IV2 IV1 IV0; the microcode (MC) expects IV0 IV1 IV2 IV3.
69                  */
70
71                 iv_s_temp = (uint32_t *)iv_s;
72
73                 for (j = 0; j < 4; j++)
74                         iv_temp[j] = iv_s_temp[3 - j];
75                 memcpy(iv_d, iv_temp, 16);
76         } else {
77                 /* ZUC doesn't need a swap */
78                 memcpy(iv_d, iv_s, 16);
79                 if (pack_iv)
80                         cpt_pack_iv(iv_s, iv_d);
81         }
82 }
83
84 static __rte_always_inline int
85 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
86 {
87         uint16_t mac_len = auth->digest_length;
88         int ret;
89
90         switch (auth->algo) {
91         case RTE_CRYPTO_AUTH_MD5:
92         case RTE_CRYPTO_AUTH_MD5_HMAC:
93                 ret = (mac_len == 16) ? 0 : -1;
94                 break;
95         case RTE_CRYPTO_AUTH_SHA1:
96         case RTE_CRYPTO_AUTH_SHA1_HMAC:
97                 ret = (mac_len == 20) ? 0 : -1;
98                 break;
99         case RTE_CRYPTO_AUTH_SHA224:
100         case RTE_CRYPTO_AUTH_SHA224_HMAC:
101                 ret = (mac_len == 28) ? 0 : -1;
102                 break;
103         case RTE_CRYPTO_AUTH_SHA256:
104         case RTE_CRYPTO_AUTH_SHA256_HMAC:
105                 ret = (mac_len == 32) ? 0 : -1;
106                 break;
107         case RTE_CRYPTO_AUTH_SHA384:
108         case RTE_CRYPTO_AUTH_SHA384_HMAC:
109                 ret = (mac_len == 48) ? 0 : -1;
110                 break;
111         case RTE_CRYPTO_AUTH_SHA512:
112         case RTE_CRYPTO_AUTH_SHA512_HMAC:
113                 ret = (mac_len == 64) ? 0 : -1;
114                 break;
115         case RTE_CRYPTO_AUTH_NULL:
116                 ret = 0;
117                 break;
118         default:
119                 ret = -1;
120         }
121
122         return ret;
123 }
124
125 static __rte_always_inline void
126 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
127 {
128         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
129         memcpy(fctx->enc.encr_iv, salt, 4);
130 }
131
132 static __rte_always_inline uint32_t
133 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
134              uint32_t size)
135 {
136         struct roc_se_sglist_comp *to = &list[i >> 2];
137
138         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
139         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
140         i++;
141         return i;
142 }
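/*
 * Indexing sketch (assuming four {len, ptr} slots per roc_se_sglist_comp, as
 * the i >> 2 / i % 4 arithmetic above implies): entry i lands in component
 * i >> 2, slot i % 4, e.g. i = 5 fills list[1], slot 1.  Both the 16-bit
 * length and the 64-bit pointer are stored big-endian for the microcode.
 */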
143
144 static __rte_always_inline uint32_t
145 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
146                       struct roc_se_buf_ptr *from)
147 {
148         struct roc_se_sglist_comp *to = &list[i >> 2];
149
150         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
151         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
152         i++;
153         return i;
154 }
155
156 static __rte_always_inline uint32_t
157 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
158                           struct roc_se_buf_ptr *from, uint32_t *psize)
159 {
160         struct roc_se_sglist_comp *to = &list[i >> 2];
161         uint32_t size = *psize;
162         uint32_t e_len;
163
164         e_len = (size > from->size) ? from->size : size;
165         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
166         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
167         *psize -= e_len;
168         i++;
169         return i;
170 }
171
172 /*
173  * Fill the SGIO list expected by the microcode (MC)
174  * from the IOV given by the user.
175  */
176 static __rte_always_inline uint32_t
177 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
178                       struct roc_se_iov_ptr *from, uint32_t from_offset,
179                       uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
180                       uint32_t extra_offset)
181 {
182         int32_t j;
183         uint32_t extra_len = extra_buf ? extra_buf->size : 0;
184         uint32_t size = *psize;
185         struct roc_se_buf_ptr *bufs;
186
187         bufs = from->bufs;
188         for (j = 0; (j < from->buf_cnt) && size; j++) {
189                 uint64_t e_vaddr;
190                 uint32_t e_len;
191                 struct roc_se_sglist_comp *to = &list[i >> 2];
192
193                 if (unlikely(from_offset)) {
194                         if (from_offset >= bufs[j].size) {
195                                 from_offset -= bufs[j].size;
196                                 continue;
197                         }
198                         e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
199                         e_len = (size > (bufs[j].size - from_offset)) ?
200                                         (bufs[j].size - from_offset) :
201                                         size;
202                         from_offset = 0;
203                 } else {
204                         e_vaddr = (uint64_t)bufs[j].vaddr;
205                         e_len = (size > bufs[j].size) ? bufs[j].size : size;
206                 }
207
208                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
209                 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
210
211                 if (extra_len && (e_len >= extra_offset)) {
212                         /* Break the data at given offset */
213                         uint32_t next_len = e_len - extra_offset;
214                         uint64_t next_vaddr = e_vaddr + extra_offset;
215
216                         if (!extra_offset) {
217                                 i--;
218                         } else {
219                                 e_len = extra_offset;
220                                 size -= e_len;
221                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
222                         }
223
224                         extra_len = RTE_MIN(extra_len, size);
225                         /* Insert extra data ptr */
226                         if (extra_len) {
227                                 i++;
228                                 to = &list[i >> 2];
229                                 to->u.s.len[i % 4] =
230                                         rte_cpu_to_be_16(extra_len);
231                                 to->ptr[i % 4] = rte_cpu_to_be_64(
232                                         (uint64_t)extra_buf->vaddr);
233                                 size -= extra_len;
234                         }
235
236                         next_len = RTE_MIN(next_len, size);
237                         /* insert the rest of the data */
238                         if (next_len) {
239                                 i++;
240                                 to = &list[i >> 2];
241                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
242                                 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
243                                 size -= next_len;
244                         }
245                         extra_len = 0;
246
247                 } else {
248                         size -= e_len;
249                 }
250                 if (extra_offset)
251                         extra_offset -= size;
252                 i++;
253         }
254
255         *psize = size;
256         return (uint32_t)i;
257 }
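/*
 * Worked example (hypothetical sizes): with *psize = 60, a single 64-byte
 * source buffer, extra_offset = 16 and a 20-byte extra_buf (AAD), the loop
 * emits three gather entries:
 *
 *   [buf, 16]  [extra_buf, 20]  [buf + 16, 24]
 *
 * i.e. the source buffer is split at extra_offset and the extra pointer is
 * spliced in between, consuming 16 + 20 + 24 = 60 bytes of *psize.
 */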
258
259 static __rte_always_inline int
260 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
261                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
262 {
263         void *m_vaddr = params->meta_buf.vaddr;
264         uint32_t size, i;
265         uint16_t data_len, mac_len, key_len;
266         roc_se_auth_type hash_type;
267         struct roc_se_ctx *ctx;
268         struct roc_se_sglist_comp *gather_comp;
269         struct roc_se_sglist_comp *scatter_comp;
270         uint8_t *in_buffer;
271         uint32_t g_size_bytes, s_size_bytes;
272         union cpt_inst_w4 cpt_inst_w4;
273
274         ctx = params->ctx_buf.vaddr;
275
276         hash_type = ctx->hash_type;
277         mac_len = ctx->mac_len;
278         key_len = ctx->auth_key_len;
279         data_len = ROC_SE_AUTH_DLEN(d_lens);
280
281         /* GP op header */
282         cpt_inst_w4.s.opcode_minor = 0;
283         cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
284         if (ctx->hmac) {
285                 cpt_inst_w4.s.opcode_major =
286                         ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
287                 cpt_inst_w4.s.param1 = key_len;
288                 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
289         } else {
290                 cpt_inst_w4.s.opcode_major =
291                         ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
292                 cpt_inst_w4.s.param1 = 0;
293                 cpt_inst_w4.s.dlen = data_len;
294         }
295
296         /* Only the completely NULL case (no cipher, no auth) enters this block */
297         if (unlikely(!hash_type && !ctx->enc_cipher)) {
298                 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
299                 /* Minor op is passthrough */
300                 cpt_inst_w4.s.opcode_minor = 0x03;
301                 /* Send out completion code only */
302                 cpt_inst_w4.s.param2 = 0x1;
303         }
304
305         /* DPTR has SG list */
306         in_buffer = m_vaddr;
307
308         ((uint16_t *)in_buffer)[0] = 0;
309         ((uint16_t *)in_buffer)[1] = 0;
310
311         /* TODO Add error check if space will be sufficient */
312         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
313
314         /*
315          * Input gather list
316          */
317
318         i = 0;
319
320         if (ctx->hmac) {
321                 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
322                 /* Key */
323                 i = fill_sg_comp(gather_comp, i, k_vaddr,
324                                  RTE_ALIGN_CEIL(key_len, 8));
325         }
326
327         /* input data */
328         size = data_len;
329         if (size) {
330                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
331                                           &size, NULL, 0);
332                 if (unlikely(size)) {
333                         plt_dp_err("Insufficient src IOV size, short by %dB",
334                                    size);
335                         return -1;
336                 }
337         } else {
338                 /*
339                  * Zero-length data: hash & HMAC still expect a
340                  * gather pointer entry, so account for one here.
341                  */
342                 i++;
343         }
344         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
345         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
346
347         /*
348          * Output Scatter list
349          */
350
351         i = 0;
352         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
353                                                      g_size_bytes);
354
355         if (flags & ROC_SE_VALID_MAC_BUF) {
356                 if (unlikely(params->mac_buf.size < mac_len)) {
357                         plt_dp_err("Insufficient MAC size");
358                         return -1;
359                 }
360
361                 size = mac_len;
362                 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
363                                               &size);
364         } else {
365                 size = mac_len;
366                 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
367                                           data_len, &size, NULL, 0);
368                 if (unlikely(size)) {
369                         plt_dp_err("Insufficient dst IOV size, short by %dB",
370                                    size);
371                         return -1;
372                 }
373         }
374
375         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
376         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
377
378         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
379
380         /* This is DPTR len in case of SG mode */
381         cpt_inst_w4.s.dlen = size;
382
383         inst->dptr = (uint64_t)in_buffer;
384         inst->w4.u64 = cpt_inst_w4.u64;
385
386         return 0;
387 }
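/*
 * Resulting DPTR layout in SG mode (sketch based on the code above, assuming
 * ROC_SE_SG_LIST_HDR_SIZE covers the 8-byte header):
 *
 *   +0  u16 header[4]: 0, 0, be16(gather entry count), be16(scatter entry count)
 *   +8  gather components  (g_size_bytes)
 *   +.. scatter components (s_size_bytes)
 *
 * cpt_inst_w4.s.dlen is the header plus both component lists.
 */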
388
389 static __rte_always_inline int
390 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
391                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
392 {
393         uint32_t iv_offset = 0;
394         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
395         struct roc_se_ctx *se_ctx;
396         uint32_t cipher_type, hash_type;
397         uint32_t mac_len, size;
398         uint8_t iv_len = 16;
399         struct roc_se_buf_ptr *aad_buf = NULL;
400         uint32_t encr_offset, auth_offset;
401         uint32_t encr_data_len, auth_data_len, aad_len = 0;
402         uint32_t passthrough_len = 0;
403         union cpt_inst_w4 cpt_inst_w4;
404         void *offset_vaddr;
405         uint8_t op_minor;
406
407         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
408         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
409         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
410         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
411         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
412                 /* We don't support both AAD and auth data separately */
413                 auth_data_len = 0;
414                 auth_offset = 0;
415                 aad_len = fc_params->aad_buf.size;
416                 aad_buf = &fc_params->aad_buf;
417         }
418         se_ctx = fc_params->ctx_buf.vaddr;
419         cipher_type = se_ctx->enc_cipher;
420         hash_type = se_ctx->hash_type;
421         mac_len = se_ctx->mac_len;
422         op_minor = se_ctx->template_w4.s.opcode_minor;
423
424         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
425                 iv_len = 0;
426                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
427         }
428
429         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
430                 /*
431                  * When AAD is given, data before encr_offset is passed through.
432                  * Since AAD comes as a separate pointer rather than an offset,
433                  * this is a special case: the input data is fragmented into
434                  * passthrough + encr_data and the AAD is inserted in between.
435                  */
436                 if (hash_type != ROC_SE_GMAC_TYPE) {
437                         passthrough_len = encr_offset;
438                         auth_offset = passthrough_len + iv_len;
439                         encr_offset = passthrough_len + aad_len + iv_len;
440                         auth_data_len = aad_len + encr_data_len;
441                 } else {
442                         passthrough_len = 16 + aad_len;
443                         auth_offset = passthrough_len + iv_len;
444                         auth_data_len = aad_len;
445                 }
446         } else {
447                 encr_offset += iv_len;
448                 auth_offset += iv_len;
449         }
450
451         /* Encryption */
452         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
453         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
454         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
455
456         if (hash_type == ROC_SE_GMAC_TYPE) {
457                 encr_offset = 0;
458                 encr_data_len = 0;
459         }
460
461         auth_dlen = auth_offset + auth_data_len;
462         enc_dlen = encr_data_len + encr_offset;
463         if (unlikely(encr_data_len & 0xf)) {
464                 if ((cipher_type == ROC_SE_DES3_CBC) ||
465                     (cipher_type == ROC_SE_DES3_ECB))
466                         enc_dlen =
467                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
468                 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
469                                 (cipher_type == ROC_SE_AES_ECB)))
470                         enc_dlen =
471                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
472         }
473
474         if (unlikely(auth_dlen > enc_dlen)) {
475                 inputlen = auth_dlen;
476                 outputlen = auth_dlen + mac_len;
477         } else {
478                 inputlen = enc_dlen;
479                 outputlen = enc_dlen + mac_len;
480         }
481
482         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
483                 outputlen = enc_dlen;
484
485         /* GP op header */
486         cpt_inst_w4.s.param1 = encr_data_len;
487         cpt_inst_w4.s.param2 = auth_data_len;
488
489         /*
490          * On cn9k and cn10k the IV and offset control word cannot be
491          * carried in the instruction itself and must be placed in the
492          * data buffer, so Direct mode processing is done only when
493          * headroom is available for them.
494          */
495         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
496                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
497                 void *dm_vaddr = fc_params->bufs[0].vaddr;
498
499                 /* Use Direct mode */
500
501                 offset_vaddr =
502                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
503
504                 /* DPTR */
505                 inst->dptr = (uint64_t)offset_vaddr;
506
507                 /* RPTR should just exclude offset control word */
508                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
509
510                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
511
512                 if (likely(iv_len)) {
513                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
514                                                       ROC_SE_OFF_CTRL_LEN);
515                         uint64_t *src = fc_params->iv_buf;
516                         dest[0] = src[0];
517                         dest[1] = src[1];
518                 }
519
520         } else {
521                 void *m_vaddr = fc_params->meta_buf.vaddr;
522                 uint32_t i, g_size_bytes, s_size_bytes;
523                 struct roc_se_sglist_comp *gather_comp;
524                 struct roc_se_sglist_comp *scatter_comp;
525                 uint8_t *in_buffer;
526
527                 /* This falls under strict SG mode */
528                 offset_vaddr = m_vaddr;
529                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
530
531                 m_vaddr = (uint8_t *)m_vaddr + size;
532
533                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
534
535                 if (likely(iv_len)) {
536                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
537                                                       ROC_SE_OFF_CTRL_LEN);
538                         uint64_t *src = fc_params->iv_buf;
539                         dest[0] = src[0];
540                         dest[1] = src[1];
541                 }
542
543                 /* DPTR has SG list */
544                 in_buffer = m_vaddr;
545
546                 ((uint16_t *)in_buffer)[0] = 0;
547                 ((uint16_t *)in_buffer)[1] = 0;
548
549                 /* TODO Add error check if space will be sufficient */
550                 gather_comp =
551                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
552
553                 /*
554                  * Input Gather List
555                  */
556
557                 i = 0;
558
559                 /* Offset control word that includes iv */
560                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
561                                  ROC_SE_OFF_CTRL_LEN + iv_len);
562
563                 /* Add input data */
564                 size = inputlen - iv_len;
565                 if (likely(size)) {
566                         uint32_t aad_offset = aad_len ? passthrough_len : 0;
567
568                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
569                                 i = fill_sg_comp_from_buf_min(
570                                         gather_comp, i, fc_params->bufs, &size);
571                         } else {
572                                 i = fill_sg_comp_from_iov(
573                                         gather_comp, i, fc_params->src_iov, 0,
574                                         &size, aad_buf, aad_offset);
575                         }
576
577                         if (unlikely(size)) {
578                                 plt_dp_err("Insufficient buffer space,"
579                                            " size %d needed",
580                                            size);
581                                 return -1;
582                         }
583                 }
584                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
585                 g_size_bytes =
586                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
587
588                 /*
589                  * Output Scatter list
590                  */
591                 i = 0;
592                 scatter_comp =
593                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
594                                                       g_size_bytes);
595
596                 /* Add IV */
597                 if (likely(iv_len)) {
598                         i = fill_sg_comp(scatter_comp, i,
599                                          (uint64_t)offset_vaddr +
600                                                  ROC_SE_OFF_CTRL_LEN,
601                                          iv_len);
602                 }
603
604                 /* Output data, or output data + digest */
605                 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
606                         size = outputlen - iv_len - mac_len;
607                         if (size) {
608                                 uint32_t aad_offset =
609                                         aad_len ? passthrough_len : 0;
610
611                                 if (unlikely(flags &
612                                              ROC_SE_SINGLE_BUF_INPLACE)) {
613                                         i = fill_sg_comp_from_buf_min(
614                                                 scatter_comp, i,
615                                                 fc_params->bufs, &size);
616                                 } else {
617                                         i = fill_sg_comp_from_iov(
618                                                 scatter_comp, i,
619                                                 fc_params->dst_iov, 0, &size,
620                                                 aad_buf, aad_offset);
621                                 }
622                                 if (unlikely(size)) {
623                                         plt_dp_err("Insufficient buffer"
624                                                    " space, size %d needed",
625                                                    size);
626                                         return -1;
627                                 }
628                         }
629                         /* mac_data */
630                         if (mac_len) {
631                                 i = fill_sg_comp_from_buf(scatter_comp, i,
632                                                           &fc_params->mac_buf);
633                         }
634                 } else {
635                         /* Output including mac */
636                         size = outputlen - iv_len;
637                         if (likely(size)) {
638                                 uint32_t aad_offset =
639                                         aad_len ? passthrough_len : 0;
640
641                                 if (unlikely(flags &
642                                              ROC_SE_SINGLE_BUF_INPLACE)) {
643                                         i = fill_sg_comp_from_buf_min(
644                                                 scatter_comp, i,
645                                                 fc_params->bufs, &size);
646                                 } else {
647                                         i = fill_sg_comp_from_iov(
648                                                 scatter_comp, i,
649                                                 fc_params->dst_iov, 0, &size,
650                                                 aad_buf, aad_offset);
651                                 }
652                                 if (unlikely(size)) {
653                                         plt_dp_err("Insufficient buffer"
654                                                    " space, size %d needed",
655                                                    size);
656                                         return -1;
657                                 }
658                         }
659                 }
660                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
661                 s_size_bytes =
662                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
663
664                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
665
666                 /* This is DPTR len in case of SG mode */
667                 cpt_inst_w4.s.dlen = size;
668
669                 inst->dptr = (uint64_t)in_buffer;
670         }
671
672         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
673                      (auth_offset >> 8))) {
674                 plt_dp_err("Offset not supported");
675                 plt_dp_err("enc_offset: %d", encr_offset);
676                 plt_dp_err("iv_offset : %d", iv_offset);
677                 plt_dp_err("auth_offset: %d", auth_offset);
678                 return -1;
679         }
680
681         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
682                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
683                 ((uint64_t)auth_offset));
684
685         inst->w4.u64 = cpt_inst_w4.u64;
686         return 0;
687 }
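/*
 * Offset control word sketch (fields as packed above): a 64-bit word holding
 * encr_offset << 16 | iv_offset << 8 | auth_offset, stored big-endian.  For
 * example, encr_offset = 24, iv_offset = 0 and auth_offset = 8 give
 * 0x0000000000180008 before the byte swap.
 */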
688
689 static __rte_always_inline int
690 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
691                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
692 {
693         uint32_t iv_offset = 0, size;
694         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
695         struct roc_se_ctx *se_ctx;
696         int32_t hash_type, mac_len;
697         uint8_t iv_len = 16;
698         struct roc_se_buf_ptr *aad_buf = NULL;
699         uint32_t encr_offset, auth_offset;
700         uint32_t encr_data_len, auth_data_len, aad_len = 0;
701         uint32_t passthrough_len = 0;
702         union cpt_inst_w4 cpt_inst_w4;
703         void *offset_vaddr;
704         uint8_t op_minor;
705
706         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
707         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
708         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
709         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
710
711         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
712                 /* We don't support both AAD and auth data separately */
713                 auth_data_len = 0;
714                 auth_offset = 0;
715                 aad_len = fc_params->aad_buf.size;
716                 aad_buf = &fc_params->aad_buf;
717         }
718
719         se_ctx = fc_params->ctx_buf.vaddr;
720         hash_type = se_ctx->hash_type;
721         mac_len = se_ctx->mac_len;
722         op_minor = se_ctx->template_w4.s.opcode_minor;
723
724         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
725                 iv_len = 0;
726                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
727         }
728
729         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
730                 /*
731                  * When AAD is given, data before encr_offset is passed through.
732                  * Since AAD comes as a separate pointer rather than an offset,
733                  * this is a special case: the input data is fragmented into
734                  * passthrough + encr_data and the AAD is inserted in between.
735                  */
736                 if (hash_type != ROC_SE_GMAC_TYPE) {
737                         passthrough_len = encr_offset;
738                         auth_offset = passthrough_len + iv_len;
739                         encr_offset = passthrough_len + aad_len + iv_len;
740                         auth_data_len = aad_len + encr_data_len;
741                 } else {
742                         passthrough_len = 16 + aad_len;
743                         auth_offset = passthrough_len + iv_len;
744                         auth_data_len = aad_len;
745                 }
746         } else {
747                 encr_offset += iv_len;
748                 auth_offset += iv_len;
749         }
750
751         /* Decryption */
752         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
753         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
754         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
755
756         if (hash_type == ROC_SE_GMAC_TYPE) {
757                 encr_offset = 0;
758                 encr_data_len = 0;
759         }
760
761         enc_dlen = encr_offset + encr_data_len;
762         auth_dlen = auth_offset + auth_data_len;
763
764         if (auth_dlen > enc_dlen) {
765                 inputlen = auth_dlen + mac_len;
766                 outputlen = auth_dlen;
767         } else {
768                 inputlen = enc_dlen + mac_len;
769                 outputlen = enc_dlen;
770         }
771
772         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
773                 outputlen = inputlen = enc_dlen;
774
775         cpt_inst_w4.s.param1 = encr_data_len;
776         cpt_inst_w4.s.param2 = auth_data_len;
777
778         /*
779          * On cn9k and cn10k the IV and offset control word cannot be
780          * carried in the instruction itself and must be placed in the
781          * data buffer, so Direct mode processing is done only when
782          * headroom is available for them.
783          */
784         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
785                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
786                 void *dm_vaddr = fc_params->bufs[0].vaddr;
787
788                 /* Use Direct mode */
789
790                 offset_vaddr =
791                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
792                 inst->dptr = (uint64_t)offset_vaddr;
793
794                 /* RPTR should just exclude offset control word */
795                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
796
797                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
798
799                 if (likely(iv_len)) {
800                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
801                                                       ROC_SE_OFF_CTRL_LEN);
802                         uint64_t *src = fc_params->iv_buf;
803                         dest[0] = src[0];
804                         dest[1] = src[1];
805                 }
806
807         } else {
808                 void *m_vaddr = fc_params->meta_buf.vaddr;
809                 uint32_t g_size_bytes, s_size_bytes;
810                 struct roc_se_sglist_comp *gather_comp;
811                 struct roc_se_sglist_comp *scatter_comp;
812                 uint8_t *in_buffer;
813                 uint8_t i = 0;
814
815                 /* This falls under strict SG mode */
816                 offset_vaddr = m_vaddr;
817                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
818
819                 m_vaddr = (uint8_t *)m_vaddr + size;
820
821                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
822
823                 if (likely(iv_len)) {
824                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
825                                                       ROC_SE_OFF_CTRL_LEN);
826                         uint64_t *src = fc_params->iv_buf;
827                         dest[0] = src[0];
828                         dest[1] = src[1];
829                 }
830
831                 /* DPTR has SG list */
832                 in_buffer = m_vaddr;
833
834                 ((uint16_t *)in_buffer)[0] = 0;
835                 ((uint16_t *)in_buffer)[1] = 0;
836
837                 /* TODO Add error check if space will be sufficient */
838                 gather_comp =
839                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
840
841                 /*
842                  * Input Gather List
843                  */
844                 i = 0;
845
846                 /* Offset control word that includes iv */
847                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
848                                  ROC_SE_OFF_CTRL_LEN + iv_len);
849
850                 /* Add input data */
851                 if (flags & ROC_SE_VALID_MAC_BUF) {
852                         size = inputlen - iv_len - mac_len;
853                         if (size) {
854                                 /* input data only */
855                                 if (unlikely(flags &
856                                              ROC_SE_SINGLE_BUF_INPLACE)) {
857                                         i = fill_sg_comp_from_buf_min(
858                                                 gather_comp, i, fc_params->bufs,
859                                                 &size);
860                                 } else {
861                                         uint32_t aad_offset =
862                                                 aad_len ? passthrough_len : 0;
863
864                                         i = fill_sg_comp_from_iov(
865                                                 gather_comp, i,
866                                                 fc_params->src_iov, 0, &size,
867                                                 aad_buf, aad_offset);
868                                 }
869                                 if (unlikely(size)) {
870                                         plt_dp_err("Insufficient buffer"
871                                                    " space, size %d needed",
872                                                    size);
873                                         return -1;
874                                 }
875                         }
876
877                         /* mac data */
878                         if (mac_len) {
879                                 i = fill_sg_comp_from_buf(gather_comp, i,
880                                                           &fc_params->mac_buf);
881                         }
882                 } else {
883                         /* input data + mac */
884                         size = inputlen - iv_len;
885                         if (size) {
886                                 if (unlikely(flags &
887                                              ROC_SE_SINGLE_BUF_INPLACE)) {
888                                         i = fill_sg_comp_from_buf_min(
889                                                 gather_comp, i, fc_params->bufs,
890                                                 &size);
891                                 } else {
892                                         uint32_t aad_offset =
893                                                 aad_len ? passthrough_len : 0;
894
895                                         if (unlikely(!fc_params->src_iov)) {
896                                                 plt_dp_err("Bad input args");
897                                                 return -1;
898                                         }
899
900                                         i = fill_sg_comp_from_iov(
901                                                 gather_comp, i,
902                                                 fc_params->src_iov, 0, &size,
903                                                 aad_buf, aad_offset);
904                                 }
905
906                                 if (unlikely(size)) {
907                                         plt_dp_err("Insufficient buffer"
908                                                    " space, size %d needed",
909                                                    size);
910                                         return -1;
911                                 }
912                         }
913                 }
914                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
915                 g_size_bytes =
916                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
917
918                 /*
919                  * Output Scatter List
920                  */
921
922                 i = 0;
923                 scatter_comp =
924                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
925                                                       g_size_bytes);
926
927                 /* Add iv */
928                 if (iv_len) {
929                         i = fill_sg_comp(scatter_comp, i,
930                                          (uint64_t)offset_vaddr +
931                                                  ROC_SE_OFF_CTRL_LEN,
932                                          iv_len);
933                 }
934
935                 /* Add output data */
936                 size = outputlen - iv_len;
937                 if (size) {
938                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
939                                 /* handle single buffer here */
940                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
941                                                               fc_params->bufs,
942                                                               &size);
943                         } else {
944                                 uint32_t aad_offset =
945                                         aad_len ? passthrough_len : 0;
946
947                                 if (unlikely(!fc_params->dst_iov)) {
948                                         plt_dp_err("Bad input args");
949                                         return -1;
950                                 }
951
952                                 i = fill_sg_comp_from_iov(
953                                         scatter_comp, i, fc_params->dst_iov, 0,
954                                         &size, aad_buf, aad_offset);
955                         }
956
957                         if (unlikely(size)) {
958                                 plt_dp_err("Insufficient buffer space,"
959                                            " size %d needed",
960                                            size);
961                                 return -1;
962                         }
963                 }
964
965                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
966                 s_size_bytes =
967                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
968
969                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
970
971                 /* This is DPTR len in case of SG mode */
972                 cpt_inst_w4.s.dlen = size;
973
974                 inst->dptr = (uint64_t)in_buffer;
975         }
976
977         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
978                      (auth_offset >> 8))) {
979                 plt_dp_err("Offset not supported");
980                 plt_dp_err("enc_offset: %d", encr_offset);
981                 plt_dp_err("iv_offset : %d", iv_offset);
982                 plt_dp_err("auth_offset: %d", auth_offset);
983                 return -1;
984         }
985
986         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
987                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
988                 ((uint64_t)auth_offset));
989
990         inst->w4.u64 = cpt_inst_w4.u64;
991         return 0;
992 }
993
994 static __rte_always_inline int
995 cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
996                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
997 {
998         uint32_t size;
999         int32_t inputlen, outputlen;
1000         struct roc_se_ctx *se_ctx;
1001         uint32_t mac_len = 0;
1002         uint8_t pdcp_alg_type;
1003         uint32_t encr_offset, auth_offset;
1004         uint32_t encr_data_len, auth_data_len;
1005         int flags, iv_len;
1006         uint64_t offset_ctrl;
1007         uint64_t *offset_vaddr;
1008         uint8_t *iv_s;
1009         uint8_t pack_iv = 0;
1010         union cpt_inst_w4 cpt_inst_w4;
1011
1012         se_ctx = params->ctx_buf.vaddr;
1013         flags = se_ctx->zsk_flags;
1014         mac_len = se_ctx->mac_len;
1015         pdcp_alg_type = se_ctx->pdcp_alg_type;
1016
1017         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
1018
1019         cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;
1020
1021         if (flags == 0x1) {
1022                 iv_s = params->auth_iv_buf;
1023                 iv_len = params->auth_iv_len;
1024
1025                 if (iv_len == 25) {
1026                         iv_len -= 2;
1027                         pack_iv = 1;
1028                 }
1029
1030                 /*
1031                  * Microcode expects offsets in bytes
1032                  * TODO: Rounding off
1033                  */
1034                 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1035
1036                 /* EIA3 or UIA2 */
1037                 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
1038                 auth_offset = auth_offset / 8;
1039
1040                 /* consider iv len */
1041                 auth_offset += iv_len;
1042
1043                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1044                 outputlen = mac_len;
1045
1046                 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1047
1048                 encr_data_len = 0;
1049                 encr_offset = 0;
1050         } else {
1051                 iv_s = params->iv_buf;
1052                 iv_len = params->cipher_iv_len;
1053
1054                 if (iv_len == 25) {
1055                         iv_len -= 2;
1056                         pack_iv = 1;
1057                 }
1058
1059                 /* EEA3 or UEA2 */
1060                 /*
1061                  * Microcode expects offsets in bytes
1062                  * TODO: Rounding off
1063                  */
1064                 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1065
1066                 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
1067                 encr_offset = encr_offset / 8;
1068                 /* consider iv len */
1069                 encr_offset += iv_len;
1070
1071                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1072                 outputlen = inputlen;
1073
1074                 /* iv offset is 0 */
1075                 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1076
1077                 auth_data_len = 0;
1078                 auth_offset = 0;
1079         }
1080
1081         if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
1082                 plt_dp_err("Offset not supported");
1083                 plt_dp_err("enc_offset: %d", encr_offset);
1084                 plt_dp_err("auth_offset: %d", auth_offset);
1085                 return -1;
1086         }
1087
1088         /*
1089          * GP op header, lengths are expected in bits.
1090          */
1091         cpt_inst_w4.s.param1 = encr_data_len;
1092         cpt_inst_w4.s.param2 = auth_data_len;
1093
1094         /*
1095          * On cn9k and cn10k the IV and offset control word cannot be
1096          * carried in the instruction itself and must be placed in the
1097          * data buffer, so Direct mode processing is done only when
1098          * headroom is available for them.
1099          */
1100         if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1101                    (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1102                 void *dm_vaddr = params->bufs[0].vaddr;
1103
1104                 /* Use Direct mode */
1105
1106                 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1107                                             ROC_SE_OFF_CTRL_LEN - iv_len);
1108
1109                 /* DPTR */
1110                 inst->dptr = (uint64_t)offset_vaddr;
1111                 /* RPTR should just exclude offset control word */
1112                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1113
1114                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1115
1116                 uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
1117                 pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);
1118
1119                 *offset_vaddr = offset_ctrl;
1120         } else {
1121                 void *m_vaddr = params->meta_buf.vaddr;
1122                 uint32_t i, g_size_bytes, s_size_bytes;
1123                 struct roc_se_sglist_comp *gather_comp;
1124                 struct roc_se_sglist_comp *scatter_comp;
1125                 uint8_t *in_buffer;
1126                 uint8_t *iv_d;
1127
1128                 /* Save space for the offset control word and iv */
1129                 offset_vaddr = m_vaddr;
1130
1131                 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN +
1132                           RTE_ALIGN_CEIL(iv_len, 8);
1133
1134                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1135
1136                 /* DPTR has SG list */
1137                 in_buffer = m_vaddr;
1138
1139                 ((uint16_t *)in_buffer)[0] = 0;
1140                 ((uint16_t *)in_buffer)[1] = 0;
1141
1142                 /* TODO Add error check if space will be sufficient */
1143                 gather_comp =
1144                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1145
1146                 /*
1147                  * Input Gather List
1148                  */
1149                 i = 0;
1150
1151                 /* Offset control word followed by iv */
1152
1153                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1154                                  ROC_SE_OFF_CTRL_LEN + iv_len);
1155
1156                 /* iv offset is 0 */
1157                 *offset_vaddr = offset_ctrl;
1158
1159                 iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
1160                 pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);
1161
1162                 /* input data */
1163                 size = inputlen - iv_len;
1164                 if (size) {
1165                         i = fill_sg_comp_from_iov(gather_comp, i,
1166                                                   params->src_iov, 0, &size,
1167                                                   NULL, 0);
1168                         if (unlikely(size)) {
1169                                 plt_dp_err("Insufficient buffer space,"
1170                                            " size %d needed",
1171                                            size);
1172                                 return -1;
1173                         }
1174                 }
1175                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1176                 g_size_bytes =
1177                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1178
1179                 /*
1180                  * Output Scatter List
1181                  */
1182
1183                 i = 0;
1184                 scatter_comp =
1185                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1186                                                       g_size_bytes);
1187
1188                 if (flags == 0x1) {
1189                         /* IV in SLIST only for EEA3 & UEA2 */
1190                         iv_len = 0;
1191                 }
1192
1193                 if (iv_len) {
1194                         i = fill_sg_comp(scatter_comp, i,
1195                                          (uint64_t)offset_vaddr +
1196                                                  ROC_SE_OFF_CTRL_LEN,
1197                                          iv_len);
1198                 }
1199
1200                 /* Add output data */
1201                 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1202                         size = outputlen - iv_len - mac_len;
1203                         if (size) {
1204                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1205                                                           params->dst_iov, 0,
1206                                                           &size, NULL, 0);
1207
1208                                 if (unlikely(size)) {
1209                                         plt_dp_err("Insufficient buffer space,"
1210                                                    " size %d needed",
1211                                                    size);
1212                                         return -1;
1213                                 }
1214                         }
1215
1216                         /* mac data */
1217                         if (mac_len) {
1218                                 i = fill_sg_comp_from_buf(scatter_comp, i,
1219                                                           &params->mac_buf);
1220                         }
1221                 } else {
1222                         /* Output including mac */
1223                         size = outputlen - iv_len;
1224                         if (size) {
1225                                 i = fill_sg_comp_from_iov(scatter_comp, i,
1226                                                           params->dst_iov, 0,
1227                                                           &size, NULL, 0);
1228
1229                                 if (unlikely(size)) {
1230                                         plt_dp_err("Insufficient buffer space,"
1231                                                    " size %d needed",
1232                                                    size);
1233                                         return -1;
1234                                 }
1235                         }
1236                 }
1237                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1238                 s_size_bytes =
1239                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1240
1241                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1242
1243                 /* This is DPTR len in case of SG mode */
1244                 cpt_inst_w4.s.dlen = size;
1245
1246                 inst->dptr = (uint64_t)in_buffer;
1247         }
1248
1249         inst->w4.u64 = cpt_inst_w4.u64;
1250
1251         return 0;
1252 }
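/*
 * Length bookkeeping sketch (illustrative numbers): offsets arrive in bits
 * and are converted to bytes, while param1/param2 keep the bit lengths.  A
 * cipher-only op with ROC_SE_ENCR_OFFSET(d_offs) = 64 bits, a 16-byte IV and
 * encr_data_len = 100 bits gives encr_offset = 64/8 + 16 = 24 bytes and
 * inputlen = 24 + RTE_ALIGN(100, 8)/8 = 24 + 13 = 37 bytes.
 */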
1253
1254 static __rte_always_inline int
1255 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1256                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1257 {
1258         void *m_vaddr = params->meta_buf.vaddr;
1259         uint32_t size;
1260         int32_t inputlen = 0, outputlen = 0;
1261         struct roc_se_ctx *se_ctx;
1262         uint32_t mac_len = 0;
1263         uint8_t i = 0;
1264         uint32_t encr_offset, auth_offset;
1265         uint32_t encr_data_len, auth_data_len;
1266         int flags;
1267         uint8_t *iv_s, *iv_d, iv_len = 8;
1268         uint8_t dir = 0;
1269         uint64_t *offset_vaddr;
1270         union cpt_inst_w4 cpt_inst_w4;
1271         uint8_t *in_buffer;
1272         uint32_t g_size_bytes, s_size_bytes;
1273         struct roc_se_sglist_comp *gather_comp;
1274         struct roc_se_sglist_comp *scatter_comp;
1275
1276         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1277         auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1278         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1279         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1280
1281         se_ctx = params->ctx_buf.vaddr;
1282         flags = se_ctx->zsk_flags;
1283         mac_len = se_ctx->mac_len;
1284
1285         if (flags == 0x0)
1286                 iv_s = params->iv_buf;
1287         else
1288                 iv_s = params->auth_iv_buf;
1289
1290         dir = iv_s[8] & 0x1;
1291
1292         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1293
1294         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1295         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1296                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1297
1298         /*
1299          * GP op header, lengths are expected in bits.
1300          */
1301         cpt_inst_w4.s.param1 = encr_data_len;
1302         cpt_inst_w4.s.param2 = auth_data_len;
1303
1304         /* consider iv len */
1305         if (flags == 0x0) {
1306                 encr_offset += iv_len;
1307                 auth_offset += iv_len;
1308         }
1309
1310         /* save space for offset ctrl and iv */
1311         offset_vaddr = m_vaddr;
1312
1313         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1314
1315         /* DPTR has SG list */
1316         in_buffer = m_vaddr;
1317
1318         ((uint16_t *)in_buffer)[0] = 0;
1319         ((uint16_t *)in_buffer)[1] = 0;
1320
1321         /* TODO Add error check if space will be sufficient */
1322         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1323
1324         /*
1325          * Input Gather List
1326          */
1327         i = 0;
1328
1329         /* Offset control word followed by iv */
1330
1331         if (flags == 0x0) {
1332                 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1333                 outputlen = inputlen;
1334                 /* iv offset is 0 */
1335                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1336                 if (unlikely((encr_offset >> 16))) {
1337                         plt_dp_err("Offset not supported");
1338                         plt_dp_err("enc_offset: %d", encr_offset);
1339                         return -1;
1340                 }
1341         } else {
1342                 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1343                 outputlen = mac_len;
1344                 /* iv offset is 0 */
1345                 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1346                 if (unlikely((auth_offset >> 8))) {
1347                         plt_dp_err("Offset not supported");
1348                         plt_dp_err("auth_offset: %d", auth_offset);
1349                         return -1;
1350                 }
1351         }
1352
1353         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1354                          ROC_SE_OFF_CTRL_LEN + iv_len);
1355
1356         /* IV */
1357         iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1358         memcpy(iv_d, iv_s, iv_len);
1359
1360         /* input data */
1361         size = inputlen - iv_len;
1362         if (size) {
1363                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1364                                           &size, NULL, 0);
1365
1366                 if (unlikely(size)) {
1367                         plt_dp_err("Insufficient buffer space,"
1368                                    " size %d needed",
1369                                    size);
1370                         return -1;
1371                 }
1372         }
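             /* SG header half-word 2 holds the gather entry count (big-endian);
              * entries are packed four per roc_se_sglist_comp.
              */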
1373         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1374         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1375
1376         /*
1377          * Output Scatter List
1378          */
1379
1380         i = 0;
1381         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1382                                                      g_size_bytes);
1383
1384         if (flags == 0x1) {
1385                 /* IV goes in the scatter list only for F8 (cipher); not for F9 (auth) */
1386                 iv_len = 0;
1387         }
1388
1389         /* IV */
1390         if (iv_len) {
1391                 i = fill_sg_comp(scatter_comp, i,
1392                                  (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1393                                  iv_len);
1394         }
1395
1396         /* Add output data */
1397         if (req_flags & ROC_SE_VALID_MAC_BUF) {
1398                 size = outputlen - iv_len - mac_len;
1399                 if (size) {
1400                         i = fill_sg_comp_from_iov(scatter_comp, i,
1401                                                   params->dst_iov, 0, &size,
1402                                                   NULL, 0);
1403
1404                         if (unlikely(size)) {
1405                                 plt_dp_err("Insufficient buffer space,"
1406                                            " size %d needed",
1407                                            size);
1408                                 return -1;
1409                         }
1410                 }
1411
1412                 /* mac data */
1413                 if (mac_len) {
1414                         i = fill_sg_comp_from_buf(scatter_comp, i,
1415                                                   &params->mac_buf);
1416                 }
1417         } else {
1418                 /* Output including mac */
1419                 size = outputlen - iv_len;
1420                 if (size) {
1421                         i = fill_sg_comp_from_iov(scatter_comp, i,
1422                                                   params->dst_iov, 0, &size,
1423                                                   NULL, 0);
1424
1425                         if (unlikely(size)) {
1426                                 plt_dp_err("Insufficient buffer space,"
1427                                            " size %d needed",
1428                                            size);
1429                                 return -1;
1430                         }
1431                 }
1432         }
1433         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1434         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1435
1436         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1437
1438         /* This is DPTR len in case of SG mode */
1439         cpt_inst_w4.s.dlen = size;
1440
1441         inst->dptr = (uint64_t)in_buffer;
1442         inst->w4.u64 = cpt_inst_w4.u64;
1443
1444         return 0;
1445 }
1446
1447 static __rte_always_inline int
1448 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1449                     struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1450 {
1451         void *m_vaddr = params->meta_buf.vaddr;
1452         uint32_t size;
1453         int32_t inputlen = 0, outputlen;
1454         struct roc_se_ctx *se_ctx;
1455         uint8_t i = 0, iv_len = 8;
1456         uint32_t encr_offset;
1457         uint32_t encr_data_len;
1458         int flags;
1459         uint8_t dir = 0;
1460         uint64_t *offset_vaddr;
1461         union cpt_inst_w4 cpt_inst_w4;
1462         uint8_t *in_buffer;
1463         uint32_t g_size_bytes, s_size_bytes;
1464         struct roc_se_sglist_comp *gather_comp;
1465         struct roc_se_sglist_comp *scatter_comp;
1466
1467         encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1468         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1469
1470         se_ctx = params->ctx_buf.vaddr;
1471         flags = se_ctx->zsk_flags;
1472
1473         cpt_inst_w4.u64 = 0;
1474         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1475
1476         /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1477         cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1478                                       (dir << 4) | (0 << 3) | (flags & 0x7));
1479
1480         /*
1481          * GP op header, lengths are expected in bits.
1482          */
1483         cpt_inst_w4.s.param1 = encr_data_len;
1484
1485         /* consider iv len */
1486         encr_offset += iv_len;
1487
1488         inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1489         outputlen = inputlen;
1490
1491         /* save space for offset ctrl & iv */
1492         offset_vaddr = m_vaddr;
1493
1494         m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1495
1496         /* DPTR has SG list */
1497         in_buffer = m_vaddr;
1498
1499         ((uint16_t *)in_buffer)[0] = 0;
1500         ((uint16_t *)in_buffer)[1] = 0;
1501
1502         /* TODO Add error check if space will be sufficient */
1503         gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1504
1505         /*
1506          * Input Gather List
1507          */
1508         i = 0;
1509
1510         /* Offset control word followed by iv */
1511         *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1512         if (unlikely((encr_offset >> 16))) {
1513                 plt_dp_err("Offset not supported");
1514                 plt_dp_err("enc_offset: %d", encr_offset);
1515                 return -1;
1516         }
1517
1518         i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1519                          ROC_SE_OFF_CTRL_LEN + iv_len);
1520
1521         /* IV */
1522         memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1523                iv_len);
1524
1525         /* Add input data */
1526         size = inputlen - iv_len;
1527         if (size) {
1528                 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1529                                           &size, NULL, 0);
1530                 if (unlikely(size)) {
1531                         plt_dp_err("Insufficient buffer space,"
1532                                    " size %d needed",
1533                                    size);
1534                         return -1;
1535                 }
1536         }
1537         ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1538         g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1539
1540         /*
1541          * Output Scatter List
1542          */
1543
1544         i = 0;
1545         scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1546                                                      g_size_bytes);
1547
1548         /* IV */
1549         i = fill_sg_comp(scatter_comp, i,
1550                          (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1551
1552         /* Add output data */
1553         size = outputlen - iv_len;
1554         if (size) {
1555                 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1556                                           &size, NULL, 0);
1557                 if (unlikely(size)) {
1558                         plt_dp_err("Insufficient buffer space,"
1559                                    " size %d needed",
1560                                    size);
1561                         return -1;
1562                 }
1563         }
1564         ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1565         s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1566
1567         size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1568
1569         /* This is DPTR len in case of SG mode */
1570         cpt_inst_w4.s.dlen = size;
1571
1572         inst->dptr = (uint64_t)in_buffer;
1573         inst->w4.u64 = cpt_inst_w4.u64;
1574
1575         return 0;
1576 }
1577
1578 static __rte_always_inline int
1579 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1580                      struct roc_se_fc_params *fc_params,
1581                      struct cpt_inst_s *inst)
1582 {
1583         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1584         uint8_t fc_type;
1585         int ret = -1;
1586
1587         fc_type = ctx->fc_type;
1588
1589         if (likely(fc_type == ROC_SE_FC_GEN)) {
1590                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1591         } else if (fc_type == ROC_SE_PDCP) {
1592                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1593                                           inst);
1594         } else if (fc_type == ROC_SE_KASUMI) {
1595                 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1596         }
1597
1598         /*
1599          * For the AUTH_ONLY case, the microcode supports only
1600          * digest generation; verification must be done in
1601          * software using memcmp().
1602          */
1603
1604         return ret;
1605 }
1606
1607 static __rte_always_inline int
1608 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1609                      struct roc_se_fc_params *fc_params,
1610                      struct cpt_inst_s *inst)
1611 {
1612         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1613         uint8_t fc_type;
1614         int ret = -1;
1615
1616         fc_type = ctx->fc_type;
1617
1618         if (likely(fc_type == ROC_SE_FC_GEN)) {
1619                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1620         } else if (fc_type == ROC_SE_PDCP) {
1621                 ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
1622                                           inst);
1623         } else if (fc_type == ROC_SE_KASUMI) {
1624                 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1625                                           inst);
1626         } else if (fc_type == ROC_SE_HASH_HMAC) {
1627                 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1628         }
1629
1630         return ret;
1631 }
1632
1633 static __rte_always_inline int
1634 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1635 {
1636         struct rte_crypto_aead_xform *aead_form;
1637         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1638         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1639         uint32_t cipher_key_len = 0;
1640         uint8_t aes_gcm = 0;
1641         aead_form = &xform->aead;
1642
1643         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1644                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1645                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1646         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1647                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1648                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1649         } else {
1650                 plt_dp_err("Unknown aead operation");
1651                 return -1;
1652         }
1653         switch (aead_form->algo) {
1654         case RTE_CRYPTO_AEAD_AES_GCM:
1655                 enc_type = ROC_SE_AES_GCM;
1656                 cipher_key_len = 16;
1657                 aes_gcm = 1;
1658                 break;
1659         case RTE_CRYPTO_AEAD_AES_CCM:
1660                 plt_dp_err("Crypto: Unsupported AEAD algo %u",
1661                            aead_form->algo);
1662                 return -1;
1663         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1664                 enc_type = ROC_SE_CHACHA20;
1665                 auth_type = ROC_SE_POLY1305;
1666                 cipher_key_len = 32;
1667                 sess->chacha_poly = 1;
1668                 break;
1669         default:
1670                 plt_dp_err("Crypto: Undefined AEAD algo %u specified",
1671                            aead_form->algo);
1672                 return -1;
1673         }
1674         if (aead_form->key.length < cipher_key_len) {
1675                 plt_dp_err("Invalid cipher params keylen %u",
1676                            aead_form->key.length);
1677                 return -1;
1678         }
1679         sess->zsk_flag = 0;
1680         sess->aes_gcm = aes_gcm;
1681         sess->mac_len = aead_form->digest_length;
1682         sess->iv_offset = aead_form->iv.offset;
1683         sess->iv_length = aead_form->iv.length;
1684         sess->aad_length = aead_form->aad_length;
1685
1686         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1687                                          aead_form->key.data,
1688                                          aead_form->key.length, NULL)))
1689                 return -1;
1690
1691         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1692                                          aead_form->digest_length)))
1693                 return -1;
1694
1695         return 0;
1696 }
1697
1698 static __rte_always_inline int
1699 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1700 {
1701         struct rte_crypto_cipher_xform *c_form;
1702         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1703         uint32_t cipher_key_len = 0;
1704         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1705
1706         c_form = &xform->cipher;
1707
1708         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1709                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1710         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1711                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1712                 if (xform->next != NULL &&
1713                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1714                         /* Perform decryption followed by auth verify */
1715                         sess->roc_se_ctx.template_w4.s.opcode_minor =
1716                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1717                 }
1718         } else {
1719                 plt_dp_err("Unknown cipher operation");
1720                 return -1;
1721         }
1722
1723         switch (c_form->algo) {
1724         case RTE_CRYPTO_CIPHER_AES_CBC:
1725                 enc_type = ROC_SE_AES_CBC;
1726                 cipher_key_len = 16;
1727                 break;
1728         case RTE_CRYPTO_CIPHER_3DES_CBC:
1729                 enc_type = ROC_SE_DES3_CBC;
1730                 cipher_key_len = 24;
1731                 break;
1732         case RTE_CRYPTO_CIPHER_DES_CBC:
1733                 /* DES is implemented using 3DES in hardware */
1734                 enc_type = ROC_SE_DES3_CBC;
1735                 cipher_key_len = 8;
1736                 break;
1737         case RTE_CRYPTO_CIPHER_AES_CTR:
1738                 enc_type = ROC_SE_AES_CTR;
1739                 cipher_key_len = 16;
1740                 aes_ctr = 1;
1741                 break;
1742         case RTE_CRYPTO_CIPHER_NULL:
1743                 enc_type = 0;
1744                 is_null = 1;
1745                 break;
1746         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1747                 enc_type = ROC_SE_KASUMI_F8_ECB;
1748                 cipher_key_len = 16;
1749                 zsk_flag = ROC_SE_K_F8;
1750                 break;
1751         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1752                 enc_type = ROC_SE_SNOW3G_UEA2;
1753                 cipher_key_len = 16;
1754                 zsk_flag = ROC_SE_ZS_EA;
1755                 break;
1756         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1757                 enc_type = ROC_SE_ZUC_EEA3;
1758                 cipher_key_len = c_form->key.length;
1759                 zsk_flag = ROC_SE_ZS_EA;
1760                 break;
1761         case RTE_CRYPTO_CIPHER_AES_XTS:
1762                 enc_type = ROC_SE_AES_XTS;
1763                 cipher_key_len = 16;
1764                 break;
1765         case RTE_CRYPTO_CIPHER_3DES_ECB:
1766                 enc_type = ROC_SE_DES3_ECB;
1767                 cipher_key_len = 24;
1768                 break;
1769         case RTE_CRYPTO_CIPHER_AES_ECB:
1770                 enc_type = ROC_SE_AES_ECB;
1771                 cipher_key_len = 16;
1772                 break;
1773         case RTE_CRYPTO_CIPHER_3DES_CTR:
1774         case RTE_CRYPTO_CIPHER_AES_F8:
1775         case RTE_CRYPTO_CIPHER_ARC4:
1776                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1777                 return -1;
1778         default:
1779                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1780                            c_form->algo);
1781                 return -1;
1782         }
1783
1784         if (c_form->key.length < cipher_key_len) {
1785                 plt_dp_err("Invalid cipher params keylen %u",
1786                            c_form->key.length);
1787                 return -1;
1788         }
1789
1790         sess->zsk_flag = zsk_flag;
1791         sess->aes_gcm = 0;
1792         sess->aes_ctr = aes_ctr;
1793         sess->iv_offset = c_form->iv.offset;
1794         sess->iv_length = c_form->iv.length;
1795         sess->is_null = is_null;
1796
1797         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1798                                          c_form->key.data, c_form->key.length,
1799                                          NULL)))
1800                 return -1;
1801
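             /* PDCP cipher algorithms require an endianness swap of the SE context */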
1802         if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
1803                 roc_se_ctx_swap(&sess->roc_se_ctx);
1804         return 0;
1805 }
1806
1807 static __rte_always_inline int
1808 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1809 {
1810         struct rte_crypto_auth_xform *a_form;
1811         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1812         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1813
1814         if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
1815                 return fill_sess_gmac(xform, sess);
1816
1817         if (xform->next != NULL &&
1818             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1819             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1820                 /* Perform auth followed by encryption */
1821                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1822                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1823         }
1824
1825         a_form = &xform->auth;
1826
1827         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1828                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1829         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1830                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1831         else {
1832                 plt_dp_err("Unknown auth operation");
1833                 return -1;
1834         }
1835
1836         switch (a_form->algo) {
1837         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1838                 /* Fall through */
1839         case RTE_CRYPTO_AUTH_SHA1:
1840                 auth_type = ROC_SE_SHA1_TYPE;
1841                 break;
1842         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1843         case RTE_CRYPTO_AUTH_SHA256:
1844                 auth_type = ROC_SE_SHA2_SHA256;
1845                 break;
1846         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1847         case RTE_CRYPTO_AUTH_SHA512:
1848                 auth_type = ROC_SE_SHA2_SHA512;
1849                 break;
1850         case RTE_CRYPTO_AUTH_AES_GMAC:
1851                 auth_type = ROC_SE_GMAC_TYPE;
1852                 aes_gcm = 1;
1853                 break;
1854         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1855         case RTE_CRYPTO_AUTH_SHA224:
1856                 auth_type = ROC_SE_SHA2_SHA224;
1857                 break;
1858         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1859         case RTE_CRYPTO_AUTH_SHA384:
1860                 auth_type = ROC_SE_SHA2_SHA384;
1861                 break;
1862         case RTE_CRYPTO_AUTH_MD5_HMAC:
1863         case RTE_CRYPTO_AUTH_MD5:
1864                 auth_type = ROC_SE_MD5_TYPE;
1865                 break;
1866         case RTE_CRYPTO_AUTH_KASUMI_F9:
1867                 auth_type = ROC_SE_KASUMI_F9_ECB;
1868                 /*
1869                  * Indicate that the direction bit must be extracted
1870                  * from the end of the source data
1871                  */
1872                 zsk_flag = ROC_SE_K_F9;
1873                 break;
1874         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1875                 auth_type = ROC_SE_SNOW3G_UIA2;
1876                 zsk_flag = ROC_SE_ZS_IA;
1877                 break;
1878         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1879                 auth_type = ROC_SE_ZUC_EIA3;
1880                 zsk_flag = ROC_SE_ZS_IA;
1881                 break;
1882         case RTE_CRYPTO_AUTH_NULL:
1883                 auth_type = 0;
1884                 is_null = 1;
1885                 break;
1886         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1887         case RTE_CRYPTO_AUTH_AES_CMAC:
1888         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1889                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
1890                 return -1;
1891         default:
1892                 plt_dp_err("Crypto: Undefined Hash algo %u specified",
1893                            a_form->algo);
1894                 return -1;
1895         }
1896
1897         sess->zsk_flag = zsk_flag;
1898         sess->aes_gcm = aes_gcm;
1899         sess->mac_len = a_form->digest_length;
1900         sess->is_null = is_null;
1901         if (zsk_flag) {
1902                 sess->auth_iv_offset = a_form->iv.offset;
1903                 sess->auth_iv_length = a_form->iv.length;
1904         }
1905         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
1906                                          a_form->key.data, a_form->key.length,
1907                                          a_form->digest_length)))
1908                 return -1;
1909
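             /* PDCP auth algorithms also require an endianness swap of the SE context */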
1910         if ((auth_type >= ROC_SE_ZUC_EIA3) &&
1911             (auth_type <= ROC_SE_AES_CMAC_EIA2))
1912                 roc_se_ctx_swap(&sess->roc_se_ctx);
1913
1914         return 0;
1915 }
1916
1917 static __rte_always_inline int
1918 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1919 {
1920         struct rte_crypto_auth_xform *a_form;
1921         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1922         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1923
1924         a_form = &xform->auth;
1925
1926         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1927                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1928         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1929                 sess->cpt_op |= ROC_SE_OP_DECODE;
1930         else {
1931                 plt_dp_err("Unknown auth operation");
1932                 return -1;
1933         }
1934
1935         switch (a_form->algo) {
1936         case RTE_CRYPTO_AUTH_AES_GMAC:
1937                 enc_type = ROC_SE_AES_GCM;
1938                 auth_type = ROC_SE_GMAC_TYPE;
1939                 break;
1940         default:
1941                 plt_dp_err("Crypto: Undefined auth algo %u specified",
1942                            a_form->algo);
1943                 return -1;
1944         }
1945
1946         sess->zsk_flag = 0;
1947         sess->aes_gcm = 0;
1948         sess->is_gmac = 1;
1949         sess->iv_offset = a_form->iv.offset;
1950         sess->iv_length = a_form->iv.length;
1951         sess->mac_len = a_form->digest_length;
1952
1953         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1954                                          a_form->key.data, a_form->key.length,
1955                                          NULL)))
1956                 return -1;
1957
1958         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1959                                          a_form->digest_length)))
1960                 return -1;
1961
1962         return 0;
1963 }
1964
1965 static __rte_always_inline void *
1966 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1967               struct rte_mempool *cpt_meta_pool,
1968               struct cpt_inflight_req *infl_req)
1969 {
1970         uint8_t *mdata;
1971
1972         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1973                 return NULL;
1974
1975         buf->vaddr = mdata;
1976         buf->size = len;
1977
1978         infl_req->mdata = mdata;
1979         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1980
1981         return mdata;
1982 }
1983
1984 static __rte_always_inline uint32_t
1985 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
1986                      uint32_t start_offset)
1987 {
1988         uint16_t index = 0;
1989         void *seg_data = NULL;
1990         int32_t seg_size = 0;
1991
1992         if (!pkt) {
1993                 iovec->buf_cnt = 0;
1994                 return 0;
1995         }
1996
1997         if (!start_offset) {
1998                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1999                 seg_size = pkt->data_len;
2000         } else {
2001                 while (start_offset >= pkt->data_len) {
2002                         start_offset -= pkt->data_len;
2003                         pkt = pkt->next;
2004                 }
2005
2006                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2007                 seg_size = pkt->data_len - start_offset;
2008                 if (!seg_size)
2009                         return 1;
2010         }
2011
2012         /* first seg */
2013         iovec->bufs[index].vaddr = seg_data;
2014         iovec->bufs[index].size = seg_size;
2015         index++;
2016         pkt = pkt->next;
2017
2018         while (unlikely(pkt != NULL)) {
2019                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2020                 seg_size = pkt->data_len;
2021                 if (!seg_size)
2022                         break;
2023
2024                 iovec->bufs[index].vaddr = seg_data;
2025                 iovec->bufs[index].size = seg_size;
2026
2027                 index++;
2028
2029                 pkt = pkt->next;
2030         }
2031
2032         iovec->buf_cnt = index;
2033         return 0;
2034 }
2035
2036 static __rte_always_inline uint32_t
2037 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2038                              struct roc_se_fc_params *param, uint32_t *flags)
2039 {
2040         uint16_t index = 0;
2041         void *seg_data = NULL;
2042         uint32_t seg_size = 0;
2043         struct roc_se_iov_ptr *iovec;
2044
2045         seg_data = rte_pktmbuf_mtod(pkt, void *);
2046         seg_size = pkt->data_len;
2047
2048         /* first seg */
2049         if (likely(!pkt->next)) {
2050                 uint32_t headroom;
2051
2052                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2053                 headroom = rte_pktmbuf_headroom(pkt);
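                     /* Headroom must fit the offset control word and an IV of
                      * up to 16 bytes so the request can be built in place.
                      */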
2054                 if (likely(headroom >= 24))
2055                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2056
2057                 param->bufs[0].vaddr = seg_data;
2058                 param->bufs[0].size = seg_size;
2059                 return 0;
2060         }
2061         iovec = param->src_iov;
2062         iovec->bufs[index].vaddr = seg_data;
2063         iovec->bufs[index].size = seg_size;
2064         index++;
2065         pkt = pkt->next;
2066
2067         while (unlikely(pkt != NULL)) {
2068                 seg_data = rte_pktmbuf_mtod(pkt, void *);
2069                 seg_size = pkt->data_len;
2070
2071                 if (!seg_size)
2072                         break;
2073
2074                 iovec->bufs[index].vaddr = seg_data;
2075                 iovec->bufs[index].size = seg_size;
2076
2077                 index++;
2078
2079                 pkt = pkt->next;
2080         }
2081
2082         iovec->buf_cnt = index;
2083         return 0;
2084 }
2085
2086 static __rte_always_inline int
2087 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2088                struct cpt_qp_meta_info *m_info,
2089                struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2090 {
2091         struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2092         uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2093         struct rte_crypto_sym_op *sym_op = cop->sym;
2094         void *mdata = NULL;
2095         uint32_t mc_hash_off;
2096         uint32_t flags = 0;
2097         uint64_t d_offs, d_lens;
2098         struct rte_mbuf *m_src, *m_dst;
2099         uint8_t cpt_op = sess->cpt_op;
2100 #ifdef CPT_ALWAYS_USE_SG_MODE
2101         uint8_t inplace = 0;
2102 #else
2103         uint8_t inplace = 1;
2104 #endif
2105         struct roc_se_fc_params fc_params;
2106         char src[SRC_IOV_SIZE];
2107         char dst[SRC_IOV_SIZE];
2108         uint32_t iv_buf[4];
2109         int ret;
2110
2111         fc_params.cipher_iv_len = sess->iv_length;
2112         fc_params.auth_iv_len = sess->auth_iv_length;
2113
2114         if (likely(sess->iv_length)) {
2115                 flags |= ROC_SE_VALID_IV_BUF;
2116                 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2117                                                              sess->iv_offset);
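                     /* For AES-CTR with a shorter IV, build the full 16-byte
                      * counter block: copy the 12-byte nonce and set the last
                      * word to a big-endian counter of 1.
                      */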
2118                 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2119                         memcpy((uint8_t *)iv_buf,
2120                                rte_crypto_op_ctod_offset(cop, uint8_t *,
2121                                                          sess->iv_offset),
2122                                12);
2123                         iv_buf[3] = rte_cpu_to_be_32(0x1);
2124                         fc_params.iv_buf = iv_buf;
2125                 }
2126         }
2127
2128         if (sess->zsk_flag) {
2129                 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2130                         cop, uint8_t *, sess->auth_iv_offset);
2131                 if (sess->zsk_flag != ROC_SE_ZS_EA)
2132                         inplace = 0;
2133         }
2134         m_src = sym_op->m_src;
2135         m_dst = sym_op->m_dst;
2136
2137         if (sess->aes_gcm || sess->chacha_poly) {
2138                 uint8_t *salt;
2139                 uint8_t *aad_data;
2140                 uint16_t aad_len;
2141
2142                 d_offs = sym_op->aead.data.offset;
2143                 d_lens = sym_op->aead.data.length;
2144                 mc_hash_off =
2145                         sym_op->aead.data.offset + sym_op->aead.data.length;
2146
2147                 aad_data = sym_op->aead.aad.data;
2148                 aad_len = sess->aad_length;
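                     /* If the AAD is contiguous with the data in the mbuf, fold
                      * it into the data offset/length instead of using a
                      * separate AAD buffer.
                      */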
2149                 if (likely((aad_data + aad_len) ==
2150                            rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2151                                                    sym_op->aead.data.offset))) {
2152                         d_offs = (d_offs - aad_len) | (d_offs << 16);
2153                         d_lens = (d_lens + aad_len) | (d_lens << 32);
2154                 } else {
2155                         fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2156                         fc_params.aad_buf.size = aad_len;
2157                         flags |= ROC_SE_VALID_AAD_BUF;
2158                         inplace = 0;
2159                         d_offs = d_offs << 16;
2160                         d_lens = d_lens << 32;
2161                 }
2162
2163                 salt = fc_params.iv_buf;
2164                 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2165                         cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2166                         sess->salt = *(uint32_t *)salt;
2167                 }
2168                 fc_params.iv_buf = salt + 4;
2169                 if (likely(sess->mac_len)) {
2170                         struct rte_mbuf *m =
2171                                 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2172
2173                         if (!m)
2174                                 m = m_src;
2175
2176                         /* hmac immediately following data is best case */
2177                         if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2178                                              mc_hash_off !=
2179                                      (uint8_t *)sym_op->aead.digest.data)) {
2180                                 flags |= ROC_SE_VALID_MAC_BUF;
2181                                 fc_params.mac_buf.size = sess->mac_len;
2182                                 fc_params.mac_buf.vaddr =
2183                                         sym_op->aead.digest.data;
2184                                 inplace = 0;
2185                         }
2186                 }
2187         } else {
2188                 d_offs = sym_op->cipher.data.offset;
2189                 d_lens = sym_op->cipher.data.length;
2190                 mc_hash_off =
2191                         sym_op->cipher.data.offset + sym_op->cipher.data.length;
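                     /* Pack cipher offset/length into the upper halves and auth
                      * offset/length into the lower halves of d_offs/d_lens.
                      */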
2192                 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2193                 d_lens = (d_lens << 32) | sym_op->auth.data.length;
2194
2195                 if (mc_hash_off <
2196                     (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2197                         mc_hash_off = (sym_op->auth.data.offset +
2198                                        sym_op->auth.data.length);
2199                 }
2200                 /* For GMAC, the salt must be updated as in GCM */
2201                 if (unlikely(sess->is_gmac)) {
2202                         uint8_t *salt;
2203                         salt = fc_params.iv_buf;
2204                         if (unlikely(*(uint32_t *)salt != sess->salt)) {
2205                                 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2206                                 sess->salt = *(uint32_t *)salt;
2207                         }
2208                         fc_params.iv_buf = salt + 4;
2209                 }
2210                 if (likely(sess->mac_len)) {
2211                         struct rte_mbuf *m;
2212
2213                         m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2214                         if (!m)
2215                                 m = m_src;
2216
2217                         /* hmac immediately following data is best case */
2218                         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2219                             (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2220                                               mc_hash_off !=
2221                                       (uint8_t *)sym_op->auth.digest.data))) {
2222                                 flags |= ROC_SE_VALID_MAC_BUF;
2223                                 fc_params.mac_buf.size = sess->mac_len;
2224                                 fc_params.mac_buf.vaddr =
2225                                         sym_op->auth.digest.data;
2226                                 inplace = 0;
2227                         }
2228                 }
2229         }
2230         fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2231
2232         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2233             unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2234                 inplace = 0;
2235
2236         if (likely(!m_dst && inplace)) {
2237                 /* In-place single-buffer case: no separate AAD or
2238                  * MAC buffer and not a wireless (air interface)
2239                  * crypto algorithm.
2240                  */
2241                 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2242
2243                 if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
2244                                                           &flags))) {
2245                         plt_dp_err("Prepare inplace src iov failed");
2246                         ret = -EINVAL;
2247                         goto err_exit;
2248                 }
2249
2250         } else {
2251                 /* Out of place processing */
2252                 fc_params.src_iov = (void *)src;
2253                 fc_params.dst_iov = (void *)dst;
2254
2255                 /* Store SG I/O in the API for reuse */
2256                 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2257                         plt_dp_err("Prepare src iov failed");
2258                         ret = -EINVAL;
2259                         goto err_exit;
2260                 }
2261
2262                 if (unlikely(m_dst != NULL)) {
2263                         uint32_t pkt_len;
2264
2265                         /* Try to make room as much as src has */
2266                         pkt_len = rte_pktmbuf_pkt_len(m_dst);
2267
2268                         if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2269                                 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2270                                 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2271                                         plt_dp_err("Not enough space in "
2272                                                    "m_dst %p, need %u"
2273                                                    " more",
2274                                                    m_dst, pkt_len);
2275                                         ret = -EINVAL;
2276                                         goto err_exit;
2277                                 }
2278                         }
2279
2280                         if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2281                                 plt_dp_err("Prepare dst iov failed for "
2282                                            "m_dst %p",
2283                                            m_dst);
2284                                 ret = -EINVAL;
2285                                 goto err_exit;
2286                         }
2287                 } else {
2288                         fc_params.dst_iov = (void *)src;
2289                 }
2290         }
2291
2292         if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2293                        (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2294                        ((ctx->fc_type == ROC_SE_FC_GEN) ||
2295                         (ctx->fc_type == ROC_SE_PDCP))))) {
2296                 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2297                                       m_info->pool, infl_req);
2298                 if (mdata == NULL) {
2299                         plt_dp_err("Error allocating meta buffer for request");
2300                         return -ENOMEM;
2301                 }
2302         }
2303
2304         /* Finally prepare the instruction */
2305         if (cpt_op & ROC_SE_OP_ENCODE)
2306                 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2307                                            inst);
2308         else
2309                 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2310                                            inst);
2311
2312         if (unlikely(ret)) {
2313                 plt_dp_err("Preparing request failed due to bad input arg");
2314                 goto free_mdata_and_exit;
2315         }
2316
2317         return 0;
2318
2319 free_mdata_and_exit:
2320         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2321                 rte_mempool_put(m_info->pool, infl_req->mdata);
2322 err_exit:
2323         return ret;
2324 }
2325
2326 static __rte_always_inline void
2327 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2328 {
2329         uint8_t *mac;
2330         struct rte_crypto_sym_op *sym_op = op->sym;
2331
2332         if (sym_op->auth.digest.data)
2333                 mac = sym_op->auth.digest.data;
2334         else
2335                 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2336                                               sym_op->auth.data.length +
2337                                                       sym_op->auth.data.offset);
2338         if (!mac) {
2339                 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2340                 return;
2341         }
2342
2343         if (memcmp(mac, gen_mac, mac_len))
2344                 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2345         else
2346                 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2347 }
2348
2349 static __rte_always_inline void
2350 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2351                                    uint32_t *addr_length_in_bits,
2352                                    uint8_t *addr_direction)
2353 {
2354         uint8_t found = 0;
2355         uint32_t pos;
2356         uint8_t last_byte;
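             /*
              * KASUMI F9 input ends with a direction bit, a single '1' bit and
              * zero padding. Scan backwards for the first set bit (the padding
              * marker); the bit just before it is the direction and everything
              * before that is the message.
              */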
2357         while (!found && counter_num_bytes > 0) {
2358                 counter_num_bytes--;
2359                 if (src[counter_num_bytes] == 0x00)
2360                         continue;
2361                 pos = rte_bsf32(src[counter_num_bytes]);
2362                 if (pos == 7) {
2363                         if (likely(counter_num_bytes > 0)) {
2364                                 last_byte = src[counter_num_bytes - 1];
2365                                 *addr_direction = last_byte & 0x1;
2366                                 *addr_length_in_bits =
2367                                         counter_num_bytes * 8 - 1;
2368                         }
2369                 } else {
2370                         last_byte = src[counter_num_bytes];
2371                         *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2372                         *addr_length_in_bits =
2373                                 counter_num_bytes * 8 + (8 - (pos + 2));
2374                 }
2375                 found = 1;
2376         }
2377 }
2378
2379 /*
2380  * This handles all auth only except AES_GMAC
2381  */
2382 static __rte_always_inline int
2383 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2384                    struct cpt_qp_meta_info *m_info,
2385                    struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2386 {
2387         uint32_t space = 0;
2388         struct rte_crypto_sym_op *sym_op = cop->sym;
2389         void *mdata;
2390         uint32_t auth_range_off;
2391         uint32_t flags = 0;
2392         uint64_t d_offs = 0, d_lens;
2393         struct rte_mbuf *m_src, *m_dst;
2394         uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2395         uint16_t mac_len = sess->mac_len;
2396         struct roc_se_fc_params params;
2397         char src[SRC_IOV_SIZE];
2398         uint8_t iv_buf[16];
2399         int ret;
2400
2401         memset(&params, 0, sizeof(struct roc_se_fc_params));
2402
2403         m_src = sym_op->m_src;
2404
2405         mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2406                               infl_req);
2407         if (mdata == NULL) {
2408                 ret = -ENOMEM;
2409                 goto err_exit;
2410         }
2411
2412         auth_range_off = sym_op->auth.data.offset;
2413
2414         flags = ROC_SE_VALID_MAC_BUF;
2415         params.src_iov = (void *)src;
2416         if (unlikely(sess->zsk_flag)) {
2417                 /*
2418                  * For ZUC, KASUMI and SNOW3G the offsets are in bits, so
2419                  * pass the offset through even for the auth-only case and
2420                  * let the microcode handle it.
2421                  */
2422                 d_offs = auth_range_off;
2423                 auth_range_off = 0;
2424                 params.auth_iv_len = sess->auth_iv_length;
2425                 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2426                         cop, uint8_t *, sess->auth_iv_offset);
2427                 if (sess->zsk_flag == ROC_SE_K_F9) {
2428                         uint32_t length_in_bits, num_bytes;
2429                         uint8_t *src, direction = 0;
2430
2431                         memcpy(iv_buf,
2432                                rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2433                         /*
2434                          * This is KASUMI F9; the direction bit is taken
2435                          * from the end of the source buffer.
2436                          */
2437                         length_in_bits = cop->sym->auth.data.length;
2438                         num_bytes = (length_in_bits >> 3);
2439                         src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2440                         find_kasumif9_direction_and_length(
2441                                 src, num_bytes, &length_in_bits, &direction);
2442                         length_in_bits -= 64;
2443                         cop->sym->auth.data.offset += 64;
2444                         d_offs = cop->sym->auth.data.offset;
2445                         auth_range_off = d_offs / 8;
2446                         cop->sym->auth.data.length = length_in_bits;
2447
2448                         /* Store it at end of auth iv */
2449                         iv_buf[8] = direction;
2450                         params.auth_iv_buf = iv_buf;
2451                 }
2452         }
2453
2454         d_lens = sym_op->auth.data.length;
2455
2456         params.ctx_buf.vaddr = &sess->roc_se_ctx;
2457
2458         if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2459                 if (sym_op->auth.digest.data) {
2460                         /*
2461                          * Digest to be generated
2462                          * in separate buffer
2463                          */
2464                         params.mac_buf.size = sess->mac_len;
2465                         params.mac_buf.vaddr = sym_op->auth.digest.data;
2466                 } else {
2467                         uint32_t off = sym_op->auth.data.offset +
2468                                        sym_op->auth.data.length;
2469                         int32_t dlen, space;
2470
2471                         m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2472                         dlen = rte_pktmbuf_pkt_len(m_dst);
2473
2474                         space = off + mac_len - dlen;
2475                         if (space > 0)
2476                                 if (!rte_pktmbuf_append(m_dst, space)) {
2477                                         plt_dp_err("Failed to extend "
2478                                                    "mbuf by %uB",
2479                                                    space);
2480                                         ret = -EINVAL;
2481                                         goto free_mdata_and_exit;
2482                                 }
2483
2484                         params.mac_buf.vaddr =
2485                                 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2486                         params.mac_buf.size = mac_len;
2487                 }
2488         } else {
2489                 uint64_t *op = mdata;
2490
2491                 /* Need space for storing generated mac */
2492                 space += 2 * sizeof(uint64_t);
2493
2494                 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2495                 params.mac_buf.size = mac_len;
2496                 space += RTE_ALIGN_CEIL(mac_len, 8);
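                     /* Record the generated MAC address and length at the start
                      * of the meta buffer so the dequeue path can verify it
                      * against the application-provided digest.
                      */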
2497                 op[0] = (uintptr_t)params.mac_buf.vaddr;
2498                 op[1] = mac_len;
2499                 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2500         }
2501
2502         params.meta_buf.vaddr = (uint8_t *)mdata + space;
2503         params.meta_buf.size -= space;
2504
2505         /* Out of place processing */
2506         params.src_iov = (void *)src;
2507
2508         /* Store SG I/O in the API for reuse */
2509         if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2510                 plt_dp_err("Prepare src iov failed");
2511                 ret = -EINVAL;
2512                 goto free_mdata_and_exit;
2513         }
2514
2515         ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
2516         if (ret)
2517                 goto free_mdata_and_exit;
2518
2519         return 0;
2520
2521 free_mdata_and_exit:
2522         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2523                 rte_mempool_put(m_info->pool, infl_req->mdata);
2524 err_exit:
2525         return ret;
2526 }
2527 #endif /*_CNXK_SE_H_ */