drivers/crypto/cnxk/cnxk_se.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _CNXK_SE_H_
6 #define _CNXK_SE_H_
7 #include <stdbool.h>
8
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
11
12 #define SRC_IOV_SIZE                                                           \
13         (sizeof(struct roc_se_iov_ptr) +                                       \
14          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE                                                           \
16         (sizeof(struct roc_se_iov_ptr) +                                       \
17          (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
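/*
 * Note: these macros size the on-stack scratch areas used to build the
 * source/destination IOVs: one struct roc_se_iov_ptr header followed by up
 * to ROC_SE_MAX_SG_CNT buffer pointers (see the src[]/dst[] arrays in
 * fill_fc_params()).
 */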
18
19 struct cnxk_se_sess {
20         uint16_t cpt_op : 4;
21         uint16_t zsk_flag : 4;
22         uint16_t aes_gcm : 1;
23         uint16_t aes_ctr : 1;
24         uint16_t chacha_poly : 1;
25         uint16_t is_null : 1;
26         uint16_t is_gmac : 1;
27         uint16_t rsvd1 : 3;
28         uint16_t aad_length;
29         uint8_t mac_len;
30         uint8_t iv_length;
31         uint8_t auth_iv_length;
32         uint16_t iv_offset;
33         uint16_t auth_iv_offset;
34         uint32_t salt;
35         uint64_t cpt_inst_w7;
36         struct roc_se_ctx roc_se_ctx;
37 } __rte_cache_aligned;
38
39 static __rte_always_inline int
40 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
41 {
42         uint16_t mac_len = auth->digest_length;
43         int ret;
44
45         switch (auth->algo) {
46         case RTE_CRYPTO_AUTH_MD5:
47         case RTE_CRYPTO_AUTH_MD5_HMAC:
48                 ret = (mac_len == 16) ? 0 : -1;
49                 break;
50         case RTE_CRYPTO_AUTH_SHA1:
51         case RTE_CRYPTO_AUTH_SHA1_HMAC:
52                 ret = (mac_len == 20) ? 0 : -1;
53                 break;
54         case RTE_CRYPTO_AUTH_SHA224:
55         case RTE_CRYPTO_AUTH_SHA224_HMAC:
56                 ret = (mac_len == 28) ? 0 : -1;
57                 break;
58         case RTE_CRYPTO_AUTH_SHA256:
59         case RTE_CRYPTO_AUTH_SHA256_HMAC:
60                 ret = (mac_len == 32) ? 0 : -1;
61                 break;
62         case RTE_CRYPTO_AUTH_SHA384:
63         case RTE_CRYPTO_AUTH_SHA384_HMAC:
64                 ret = (mac_len == 48) ? 0 : -1;
65                 break;
66         case RTE_CRYPTO_AUTH_SHA512:
67         case RTE_CRYPTO_AUTH_SHA512_HMAC:
68                 ret = (mac_len == 64) ? 0 : -1;
69                 break;
70         case RTE_CRYPTO_AUTH_NULL:
71                 ret = 0;
72                 break;
73         default:
74                 ret = -1;
75         }
76
77         return ret;
78 }
79
80 static __rte_always_inline void
81 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
82 {
83         struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
84         memcpy(fctx->enc.encr_iv, salt, 4);
85 }
86
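/*
 * SG components pack four pointer/length pairs per struct roc_se_sglist_comp:
 * (i >> 2) selects the component and (i % 4) the slot within it. Lengths and
 * addresses are stored big-endian.
 */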
87 static __rte_always_inline uint32_t
88 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
89              uint32_t size)
90 {
91         struct roc_se_sglist_comp *to = &list[i >> 2];
92
93         to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
94         to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
95         i++;
96         return i;
97 }
98
99 static __rte_always_inline uint32_t
100 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
101                       struct roc_se_buf_ptr *from)
102 {
103         struct roc_se_sglist_comp *to = &list[i >> 2];
104
105         to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
106         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
107         i++;
108         return i;
109 }
110
111 static __rte_always_inline uint32_t
112 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
113                           struct roc_se_buf_ptr *from, uint32_t *psize)
114 {
115         struct roc_se_sglist_comp *to = &list[i >> 2];
116         uint32_t size = *psize;
117         uint32_t e_len;
118
119         e_len = (size > from->size) ? from->size : size;
120         to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
121         to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
122         *psize -= e_len;
123         i++;
124         return i;
125 }
126
127 /*
128  * This fills the SG I/O list in the format expected by the
129  * microcode (MC), from the IOV given by the user.
130  */
131 static __rte_always_inline uint32_t
132 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
133                       struct roc_se_iov_ptr *from, uint32_t from_offset,
134                       uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
135                       uint32_t extra_offset)
136 {
137         int32_t j;
138         uint32_t extra_len = extra_buf ? extra_buf->size : 0;
139         uint32_t size = *psize;
140         struct roc_se_buf_ptr *bufs;
141
142         bufs = from->bufs;
143         for (j = 0; (j < from->buf_cnt) && size; j++) {
144                 uint64_t e_vaddr;
145                 uint32_t e_len;
146                 struct roc_se_sglist_comp *to = &list[i >> 2];
147
148                 if (unlikely(from_offset)) {
149                         if (from_offset >= bufs[j].size) {
150                                 from_offset -= bufs[j].size;
151                                 continue;
152                         }
153                         e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
154                         e_len = (size > (bufs[j].size - from_offset)) ?
155                                         (bufs[j].size - from_offset) :
156                                         size;
157                         from_offset = 0;
158                 } else {
159                         e_vaddr = (uint64_t)bufs[j].vaddr;
160                         e_len = (size > bufs[j].size) ? bufs[j].size : size;
161                 }
162
163                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
164                 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
165
166                 if (extra_len && (e_len >= extra_offset)) {
167                         /* Break the data at given offset */
168                         uint32_t next_len = e_len - extra_offset;
169                         uint64_t next_vaddr = e_vaddr + extra_offset;
170
171                         if (!extra_offset) {
172                                 i--;
173                         } else {
174                                 e_len = extra_offset;
175                                 size -= e_len;
176                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
177                         }
178
179                         extra_len = RTE_MIN(extra_len, size);
180                         /* Insert extra data ptr */
181                         if (extra_len) {
182                                 i++;
183                                 to = &list[i >> 2];
184                                 to->u.s.len[i % 4] =
185                                         rte_cpu_to_be_16(extra_len);
186                                 to->ptr[i % 4] = rte_cpu_to_be_64(
187                                         (uint64_t)extra_buf->vaddr);
188                                 size -= extra_len;
189                         }
190
191                         next_len = RTE_MIN(next_len, size);
192                         /* insert the rest of the data */
193                         if (next_len) {
194                                 i++;
195                                 to = &list[i >> 2];
196                                 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
197                                 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
198                                 size -= next_len;
199                         }
200                         extra_len = 0;
201
202                 } else {
203                         size -= e_len;
204                 }
205                 if (extra_offset)
206                         extra_offset -= size;
207                 i++;
208         }
209
210         *psize = size;
211         return (uint32_t)i;
212 }
213
214 static __rte_always_inline int
215 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
216                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
217 {
218         uint32_t iv_offset = 0;
219         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
220         struct roc_se_ctx *se_ctx;
221         uint32_t cipher_type, hash_type;
222         uint32_t mac_len, size;
223         uint8_t iv_len = 16;
224         struct roc_se_buf_ptr *aad_buf = NULL;
225         uint32_t encr_offset, auth_offset;
226         uint32_t encr_data_len, auth_data_len, aad_len = 0;
227         uint32_t passthrough_len = 0;
228         union cpt_inst_w4 cpt_inst_w4;
229         void *offset_vaddr;
230         uint8_t op_minor;
231
232         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
233         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
234         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
235         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
236         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
237                 /* We don't support both AAD and auth data separately */
238                 auth_data_len = 0;
239                 auth_offset = 0;
240                 aad_len = fc_params->aad_buf.size;
241                 aad_buf = &fc_params->aad_buf;
242         }
243         se_ctx = fc_params->ctx_buf.vaddr;
244         cipher_type = se_ctx->enc_cipher;
245         hash_type = se_ctx->hash_type;
246         mac_len = se_ctx->mac_len;
247         op_minor = se_ctx->template_w4.s.opcode_minor;
248
249         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
250                 iv_len = 0;
251                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
252         }
253
254         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
255                 /*
256                  * When AAD is given, data above encr_offset is passed through.
257                  * Since AAD is given as a separate pointer and not as an offset,
258                  * this is a special case: the input data is fragmented into
259                  * passthrough + encr_data, and the AAD is inserted in between.
260                  */
261                 if (hash_type != ROC_SE_GMAC_TYPE) {
262                         passthrough_len = encr_offset;
263                         auth_offset = passthrough_len + iv_len;
264                         encr_offset = passthrough_len + aad_len + iv_len;
265                         auth_data_len = aad_len + encr_data_len;
266                 } else {
267                         passthrough_len = 16 + aad_len;
268                         auth_offset = passthrough_len + iv_len;
269                         auth_data_len = aad_len;
270                 }
271         } else {
272                 encr_offset += iv_len;
273                 auth_offset += iv_len;
274         }
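        /*
         * Restating the offsets computed above: in the AAD case the
         * authenticated region begins right after the passthrough data (and
         * IV, when present) and covers AAD + encrypted data, while the
         * encrypted region begins only after the AAD.
         */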
275
276         /* Encryption */
277         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
278         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
279         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
280
281         if (hash_type == ROC_SE_GMAC_TYPE) {
282                 encr_offset = 0;
283                 encr_data_len = 0;
284         }
285
286         auth_dlen = auth_offset + auth_data_len;
287         enc_dlen = encr_data_len + encr_offset;
288         if (unlikely(encr_data_len & 0xf)) {
289                 if ((cipher_type == ROC_SE_DES3_CBC) ||
290                     (cipher_type == ROC_SE_DES3_ECB))
291                         enc_dlen =
292                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
293                 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
294                                 (cipher_type == ROC_SE_AES_ECB)))
295                         enc_dlen =
296                                 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
297         }
298
299         if (unlikely(auth_dlen > enc_dlen)) {
300                 inputlen = auth_dlen;
301                 outputlen = auth_dlen + mac_len;
302         } else {
303                 inputlen = enc_dlen;
304                 outputlen = enc_dlen + mac_len;
305         }
306
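        /*
         * When the HMAC-first minor opcode is set (auth-then-encrypt or
         * decrypt-then-verify chains), no separate digest is appended after
         * the data, so the expected output length is just enc_dlen.
         */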
307         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
308                 outputlen = enc_dlen;
309
310         /* GP op header */
311         cpt_inst_w4.s.param1 = encr_data_len;
312         cpt_inst_w4.s.param2 = auth_data_len;
313
314         /*
315          * On cn9k and cn10k, the IV and offset control word cannot be part
316          * of the instruction and must be placed in the data buffer. Check
317          * that headroom is available and only then use direct mode
318          * processing; otherwise fall back to SG mode.
319          */
320         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
321                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
322                 void *dm_vaddr = fc_params->bufs[0].vaddr;
323
324                 /* Use Direct mode */
325
326                 offset_vaddr =
327                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
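                /*
                 * Direct mode DPTR layout, built in the buffer headroom:
                 * [offset control word][IV][data in place]. RPTR (set below)
                 * excludes only the control word, so the response data begins
                 * at the IV location.
                 */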
328
329                 /* DPTR */
330                 inst->dptr = (uint64_t)offset_vaddr;
331
332                 /* RPTR should just exclude offset control word */
333                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
334
335                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
336
337                 if (likely(iv_len)) {
338                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
339                                                       ROC_SE_OFF_CTRL_LEN);
340                         uint64_t *src = fc_params->iv_buf;
341                         dest[0] = src[0];
342                         dest[1] = src[1];
343                 }
344
345         } else {
346                 void *m_vaddr = fc_params->meta_buf.vaddr;
347                 uint32_t i, g_size_bytes, s_size_bytes;
348                 struct roc_se_sglist_comp *gather_comp;
349                 struct roc_se_sglist_comp *scatter_comp;
350                 uint8_t *in_buffer;
351
352                 /* This falls under strict SG mode */
353                 offset_vaddr = m_vaddr;
354                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
355
356                 m_vaddr = (uint8_t *)m_vaddr + size;
357
358                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
359
360                 if (likely(iv_len)) {
361                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
362                                                       ROC_SE_OFF_CTRL_LEN);
363                         uint64_t *src = fc_params->iv_buf;
364                         dest[0] = src[0];
365                         dest[1] = src[1];
366                 }
367
368                 /* DPTR has SG list */
369                 in_buffer = m_vaddr;
370
371                 ((uint16_t *)in_buffer)[0] = 0;
372                 ((uint16_t *)in_buffer)[1] = 0;
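                /*
                 * An 8-byte SG header precedes the components; its third and
                 * fourth 16-bit words are filled below with the gather and
                 * scatter component counts (big-endian).
                 */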
373
374                 /* TODO Add error check if space will be sufficient */
375                 gather_comp =
376                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
377
378                 /*
379                  * Input Gather List
380                  */
381
382                 i = 0;
383
384                 /* Offset control word that includes iv */
385                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
386                                  ROC_SE_OFF_CTRL_LEN + iv_len);
387
388                 /* Add input data */
389                 size = inputlen - iv_len;
390                 if (likely(size)) {
391                         uint32_t aad_offset = aad_len ? passthrough_len : 0;
392
393                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
394                                 i = fill_sg_comp_from_buf_min(
395                                         gather_comp, i, fc_params->bufs, &size);
396                         } else {
397                                 i = fill_sg_comp_from_iov(
398                                         gather_comp, i, fc_params->src_iov, 0,
399                                         &size, aad_buf, aad_offset);
400                         }
401
402                         if (unlikely(size)) {
403                                 plt_dp_err("Insufficient buffer space,"
404                                            " size %d needed",
405                                            size);
406                                 return -1;
407                         }
408                 }
409                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
410                 g_size_bytes =
411                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
412
413                 /*
414                  * Output Scatter list
415                  */
416                 i = 0;
417                 scatter_comp =
418                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
419                                                       g_size_bytes);
420
421                 /* Add IV */
422                 if (likely(iv_len)) {
423                         i = fill_sg_comp(scatter_comp, i,
424                                          (uint64_t)offset_vaddr +
425                                                  ROC_SE_OFF_CTRL_LEN,
426                                          iv_len);
427                 }
428
429                 /* Output data, or output data + digest */
430                 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
431                         size = outputlen - iv_len - mac_len;
432                         if (size) {
433                                 uint32_t aad_offset =
434                                         aad_len ? passthrough_len : 0;
435
436                                 if (unlikely(flags &
437                                              ROC_SE_SINGLE_BUF_INPLACE)) {
438                                         i = fill_sg_comp_from_buf_min(
439                                                 scatter_comp, i,
440                                                 fc_params->bufs, &size);
441                                 } else {
442                                         i = fill_sg_comp_from_iov(
443                                                 scatter_comp, i,
444                                                 fc_params->dst_iov, 0, &size,
445                                                 aad_buf, aad_offset);
446                                 }
447                                 if (unlikely(size)) {
448                                         plt_dp_err("Insufficient buffer"
449                                                    " space, size %d needed",
450                                                    size);
451                                         return -1;
452                                 }
453                         }
454                         /* mac_data */
455                         if (mac_len) {
456                                 i = fill_sg_comp_from_buf(scatter_comp, i,
457                                                           &fc_params->mac_buf);
458                         }
459                 } else {
460                         /* Output including mac */
461                         size = outputlen - iv_len;
462                         if (likely(size)) {
463                                 uint32_t aad_offset =
464                                         aad_len ? passthrough_len : 0;
465
466                                 if (unlikely(flags &
467                                              ROC_SE_SINGLE_BUF_INPLACE)) {
468                                         i = fill_sg_comp_from_buf_min(
469                                                 scatter_comp, i,
470                                                 fc_params->bufs, &size);
471                                 } else {
472                                         i = fill_sg_comp_from_iov(
473                                                 scatter_comp, i,
474                                                 fc_params->dst_iov, 0, &size,
475                                                 aad_buf, aad_offset);
476                                 }
477                                 if (unlikely(size)) {
478                                         plt_dp_err("Insufficient buffer"
479                                                    " space, size %d needed",
480                                                    size);
481                                         return -1;
482                                 }
483                         }
484                 }
485                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
486                 s_size_bytes =
487                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
488
489                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
490
491                 /* This is DPTR len in case of SG mode */
492                 cpt_inst_w4.s.dlen = size;
493
494                 inst->dptr = (uint64_t)in_buffer;
495         }
496
497         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
498                      (auth_offset >> 8))) {
499                 plt_dp_err("Offset not supported");
500                 plt_dp_err("enc_offset: %d", encr_offset);
501                 plt_dp_err("iv_offset : %d", iv_offset);
502                 plt_dp_err("auth_offset: %d", auth_offset);
503                 return -1;
504         }
505
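        /*
         * The 8-byte offset control word packs encr_offset (16 bits),
         * iv_offset (8 bits) and auth_offset (8 bits); the check above
         * ensures the values fit. It is converted to big-endian before
         * being written at offset_vaddr.
         */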
506         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
507                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
508                 ((uint64_t)auth_offset));
509
510         inst->w4.u64 = cpt_inst_w4.u64;
511         return 0;
512 }
513
514 static __rte_always_inline int
515 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
516                   struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
517 {
518         uint32_t iv_offset = 0, size;
519         int32_t inputlen, outputlen, enc_dlen, auth_dlen;
520         struct roc_se_ctx *se_ctx;
521         int32_t hash_type, mac_len;
522         uint8_t iv_len = 16;
523         struct roc_se_buf_ptr *aad_buf = NULL;
524         uint32_t encr_offset, auth_offset;
525         uint32_t encr_data_len, auth_data_len, aad_len = 0;
526         uint32_t passthrough_len = 0;
527         union cpt_inst_w4 cpt_inst_w4;
528         void *offset_vaddr;
529         uint8_t op_minor;
530
531         encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
532         auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
533         encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
534         auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
535
536         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
537                 /* We don't support both AAD and auth data separately */
538                 auth_data_len = 0;
539                 auth_offset = 0;
540                 aad_len = fc_params->aad_buf.size;
541                 aad_buf = &fc_params->aad_buf;
542         }
543
544         se_ctx = fc_params->ctx_buf.vaddr;
545         hash_type = se_ctx->hash_type;
546         mac_len = se_ctx->mac_len;
547         op_minor = se_ctx->template_w4.s.opcode_minor;
548
549         if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
550                 iv_len = 0;
551                 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
552         }
553
554         if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
555                 /*
556                  * When AAD is given, data above encr_offset is passed through.
557                  * Since AAD is given as a separate pointer and not as an offset,
558                  * this is a special case: the input data is fragmented into
559                  * passthrough + encr_data, and the AAD is inserted in between.
560                  */
561                 if (hash_type != ROC_SE_GMAC_TYPE) {
562                         passthrough_len = encr_offset;
563                         auth_offset = passthrough_len + iv_len;
564                         encr_offset = passthrough_len + aad_len + iv_len;
565                         auth_data_len = aad_len + encr_data_len;
566                 } else {
567                         passthrough_len = 16 + aad_len;
568                         auth_offset = passthrough_len + iv_len;
569                         auth_data_len = aad_len;
570                 }
571         } else {
572                 encr_offset += iv_len;
573                 auth_offset += iv_len;
574         }
575
576         /* Decryption */
577         cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
578         cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
579         cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
580
581         if (hash_type == ROC_SE_GMAC_TYPE) {
582                 encr_offset = 0;
583                 encr_data_len = 0;
584         }
585
586         enc_dlen = encr_offset + encr_data_len;
587         auth_dlen = auth_offset + auth_data_len;
588
589         if (auth_dlen > enc_dlen) {
590                 inputlen = auth_dlen + mac_len;
591                 outputlen = auth_dlen;
592         } else {
593                 inputlen = enc_dlen + mac_len;
594                 outputlen = enc_dlen;
595         }
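        /*
         * For decrypt, the digest to be verified is part of the input
         * (inputlen includes mac_len above) and is not produced in the
         * output.
         */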
596
597         if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
598                 outputlen = inputlen = enc_dlen;
599
600         cpt_inst_w4.s.param1 = encr_data_len;
601         cpt_inst_w4.s.param2 = auth_data_len;
602
603         /*
604          * On cn9k and cn10k, the IV and offset control word cannot be part
605          * of the instruction and must be placed in the data buffer. Check
606          * that headroom is available and only then use direct mode
607          * processing; otherwise fall back to SG mode.
608          */
609         if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
610                    (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
611                 void *dm_vaddr = fc_params->bufs[0].vaddr;
612
613                 /* Use Direct mode */
614
615                 offset_vaddr =
616                         (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
617                 inst->dptr = (uint64_t)offset_vaddr;
618
619                 /* RPTR should just exclude offset control word */
620                 inst->rptr = (uint64_t)dm_vaddr - iv_len;
621
622                 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
623
624                 if (likely(iv_len)) {
625                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
626                                                       ROC_SE_OFF_CTRL_LEN);
627                         uint64_t *src = fc_params->iv_buf;
628                         dest[0] = src[0];
629                         dest[1] = src[1];
630                 }
631
632         } else {
633                 void *m_vaddr = fc_params->meta_buf.vaddr;
634                 uint32_t g_size_bytes, s_size_bytes;
635                 struct roc_se_sglist_comp *gather_comp;
636                 struct roc_se_sglist_comp *scatter_comp;
637                 uint8_t *in_buffer;
638                 uint8_t i = 0;
639
640                 /* This falls under strict SG mode */
641                 offset_vaddr = m_vaddr;
642                 size = ROC_SE_OFF_CTRL_LEN + iv_len;
643
644                 m_vaddr = (uint8_t *)m_vaddr + size;
645
646                 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
647
648                 if (likely(iv_len)) {
649                         uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
650                                                       ROC_SE_OFF_CTRL_LEN);
651                         uint64_t *src = fc_params->iv_buf;
652                         dest[0] = src[0];
653                         dest[1] = src[1];
654                 }
655
656                 /* DPTR has SG list */
657                 in_buffer = m_vaddr;
658
659                 ((uint16_t *)in_buffer)[0] = 0;
660                 ((uint16_t *)in_buffer)[1] = 0;
661
662                 /* TODO Add error check if space will be sufficient */
663                 gather_comp =
664                         (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
665
666                 /*
667                  * Input Gather List
668                  */
669                 i = 0;
670
671                 /* Offset control word that includes iv */
672                 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
673                                  ROC_SE_OFF_CTRL_LEN + iv_len);
674
675                 /* Add input data */
676                 if (flags & ROC_SE_VALID_MAC_BUF) {
677                         size = inputlen - iv_len - mac_len;
678                         if (size) {
679                                 /* input data only */
680                                 if (unlikely(flags &
681                                              ROC_SE_SINGLE_BUF_INPLACE)) {
682                                         i = fill_sg_comp_from_buf_min(
683                                                 gather_comp, i, fc_params->bufs,
684                                                 &size);
685                                 } else {
686                                         uint32_t aad_offset =
687                                                 aad_len ? passthrough_len : 0;
688
689                                         i = fill_sg_comp_from_iov(
690                                                 gather_comp, i,
691                                                 fc_params->src_iov, 0, &size,
692                                                 aad_buf, aad_offset);
693                                 }
694                                 if (unlikely(size)) {
695                                         plt_dp_err("Insufficient buffer"
696                                                    " space, size %d needed",
697                                                    size);
698                                         return -1;
699                                 }
700                         }
701
702                         /* mac data */
703                         if (mac_len) {
704                                 i = fill_sg_comp_from_buf(gather_comp, i,
705                                                           &fc_params->mac_buf);
706                         }
707                 } else {
708                         /* input data + mac */
709                         size = inputlen - iv_len;
710                         if (size) {
711                                 if (unlikely(flags &
712                                              ROC_SE_SINGLE_BUF_INPLACE)) {
713                                         i = fill_sg_comp_from_buf_min(
714                                                 gather_comp, i, fc_params->bufs,
715                                                 &size);
716                                 } else {
717                                         uint32_t aad_offset =
718                                                 aad_len ? passthrough_len : 0;
719
720                                         if (unlikely(!fc_params->src_iov)) {
721                                                 plt_dp_err("Bad input args");
722                                                 return -1;
723                                         }
724
725                                         i = fill_sg_comp_from_iov(
726                                                 gather_comp, i,
727                                                 fc_params->src_iov, 0, &size,
728                                                 aad_buf, aad_offset);
729                                 }
730
731                                 if (unlikely(size)) {
732                                         plt_dp_err("Insufficient buffer"
733                                                    " space, size %d needed",
734                                                    size);
735                                         return -1;
736                                 }
737                         }
738                 }
739                 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
740                 g_size_bytes =
741                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
742
743                 /*
744                  * Output Scatter List
745                  */
746
747                 i = 0;
748                 scatter_comp =
749                         (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
750                                                       g_size_bytes);
751
752                 /* Add iv */
753                 if (iv_len) {
754                         i = fill_sg_comp(scatter_comp, i,
755                                          (uint64_t)offset_vaddr +
756                                                  ROC_SE_OFF_CTRL_LEN,
757                                          iv_len);
758                 }
759
760                 /* Add output data */
761                 size = outputlen - iv_len;
762                 if (size) {
763                         if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
764                                 /* handle single buffer here */
765                                 i = fill_sg_comp_from_buf_min(scatter_comp, i,
766                                                               fc_params->bufs,
767                                                               &size);
768                         } else {
769                                 uint32_t aad_offset =
770                                         aad_len ? passthrough_len : 0;
771
772                                 if (unlikely(!fc_params->dst_iov)) {
773                                         plt_dp_err("Bad input args");
774                                         return -1;
775                                 }
776
777                                 i = fill_sg_comp_from_iov(
778                                         scatter_comp, i, fc_params->dst_iov, 0,
779                                         &size, aad_buf, aad_offset);
780                         }
781
782                         if (unlikely(size)) {
783                                 plt_dp_err("Insufficient buffer space,"
784                                            " size %d needed",
785                                            size);
786                                 return -1;
787                         }
788                 }
789
790                 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
791                 s_size_bytes =
792                         ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
793
794                 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
795
796                 /* This is DPTR len in case of SG mode */
797                 cpt_inst_w4.s.dlen = size;
798
799                 inst->dptr = (uint64_t)in_buffer;
800         }
801
802         if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
803                      (auth_offset >> 8))) {
804                 plt_dp_err("Offset not supported");
805                 plt_dp_err("enc_offset: %d", encr_offset);
806                 plt_dp_err("iv_offset : %d", iv_offset);
807                 plt_dp_err("auth_offset: %d", auth_offset);
808                 return -1;
809         }
810
811         *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
812                 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
813                 ((uint64_t)auth_offset));
814
815         inst->w4.u64 = cpt_inst_w4.u64;
816         return 0;
817 }
818
819 static __rte_always_inline int
820 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
821                      struct roc_se_fc_params *fc_params,
822                      struct cpt_inst_s *inst)
823 {
824         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
825         uint8_t fc_type;
826         int ret = -1;
827
828         fc_type = ctx->fc_type;
829
830         if (likely(fc_type == ROC_SE_FC_GEN))
831                 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
832         return ret;
833 }
834
835 static __rte_always_inline int
836 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
837                      struct roc_se_fc_params *fc_params,
838                      struct cpt_inst_s *inst)
839 {
840         struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
841         uint8_t fc_type;
842         int ret = -1;
843
844         fc_type = ctx->fc_type;
845
846         if (likely(fc_type == ROC_SE_FC_GEN))
847                 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
848
849         return ret;
850 }
851
852 static __rte_always_inline int
853 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
854 {
855         struct rte_crypto_aead_xform *aead_form;
856         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
857         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
858         uint32_t cipher_key_len = 0;
859         uint8_t aes_gcm = 0;
860         aead_form = &xform->aead;
861
862         if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
863                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
864                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
865         } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
866                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
867                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
868         } else {
869                 plt_dp_err("Unknown aead operation");
870                 return -1;
871         }
872         switch (aead_form->algo) {
873         case RTE_CRYPTO_AEAD_AES_GCM:
874                 enc_type = ROC_SE_AES_GCM;
875                 cipher_key_len = 16;
876                 aes_gcm = 1;
877                 break;
878         case RTE_CRYPTO_AEAD_AES_CCM:
879                 plt_dp_err("Crypto: Unsupported cipher algo %u",
880                            aead_form->algo);
881                 return -1;
882         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
883                 enc_type = ROC_SE_CHACHA20;
884                 auth_type = ROC_SE_POLY1305;
885                 cipher_key_len = 32;
886                 sess->chacha_poly = 1;
887                 break;
888         default:
889                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
890                            aead_form->algo);
891                 return -1;
892         }
893         if (aead_form->key.length < cipher_key_len) {
894                 plt_dp_err("Invalid cipher params keylen %u",
895                            aead_form->key.length);
896                 return -1;
897         }
898         sess->zsk_flag = 0;
899         sess->aes_gcm = aes_gcm;
900         sess->mac_len = aead_form->digest_length;
901         sess->iv_offset = aead_form->iv.offset;
902         sess->iv_length = aead_form->iv.length;
903         sess->aad_length = aead_form->aad_length;
904
905         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
906                                          aead_form->key.data,
907                                          aead_form->key.length, NULL)))
908                 return -1;
909
910         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
911                                          aead_form->digest_length)))
912                 return -1;
913
914         return 0;
915 }
916
917 static __rte_always_inline int
918 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
919 {
920         struct rte_crypto_cipher_xform *c_form;
921         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
922         uint32_t cipher_key_len = 0;
923         uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
924
925         c_form = &xform->cipher;
926
927         if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
928                 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
929         else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
930                 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
931                 if (xform->next != NULL &&
932                     xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
933                         /* Perform decryption followed by auth verify */
934                         sess->roc_se_ctx.template_w4.s.opcode_minor =
935                                 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
936                 }
937         } else {
938                 plt_dp_err("Unknown cipher operation");
939                 return -1;
940         }
941
942         switch (c_form->algo) {
943         case RTE_CRYPTO_CIPHER_AES_CBC:
944                 enc_type = ROC_SE_AES_CBC;
945                 cipher_key_len = 16;
946                 break;
947         case RTE_CRYPTO_CIPHER_3DES_CBC:
948                 enc_type = ROC_SE_DES3_CBC;
949                 cipher_key_len = 24;
950                 break;
951         case RTE_CRYPTO_CIPHER_DES_CBC:
952                 /* DES is implemented using 3DES in hardware */
953                 enc_type = ROC_SE_DES3_CBC;
954                 cipher_key_len = 8;
955                 break;
956         case RTE_CRYPTO_CIPHER_AES_CTR:
957                 enc_type = ROC_SE_AES_CTR;
958                 cipher_key_len = 16;
959                 aes_ctr = 1;
960                 break;
961         case RTE_CRYPTO_CIPHER_NULL:
962                 enc_type = 0;
963                 is_null = 1;
964                 break;
965         case RTE_CRYPTO_CIPHER_KASUMI_F8:
966                 enc_type = ROC_SE_KASUMI_F8_ECB;
967                 cipher_key_len = 16;
968                 zsk_flag = ROC_SE_K_F8;
969                 break;
970         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
971                 enc_type = ROC_SE_SNOW3G_UEA2;
972                 cipher_key_len = 16;
973                 zsk_flag = ROC_SE_ZS_EA;
974                 break;
975         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
976                 enc_type = ROC_SE_ZUC_EEA3;
977                 cipher_key_len = 16;
978                 zsk_flag = ROC_SE_ZS_EA;
979                 break;
980         case RTE_CRYPTO_CIPHER_AES_XTS:
981                 enc_type = ROC_SE_AES_XTS;
982                 cipher_key_len = 16;
983                 break;
984         case RTE_CRYPTO_CIPHER_3DES_ECB:
985                 enc_type = ROC_SE_DES3_ECB;
986                 cipher_key_len = 24;
987                 break;
988         case RTE_CRYPTO_CIPHER_AES_ECB:
989                 enc_type = ROC_SE_AES_ECB;
990                 cipher_key_len = 16;
991                 break;
992         case RTE_CRYPTO_CIPHER_3DES_CTR:
993         case RTE_CRYPTO_CIPHER_AES_F8:
994         case RTE_CRYPTO_CIPHER_ARC4:
995                 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
996                 return -1;
997         default:
998                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
999                            c_form->algo);
1000                 return -1;
1001         }
1002
1003         if (c_form->key.length < cipher_key_len) {
1004                 plt_dp_err("Invalid cipher params keylen %u",
1005                            c_form->key.length);
1006                 return -1;
1007         }
1008
1009         sess->zsk_flag = zsk_flag;
1010         sess->aes_gcm = 0;
1011         sess->aes_ctr = aes_ctr;
1012         sess->iv_offset = c_form->iv.offset;
1013         sess->iv_length = c_form->iv.length;
1014         sess->is_null = is_null;
1015
1016         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1017                                          c_form->key.data, c_form->key.length,
1018                                          NULL)))
1019                 return -1;
1020
1021         return 0;
1022 }
1023
1024 static __rte_always_inline int
1025 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1026 {
1027         struct rte_crypto_auth_xform *a_form;
1028         roc_se_auth_type auth_type = 0; /* NULL Auth type */
1029         uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1030
1031         if (xform->next != NULL &&
1032             xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1033             xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1034                 /* Perform auth followed by encryption */
1035                 sess->roc_se_ctx.template_w4.s.opcode_minor =
1036                         ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1037         }
1038
1039         a_form = &xform->auth;
1040
1041         if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1042                 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1043         else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1044                 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1045         else {
1046                 plt_dp_err("Unknown auth operation");
1047                 return -1;
1048         }
1049
1050         switch (a_form->algo) {
1051         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1052                 /* Fall through */
1053         case RTE_CRYPTO_AUTH_SHA1:
1054                 auth_type = ROC_SE_SHA1_TYPE;
1055                 break;
1056         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1057         case RTE_CRYPTO_AUTH_SHA256:
1058                 auth_type = ROC_SE_SHA2_SHA256;
1059                 break;
1060         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1061         case RTE_CRYPTO_AUTH_SHA512:
1062                 auth_type = ROC_SE_SHA2_SHA512;
1063                 break;
1064         case RTE_CRYPTO_AUTH_AES_GMAC:
1065                 auth_type = ROC_SE_GMAC_TYPE;
1066                 aes_gcm = 1;
1067                 break;
1068         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1069         case RTE_CRYPTO_AUTH_SHA224:
1070                 auth_type = ROC_SE_SHA2_SHA224;
1071                 break;
1072         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1073         case RTE_CRYPTO_AUTH_SHA384:
1074                 auth_type = ROC_SE_SHA2_SHA384;
1075                 break;
1076         case RTE_CRYPTO_AUTH_MD5_HMAC:
1077         case RTE_CRYPTO_AUTH_MD5:
1078                 auth_type = ROC_SE_MD5_TYPE;
1079                 break;
1080         case RTE_CRYPTO_AUTH_KASUMI_F9:
1081                 auth_type = ROC_SE_KASUMI_F9_ECB;
1082                 /*
1083                  * Indicate that the direction bit is to be taken from the
1084                  * end of the source data.
1085                  */
1086                 zsk_flag = ROC_SE_K_F9;
1087                 break;
1088         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1089                 auth_type = ROC_SE_SNOW3G_UIA2;
1090                 zsk_flag = ROC_SE_ZS_IA;
1091                 break;
1092         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1093                 auth_type = ROC_SE_ZUC_EIA3;
1094                 zsk_flag = ROC_SE_ZS_IA;
1095                 break;
1096         case RTE_CRYPTO_AUTH_NULL:
1097                 auth_type = 0;
1098                 is_null = 1;
1099                 break;
1100         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1101         case RTE_CRYPTO_AUTH_AES_CMAC:
1102         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1103                 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
1104                 return -1;
1105         default:
1106                 plt_dp_err("Crypto: Undefined Hash algo %u specified",
1107                            a_form->algo);
1108                 return -1;
1109         }
1110
1111         sess->zsk_flag = zsk_flag;
1112         sess->aes_gcm = aes_gcm;
1113         sess->mac_len = a_form->digest_length;
1114         sess->is_null = is_null;
1115         if (zsk_flag) {
1116                 sess->auth_iv_offset = a_form->iv.offset;
1117                 sess->auth_iv_length = a_form->iv.length;
1118         }
1119         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
1120                                          a_form->key.data, a_form->key.length,
1121                                          a_form->digest_length)))
1122                 return -1;
1123
1124         return 0;
1125 }
1126
1127 static __rte_always_inline int
1128 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1129 {
1130         struct rte_crypto_auth_xform *a_form;
1131         roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1132         roc_se_auth_type auth_type = 0;  /* NULL Auth type */
1133
1134         a_form = &xform->auth;
1135
1136         if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1137                 sess->cpt_op |= ROC_SE_OP_ENCODE;
1138         else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1139                 sess->cpt_op |= ROC_SE_OP_DECODE;
1140         else {
1141                 plt_dp_err("Unknown auth operation");
1142                 return -1;
1143         }
1144
1145         switch (a_form->algo) {
1146         case RTE_CRYPTO_AUTH_AES_GMAC:
1147                 enc_type = ROC_SE_AES_GCM;
1148                 auth_type = ROC_SE_GMAC_TYPE;
1149                 break;
1150         default:
1151                 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1152                            a_form->algo);
1153                 return -1;
1154         }
1155
1156         sess->zsk_flag = 0;
1157         sess->aes_gcm = 0;
1158         sess->is_gmac = 1;
1159         sess->iv_offset = a_form->iv.offset;
1160         sess->iv_length = a_form->iv.length;
1161         sess->mac_len = a_form->digest_length;
1162
1163         if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1164                                          a_form->key.data, a_form->key.length,
1165                                          NULL)))
1166                 return -1;
1167
1168         if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1169                                          a_form->digest_length)))
1170                 return -1;
1171
1172         return 0;
1173 }
1174
1175 static __rte_always_inline void *
1176 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
1177               struct rte_mempool *cpt_meta_pool,
1178               struct cpt_inflight_req *infl_req)
1179 {
1180         uint8_t *mdata;
1181
1182         if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1183                 return NULL;
1184
1185         buf->vaddr = mdata;
1186         buf->size = len;
1187
1188         infl_req->mdata = mdata;
1189         infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
1190
1191         return mdata;
1192 }
1193
1194 static __rte_always_inline uint32_t
1195 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
1196                      uint32_t start_offset)
1197 {
1198         uint16_t index = 0;
1199         void *seg_data = NULL;
1200         int32_t seg_size = 0;
1201
1202         if (!pkt) {
1203                 iovec->buf_cnt = 0;
1204                 return 0;
1205         }
1206
1207         if (!start_offset) {
1208                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1209                 seg_size = pkt->data_len;
1210         } else {
1211                 while (start_offset >= pkt->data_len) {
1212                         start_offset -= pkt->data_len;
1213                         pkt = pkt->next;
1214                 }
1215
1216                 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
1217                 seg_size = pkt->data_len - start_offset;
1218                 if (!seg_size)
1219                         return 1;
1220         }
1221
1222         /* first seg */
1223         iovec->bufs[index].vaddr = seg_data;
1224         iovec->bufs[index].size = seg_size;
1225         index++;
1226         pkt = pkt->next;
1227
1228         while (unlikely(pkt != NULL)) {
1229                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1230                 seg_size = pkt->data_len;
1231                 if (!seg_size)
1232                         break;
1233
1234                 iovec->bufs[index].vaddr = seg_data;
1235                 iovec->bufs[index].size = seg_size;
1236
1237                 index++;
1238
1239                 pkt = pkt->next;
1240         }
1241
1242         iovec->buf_cnt = index;
1243         return 0;
1244 }
1245
1246 static __rte_always_inline uint32_t
1247 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
1248                              struct roc_se_fc_params *param, uint32_t *flags)
1249 {
1250         uint16_t index = 0;
1251         void *seg_data = NULL;
1252         uint32_t seg_size = 0;
1253         struct roc_se_iov_ptr *iovec;
1254
1255         seg_data = rte_pktmbuf_mtod(pkt, void *);
1256         seg_size = pkt->data_len;
1257
1258         /* first seg */
1259         if (likely(!pkt->next)) {
1260                 uint32_t headroom;
1261
1262                 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
1263                 headroom = rte_pktmbuf_headroom(pkt);
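                /*
                 * Direct mode needs headroom for the offset control word
                 * (ROC_SE_OFF_CTRL_LEN) plus a 16-byte IV, 24 bytes in total.
                 */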
1264                 if (likely(headroom >= 24))
1265                         *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
1266
1267                 param->bufs[0].vaddr = seg_data;
1268                 param->bufs[0].size = seg_size;
1269                 return 0;
1270         }
1271         iovec = param->src_iov;
1272         iovec->bufs[index].vaddr = seg_data;
1273         iovec->bufs[index].size = seg_size;
1274         index++;
1275         pkt = pkt->next;
1276
1277         while (unlikely(pkt != NULL)) {
1278                 seg_data = rte_pktmbuf_mtod(pkt, void *);
1279                 seg_size = pkt->data_len;
1280
1281                 if (!seg_size)
1282                         break;
1283
1284                 iovec->bufs[index].vaddr = seg_data;
1285                 iovec->bufs[index].size = seg_size;
1286
1287                 index++;
1288
1289                 pkt = pkt->next;
1290         }
1291
1292         iovec->buf_cnt = index;
1293         return 0;
1294 }
1295
1296 static __rte_always_inline int
1297 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
1298                struct cpt_qp_meta_info *m_info,
1299                struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
1300 {
1301         struct roc_se_ctx *ctx = &sess->roc_se_ctx;
1302         uint8_t op_minor = ctx->template_w4.s.opcode_minor;
1303         struct rte_crypto_sym_op *sym_op = cop->sym;
1304         void *mdata = NULL;
1305         uint32_t mc_hash_off;
1306         uint32_t flags = 0;
1307         uint64_t d_offs, d_lens;
1308         struct rte_mbuf *m_src, *m_dst;
1309         uint8_t cpt_op = sess->cpt_op;
1310 #ifdef CPT_ALWAYS_USE_SG_MODE
1311         uint8_t inplace = 0;
1312 #else
1313         uint8_t inplace = 1;
1314 #endif
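        /*
         * Building with CPT_ALWAYS_USE_SG_MODE forces the SG path even for
         * single-segment, in-place buffers.
         */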
1315         struct roc_se_fc_params fc_params;
1316         char src[SRC_IOV_SIZE];
1317         char dst[SRC_IOV_SIZE];
1318         uint32_t iv_buf[4];
1319         int ret;
1320
1321         if (likely(sess->iv_length)) {
1322                 flags |= ROC_SE_VALID_IV_BUF;
1323                 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
1324                                                              sess->iv_offset);
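                /*
                 * For AES-CTR, when the IV provided is not a full 16-byte
                 * counter block, copy the 12-byte nonce and set the initial
                 * block counter to 1 (big-endian).
                 */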
1325                 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
1326                         memcpy((uint8_t *)iv_buf,
1327                                rte_crypto_op_ctod_offset(cop, uint8_t *,
1328                                                          sess->iv_offset),
1329                                12);
1330                         iv_buf[3] = rte_cpu_to_be_32(0x1);
1331                         fc_params.iv_buf = iv_buf;
1332                 }
1333         }
1334
1335         if (sess->zsk_flag) {
1336                 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
1337                         cop, uint8_t *, sess->auth_iv_offset);
1338                 if (sess->zsk_flag != ROC_SE_ZS_EA)
1339                         inplace = 0;
1340         }
1341         m_src = sym_op->m_src;
1342         m_dst = sym_op->m_dst;
1343
1344         if (sess->aes_gcm || sess->chacha_poly) {
1345                 uint8_t *salt;
1346                 uint8_t *aad_data;
1347                 uint16_t aad_len;
1348
1349                 d_offs = sym_op->aead.data.offset;
1350                 d_lens = sym_op->aead.data.length;
1351                 mc_hash_off =
1352                         sym_op->aead.data.offset + sym_op->aead.data.length;
1353
1354                 aad_data = sym_op->aead.aad.data;
1355                 aad_len = sess->aad_length;
1356                 if (likely((aad_data + aad_len) ==
1357                            rte_pktmbuf_mtod_offset(m_src, uint8_t *,
1358                                                    sym_op->aead.data.offset))) {
1359                         d_offs = (d_offs - aad_len) | (d_offs << 16);
1360                         d_lens = (d_lens + aad_len) | (d_lens << 32);
1361                 } else {
1362                         fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
1363                         fc_params.aad_buf.size = aad_len;
1364                         flags |= ROC_SE_VALID_AAD_BUF;
1365                         inplace = 0;
1366                         d_offs = d_offs << 16;
1367                         d_lens = d_lens << 32;
1368                 }
1369
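                     /* The first 4 IV bytes are the salt; refresh the
                      * SE context only when the salt changes.
                      */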
1370                 salt = fc_params.iv_buf;
1371                 if (unlikely(*(uint32_t *)salt != sess->salt)) {
1372                         cpt_fc_salt_update(&sess->roc_se_ctx, salt);
1373                         sess->salt = *(uint32_t *)salt;
1374                 }
1375                 fc_params.iv_buf = salt + 4;
1376                 if (likely(sess->mac_len)) {
1377                         struct rte_mbuf *m =
1378                                 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
1379
1380                         if (!m)
1381                                 m = m_src;
1382
1383                         /* HMAC immediately following the data is the best case */
1384                         if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
1385                                              mc_hash_off !=
1386                                      (uint8_t *)sym_op->aead.digest.data)) {
1387                                 flags |= ROC_SE_VALID_MAC_BUF;
1388                                 fc_params.mac_buf.size = sess->mac_len;
1389                                 fc_params.mac_buf.vaddr =
1390                                         sym_op->aead.digest.data;
1391                                 inplace = 0;
1392                         }
1393                 }
1394         } else {
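                     /* Cipher + auth chain: the upper halves of d_offs/d_lens
                      * carry the cipher offset/length, the lower halves the
                      * auth offset/length.
                      */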
1395                 d_offs = sym_op->cipher.data.offset;
1396                 d_lens = sym_op->cipher.data.length;
1397                 mc_hash_off =
1398                         sym_op->cipher.data.offset + sym_op->cipher.data.length;
1399                 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
1400                 d_lens = (d_lens << 32) | sym_op->auth.data.length;
1401
1402                 if (mc_hash_off <
1403                     (sym_op->auth.data.offset + sym_op->auth.data.length)) {
1404                         mc_hash_off = (sym_op->auth.data.offset +
1405                                        sym_op->auth.data.length);
1406                 }
1407                 /* For GMAC, the salt must be updated just as it is for GCM */
1408                 if (unlikely(sess->is_gmac)) {
1409                         uint8_t *salt;
1410                         salt = fc_params.iv_buf;
1411                         if (unlikely(*(uint32_t *)salt != sess->salt)) {
1412                                 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
1413                                 sess->salt = *(uint32_t *)salt;
1414                         }
1415                         fc_params.iv_buf = salt + 4;
1416                 }
1417                 if (likely(sess->mac_len)) {
1418                         struct rte_mbuf *m;
1419
1420                         m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
1421                         if (!m)
1422                                 m = m_src;
1423
1424                         /* HMAC immediately following the data is the best case */
1425                         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
1426                             (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
1427                                               mc_hash_off !=
1428                                       (uint8_t *)sym_op->auth.digest.data))) {
1429                                 flags |= ROC_SE_VALID_MAC_BUF;
1430                                 fc_params.mac_buf.size = sess->mac_len;
1431                                 fc_params.mac_buf.vaddr =
1432                                         sym_op->auth.digest.data;
1433                                 inplace = 0;
1434                         }
1435                 }
1436         }
1437         fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
1438
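             /* Unless HMAC-first is set, NULL cipher and decode ops must
              * take the SG (out-of-place) path.
              */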
1439         if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
1440             unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
1441                 inplace = 0;
1442
1443         if (likely(!m_dst && inplace)) {
1444                 /* In-place case: a single buffer with no separate
1445                  * AAD or MAC buffer, and not a wireless (air)
1446                  * crypto operation.
1447                  */
1448                 fc_params.dst_iov = fc_params.src_iov = (void *)src;
1449
1450                 if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
1451                                                           &flags))) {
1452                         plt_dp_err("Prepare inplace src iov failed");
1453                         ret = -EINVAL;
1454                         goto err_exit;
1455                 }
1456
1457         } else {
1458                 /* Out-of-place processing */
1459                 fc_params.src_iov = (void *)src;
1460                 fc_params.dst_iov = (void *)dst;
1461
1462                 /* Store the SG I/O lists in the API for reuse */
1463                 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
1464                         plt_dp_err("Prepare src iov failed");
1465                         ret = -EINVAL;
1466                         goto err_exit;
1467                 }
1468
1469                 if (unlikely(m_dst != NULL)) {
1470                         uint32_t pkt_len;
1471
1472                         /* Try to grow m_dst so it has as much room as src */
1473                         pkt_len = rte_pktmbuf_pkt_len(m_dst);
1474
1475                         if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
1476                                 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
1477                                 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
1478                                         plt_dp_err("Not enough space in "
1479                                                    "m_dst %p, need %u"
1480                                                    " more",
1481                                                    m_dst, pkt_len);
1482                                         ret = -EINVAL;
1483                                         goto err_exit;
1484                                 }
1485                         }
1486
1487                         if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
1488                                 plt_dp_err("Prepare dst iov failed for "
1489                                            "m_dst %p",
1490                                            m_dst);
1491                                 ret = -EINVAL;
1492                                 goto err_exit;
1493                         }
1494                 } else {
1495                         fc_params.dst_iov = (void *)src;
1496                 }
1497         }
1498
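             /* A meta buffer is needed unless the single-buffer in-place
              * fast path applies (FC_GEN or PDCP with enough headroom).
              */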
1499         if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1500                        (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
1501                        ((ctx->fc_type == ROC_SE_FC_GEN) ||
1502                         (ctx->fc_type == ROC_SE_PDCP))))) {
1503                 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
1504                                       m_info->pool, infl_req);
1505                 if (mdata == NULL) {
1506                         plt_dp_err("Error allocating meta buffer for request");
1507                         return -ENOMEM;
1508                 }
1509         }
1510
1511         /* Finally, prepare the instruction */
1512         if (cpt_op & ROC_SE_OP_ENCODE)
1513                 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
1514                                            inst);
1515         else
1516                 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
1517                                            inst);
1518
1519         if (unlikely(ret)) {
1520                 plt_dp_err("Preparing request failed due to bad input arg");
1521                 goto free_mdata_and_exit;
1522         }
1523
1524         return 0;
1525
1526 free_mdata_and_exit:
1527         if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
1528                 rte_mempool_put(m_info->pool, infl_req->mdata);
1529 err_exit:
1530         return ret;
1531 }
1532
1533 #endif /* _CNXK_SE_H_ */