crypto/ccp: support AES-CMAC
[dpdk.git] / drivers / crypto / ccp / ccp_crypto.c
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  */
4
5 #include <dirent.h>
6 #include <fcntl.h>
7 #include <stdio.h>
8 #include <string.h>
9 #include <sys/mman.h>
10 #include <sys/queue.h>
11 #include <sys/types.h>
12 #include <unistd.h>
13 #include <openssl/cmac.h> /*sub key apis*/
14 #include <openssl/evp.h> /*sub key apis*/
15
16 #include <rte_hexdump.h>
17 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_memory.h>
20 #include <rte_spinlock.h>
21 #include <rte_string_fns.h>
22 #include <rte_cryptodev_pmd.h>
23
24 #include "ccp_dev.h"
25 #include "ccp_crypto.h"
26 #include "ccp_pci.h"
27 #include "ccp_pmd_private.h"
28
29 static enum ccp_cmd_order
30 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
31 {
32         enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
33
34         if (xform == NULL)
35                 return res;
36         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
37                 if (xform->next == NULL)
38                         return CCP_CMD_AUTH;
39                 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
40                         return CCP_CMD_HASH_CIPHER;
41         }
42         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
43                 if (xform->next == NULL)
44                         return CCP_CMD_CIPHER;
45                 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
46                         return CCP_CMD_CIPHER_HASH;
47         }
48         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
49                 return CCP_CMD_COMBINED;
50         return res;
51 }
52
53 /* prepare temporary keys K1 and K2 */
/*
 * Derive a CMAC subkey (NIST SP 800-38B / RFC 4493): left-shift the
 * bl-byte block @l by one bit into @k and, when the bit shifted out of
 * the MSB was set, XOR the field constant Rb into the last byte
 * (0x87 for the 16-byte AES block, 0x1b for the 8-byte DES block).
 */
static void prepare_key(unsigned char *k, unsigned char *l, int bl)
{
	int i;
	unsigned int carry = 0;

	/* Shift left by one bit, propagating carries LSB-to-MSB */
	for (i = bl - 1; i >= 0; i--) {
		unsigned int v = ((unsigned int)l[i] << 1) | carry;

		k[i] = (unsigned char)v;
		carry = (v >> 8) & 1;
	}
	/* A carry out of the top bit means the fixup with Rb applies */
	if (carry)
		k[bl - 1] ^= (bl == 16) ? 0x87 : 0x1b;
}
67
68 /* subkeys K1 and K2 generation for CMAC */
69 static int
70 generate_cmac_subkeys(struct ccp_session *sess)
71 {
72         const EVP_CIPHER *algo;
73         EVP_CIPHER_CTX *ctx;
74         unsigned char *ccp_ctx;
75         size_t i;
76         int dstlen, totlen;
77         unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
78         unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
79         unsigned char k1[AES_BLOCK_SIZE] = {0};
80         unsigned char k2[AES_BLOCK_SIZE] = {0};
81
82         if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
83                 algo =  EVP_aes_128_cbc();
84         else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
85                 algo =  EVP_aes_192_cbc();
86         else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
87                 algo =  EVP_aes_256_cbc();
88         else {
89                 CCP_LOG_ERR("Invalid CMAC type length");
90                 return -1;
91         }
92
93         ctx = EVP_CIPHER_CTX_new();
94         if (!ctx) {
95                 CCP_LOG_ERR("ctx creation failed");
96                 return -1;
97         }
98         if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
99                             (unsigned char *)zero_iv) <= 0)
100                 goto key_generate_err;
101         if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
102                 goto key_generate_err;
103         if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
104                               AES_BLOCK_SIZE) <= 0)
105                 goto key_generate_err;
106         if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
107                 goto key_generate_err;
108
109         memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
110
111         ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
112         prepare_key(k1, dst, AES_BLOCK_SIZE);
113         for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
114                 *ccp_ctx = k1[i];
115
116         ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
117                                    (2 * CCP_SB_BYTES) - 1);
118         prepare_key(k2, k1, AES_BLOCK_SIZE);
119         for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
120                 *ccp_ctx = k2[i];
121
122         EVP_CIPHER_CTX_free(ctx);
123
124         return 0;
125
126 key_generate_err:
127         CCP_LOG_ERR("CMAC Init failed");
128                 return -1;
129 }
130
131 /* configure session */
132 static int
133 ccp_configure_session_cipher(struct ccp_session *sess,
134                              const struct rte_crypto_sym_xform *xform)
135 {
136         const struct rte_crypto_cipher_xform *cipher_xform = NULL;
137         size_t i, j, x;
138
139         cipher_xform = &xform->cipher;
140
141         /* set cipher direction */
142         if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
143                 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
144         else
145                 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
146
147         /* set cipher key */
148         sess->cipher.key_length = cipher_xform->key.length;
149         rte_memcpy(sess->cipher.key, cipher_xform->key.data,
150                    cipher_xform->key.length);
151
152         /* set iv parameters */
153         sess->iv.offset = cipher_xform->iv.offset;
154         sess->iv.length = cipher_xform->iv.length;
155
156         switch (cipher_xform->algo) {
157         case RTE_CRYPTO_CIPHER_AES_CTR:
158                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
159                 sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
160                 sess->cipher.engine = CCP_ENGINE_AES;
161                 break;
162         case RTE_CRYPTO_CIPHER_AES_ECB:
163                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
164                 sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
165                 sess->cipher.engine = CCP_ENGINE_AES;
166                 break;
167         case RTE_CRYPTO_CIPHER_AES_CBC:
168                 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
169                 sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
170                 sess->cipher.engine = CCP_ENGINE_AES;
171                 break;
172         case RTE_CRYPTO_CIPHER_3DES_CBC:
173                 sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
174                 sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
175                 sess->cipher.engine = CCP_ENGINE_3DES;
176                 break;
177         default:
178                 CCP_LOG_ERR("Unsupported cipher algo");
179                 return -1;
180         }
181
182
183         switch (sess->cipher.engine) {
184         case CCP_ENGINE_AES:
185                 if (sess->cipher.key_length == 16)
186                         sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
187                 else if (sess->cipher.key_length == 24)
188                         sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
189                 else if (sess->cipher.key_length == 32)
190                         sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
191                 else {
192                         CCP_LOG_ERR("Invalid cipher key length");
193                         return -1;
194                 }
195                 for (i = 0; i < sess->cipher.key_length ; i++)
196                         sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
197                                 sess->cipher.key[i];
198                 break;
199         case CCP_ENGINE_3DES:
200                 if (sess->cipher.key_length == 16)
201                         sess->cipher.ut.des_type = CCP_DES_TYPE_128;
202                 else if (sess->cipher.key_length == 24)
203                         sess->cipher.ut.des_type = CCP_DES_TYPE_192;
204                 else {
205                         CCP_LOG_ERR("Invalid cipher key length");
206                         return -1;
207                 }
208                 for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
209                         for (i = 0; i < 8; i++)
210                                 sess->cipher.key_ccp[(8 + x) - i - 1] =
211                                         sess->cipher.key[i + x];
212                 break;
213         default:
214                 CCP_LOG_ERR("Invalid CCP Engine");
215                 return -ENOTSUP;
216         }
217         sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
218         sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
219         return 0;
220 }
221
222 static int
223 ccp_configure_session_auth(struct ccp_session *sess,
224                            const struct rte_crypto_sym_xform *xform)
225 {
226         const struct rte_crypto_auth_xform *auth_xform = NULL;
227         size_t i;
228
229         auth_xform = &xform->auth;
230
231         sess->auth.digest_length = auth_xform->digest_length;
232         if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
233                 sess->auth.op = CCP_AUTH_OP_GENERATE;
234         else
235                 sess->auth.op = CCP_AUTH_OP_VERIFY;
236         switch (auth_xform->algo) {
237         case RTE_CRYPTO_AUTH_AES_CMAC:
238                 sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
239                 sess->auth.engine = CCP_ENGINE_AES;
240                 sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
241                 sess->auth.key_length = auth_xform->key.length;
242                 /**<padding and hash result*/
243                 sess->auth.ctx_len = CCP_SB_BYTES << 1;
244                 sess->auth.offset = AES_BLOCK_SIZE;
245                 sess->auth.block_size = AES_BLOCK_SIZE;
246                 if (sess->auth.key_length == 16)
247                         sess->auth.ut.aes_type = CCP_AES_TYPE_128;
248                 else if (sess->auth.key_length == 24)
249                         sess->auth.ut.aes_type = CCP_AES_TYPE_192;
250                 else if (sess->auth.key_length == 32)
251                         sess->auth.ut.aes_type = CCP_AES_TYPE_256;
252                 else {
253                         CCP_LOG_ERR("Invalid CMAC key length");
254                         return -1;
255                 }
256                 rte_memcpy(sess->auth.key, auth_xform->key.data,
257                            sess->auth.key_length);
258                 for (i = 0; i < sess->auth.key_length; i++)
259                         sess->auth.key_ccp[sess->auth.key_length - i - 1] =
260                                 sess->auth.key[i];
261                 if (generate_cmac_subkeys(sess))
262                         return -1;
263                 break;
264         default:
265                 CCP_LOG_ERR("Unsupported hash algo");
266                 return -ENOTSUP;
267         }
268         return 0;
269 }
270
271 static int
272 ccp_configure_session_aead(struct ccp_session *sess,
273                            const struct rte_crypto_sym_xform *xform)
274 {
275         const struct rte_crypto_aead_xform *aead_xform = NULL;
276
277         aead_xform = &xform->aead;
278
279         sess->cipher.key_length = aead_xform->key.length;
280         rte_memcpy(sess->cipher.key, aead_xform->key.data,
281                    aead_xform->key.length);
282
283         if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
284                 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
285                 sess->auth.op = CCP_AUTH_OP_GENERATE;
286         } else {
287                 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
288                 sess->auth.op = CCP_AUTH_OP_VERIFY;
289         }
290         sess->auth.aad_length = aead_xform->aad_length;
291         sess->auth.digest_length = aead_xform->digest_length;
292
293         /* set iv parameters */
294         sess->iv.offset = aead_xform->iv.offset;
295         sess->iv.length = aead_xform->iv.length;
296
297         switch (aead_xform->algo) {
298         default:
299                 CCP_LOG_ERR("Unsupported aead algo");
300                 return -ENOTSUP;
301         }
302         return 0;
303 }
304
305 int
306 ccp_set_session_parameters(struct ccp_session *sess,
307                            const struct rte_crypto_sym_xform *xform)
308 {
309         const struct rte_crypto_sym_xform *cipher_xform = NULL;
310         const struct rte_crypto_sym_xform *auth_xform = NULL;
311         const struct rte_crypto_sym_xform *aead_xform = NULL;
312         int ret = 0;
313
314         sess->cmd_id = ccp_get_cmd_id(xform);
315
316         switch (sess->cmd_id) {
317         case CCP_CMD_CIPHER:
318                 cipher_xform = xform;
319                 break;
320         case CCP_CMD_AUTH:
321                 auth_xform = xform;
322                 break;
323         case CCP_CMD_CIPHER_HASH:
324                 cipher_xform = xform;
325                 auth_xform = xform->next;
326                 break;
327         case CCP_CMD_HASH_CIPHER:
328                 auth_xform = xform;
329                 cipher_xform = xform->next;
330                 break;
331         case CCP_CMD_COMBINED:
332                 aead_xform = xform;
333                 break;
334         default:
335                 CCP_LOG_ERR("Unsupported cmd_id");
336                 return -1;
337         }
338
339         /* Default IV length = 0 */
340         sess->iv.length = 0;
341         if (cipher_xform) {
342                 ret = ccp_configure_session_cipher(sess, cipher_xform);
343                 if (ret != 0) {
344                         CCP_LOG_ERR("Invalid/unsupported cipher parameters");
345                         return ret;
346                 }
347         }
348         if (auth_xform) {
349                 ret = ccp_configure_session_auth(sess, auth_xform);
350                 if (ret != 0) {
351                         CCP_LOG_ERR("Invalid/unsupported auth parameters");
352                         return ret;
353                 }
354         }
355         if (aead_xform) {
356                 ret = ccp_configure_session_aead(sess, aead_xform);
357                 if (ret != 0) {
358                         CCP_LOG_ERR("Invalid/unsupported aead parameters");
359                         return ret;
360                 }
361         }
362         return ret;
363 }
364
365 /* calculate CCP descriptors requirement */
366 static inline int
367 ccp_cipher_slot(struct ccp_session *session)
368 {
369         int count = 0;
370
371         switch (session->cipher.algo) {
372         case CCP_CIPHER_ALGO_AES_CBC:
373                 count = 2;
374                 /**< op + passthrough for iv */
375                 break;
376         case CCP_CIPHER_ALGO_AES_ECB:
377                 count = 1;
378                 /**<only op*/
379                 break;
380         case CCP_CIPHER_ALGO_AES_CTR:
381                 count = 2;
382                 /**< op + passthrough for iv */
383                 break;
384         case CCP_CIPHER_ALGO_3DES_CBC:
385                 count = 2;
386                 /**< op + passthrough for iv */
387                 break;
388         default:
389                 CCP_LOG_ERR("Unsupported cipher algo %d",
390                             session->cipher.algo);
391         }
392         return count;
393 }
394
395 static inline int
396 ccp_auth_slot(struct ccp_session *session)
397 {
398         int count = 0;
399
400         switch (session->auth.algo) {
401         case CCP_AUTH_ALGO_AES_CMAC:
402                 count = 4;
403                 /**
404                  * op
405                  * extra descriptor in padding case
406                  * (k1/k2(255:128) with iv(127:0))
407                  * Retrieve result
408                  */
409                 break;
410         default:
411                 CCP_LOG_ERR("Unsupported auth algo %d",
412                             session->auth.algo);
413         }
414
415         return count;
416 }
417
418 static int
419 ccp_aead_slot(struct ccp_session *session)
420 {
421         int count = 0;
422
423         switch (session->aead_algo) {
424         default:
425                 CCP_LOG_ERR("Unsupported aead algo %d",
426                             session->aead_algo);
427         }
428         return count;
429 }
430
431 int
432 ccp_compute_slot_count(struct ccp_session *session)
433 {
434         int count = 0;
435
436         switch (session->cmd_id) {
437         case CCP_CMD_CIPHER:
438                 count = ccp_cipher_slot(session);
439                 break;
440         case CCP_CMD_AUTH:
441                 count = ccp_auth_slot(session);
442                 break;
443         case CCP_CMD_CIPHER_HASH:
444         case CCP_CMD_HASH_CIPHER:
445                 count = ccp_cipher_slot(session);
446                 count += ccp_auth_slot(session);
447                 break;
448         case CCP_CMD_COMBINED:
449                 count = ccp_aead_slot(session);
450                 break;
451         default:
452                 CCP_LOG_ERR("Unsupported cmd_id");
453
454         }
455
456         return count;
457 }
458
/*
 * Queue one CCP PASSTHRU descriptor moving pst->len bytes between
 * system memory and the engine's local storage block (SB).
 * pst->dir != 0: system memory -> SB (e.g. loading an iv or key slot);
 * pst->dir == 0: SB -> system memory (reading a result back).
 * Optional byte-swap / bitwise transforms are encoded in the function
 * field.  Only advances the ring index; the caller is responsible for
 * ringing the queue tail doorbell.
 */
static void
ccp_perform_passthru(struct ccp_passthru *pst,
		     struct ccp_queue *cmd_q)
{
	struct ccp_desc *desc;
	union ccp_function function;

	/* next free descriptor slot in the command ring */
	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 0;
	CCP_CMD_EOM(desc) = 0;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
	CCP_PT_BITWISE(&function) = pst->bit_mod;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = pst->len;

	if (pst->dir) {
		/* system memory source, SB slot destination */
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = 0;
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;

		/* bitwise ops reference the queue's key SB slot */
		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
	} else {

		/* SB slot source, system memory destination */
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = 0;
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	}

	/* advance ring index (wraps at the queue depth) */
	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
}
507
/*
 * Enqueue the descriptors for one AES-CMAC operation over
 * op->sym->auth.data and ring the queue doorbell.
 *
 * Two paths based on whether auth.data.length is block-aligned:
 *  - aligned: load the queue's sb_iv SB slot with zero iv + K1
 *    (pre_compute bytes 0..31) and run a single AES pass over the data;
 *  - unaligned: load zero iv + K2 (pre_compute bytes 32..63), run a
 *    pass over the block-aligned prefix, then build a padded final
 *    block in the appended ctx area and run a second one-block pass.
 * The digest is finally copied from the sb_iv slot into ctx space
 * appended to the source mbuf.  Always returns 0; op->status stays
 * NOT_PROCESSED until completion handling.
 */
static int
ccp_perform_aes_cmac(struct rte_crypto_op *op,
		     struct ccp_queue *cmd_q)
{
	struct ccp_session *session;
	union ccp_function function;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint32_t tail;
	uint8_t *src_tb, *append_ptr, *ctx_addr;
	phys_addr_t src_addr, dest_addr, key_addr;
	int length, non_align_len;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					ccp_cryptodev_driver_id);
	key_addr = rte_mem_virt2phy(session->auth.key_ccp);

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->auth.data.offset);
	/* NOTE(review): rte_pktmbuf_append() returns NULL when the mbuf
	 * lacks ctx_len bytes of tailroom; that case is not checked here
	 * — confirm callers guarantee sufficient tailroom.
	 */
	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
						session->auth.ctx_len);
	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);

	/* AES function: always encrypt direction for CMAC */
	function.raw = 0;
	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;

	if (op->sym->auth.data.length % session->auth.block_size == 0) {

		/* block-aligned: load zero iv + K1 (pre_compute[0..31])
		 * into the queue's sb_iv slot */
		ctx_addr = session->auth.pre_compute;
		memset(ctx_addr, 0, AES_BLOCK_SIZE);
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_perform_passthru(&pst, cmd_q);

		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);

		/* prepare desc for aes-cmac command */
		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_EOM(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;

		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

		/* descriptor writes must land before the doorbell */
		rte_wmb();

		/* ring the doorbell: new tail + run bit */
		tail =
		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol | CMD_Q_RUN);
	} else {
		/* unaligned: load zero iv + K2 (pre_compute[32..63]) */
		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
		memset(ctx_addr, 0, AES_BLOCK_SIZE);
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_perform_passthru(&pst, cmd_q);

		/* split into block-aligned prefix + trailing remainder */
		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
		length *= AES_BLOCK_SIZE;
		non_align_len = op->sym->auth.data.length - length;
		/* prepare desc for aes-cmac command */
		/*Command 1*/
		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);

		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_INIT(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;

		CCP_CMD_LEN(desc) = length;
		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

		/*Command 2*/
		/* build the padded final block in the second half of the
		 * appended ctx area: remainder bytes, then the pad marker */
		append_ptr = append_ptr + CCP_SB_BYTES;
		memset(append_ptr, 0, AES_BLOCK_SIZE);
		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
						 uint8_t *,
						 op->sym->auth.data.offset +
						 length);
		rte_memcpy(append_ptr, src_tb, non_align_len);
		append_ptr[non_align_len] = CMAC_PAD_VALUE;

		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);

		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_EOM(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;
		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;

		/* source is the padded block built above */
		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

		/* descriptor writes must land before the doorbell */
		rte_wmb();
		tail =
		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol | CMD_Q_RUN);
	}
	/* Retrieve result */
	pst.dest_addr = dest_addr;
	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
	pst.len = CCP_SB_BYTES;
	pst.dir = 0;
	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
	ccp_perform_passthru(&pst, cmd_q);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
659
/*
 * Enqueue one AES cipher descriptor for a crypto op.  For non-ECB
 * modes the iv is first staged into the queue's sb_iv SB slot via a
 * passthrough: CTR uses the session's preallocated nonce buffer,
 * other modes a per-batch lsb_buf slot.  Only advances the ring
 * index; the batch doorbell is rung elsewhere.  Always returns 0.
 */
static int
ccp_perform_aes(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	uint8_t *lsb_buf;
	struct ccp_passthru pst = {0};
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	uint8_t *iv;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					ccp_cryptodev_driver_id);
	function.raw = 0;

	/* iv lives in the op's private area at the session offset */
	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
			/* CTR: counter block goes after the nonce bytes */
			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
				   iv, session->iv.length);
			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
			CCP_AES_SIZE(&function) = 0x1F;
		} else {
			/* stage the iv right-aligned in a batch lsb slot */
			lsb_buf =
			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
			rte_memcpy(lsb_buf +
				   (CCP_SB_BYTES - session->iv.length),
				   iv, session->iv.length);
			pst.src_addr = b_info->lsb_buf_phys +
				(b_info->lsb_buf_idx * CCP_SB_BYTES);
			b_info->lsb_buf_idx++;
		}

		/* load the staged iv into the queue's sb_iv SB slot */
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	}

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	/* in-place operation when no distinct destination mbuf is given.
	 * NOTE(review): this uses likely(m_dst != NULL) while the 3DES
	 * path uses unlikely() — confirm which branch hint is intended.
	 */
	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (likely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;
	key_addr = session->cipher.key_phys;

	/* prepare desc for aes command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	/* non-ECB modes consume the iv staged in the sb_iv slot */
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
746
/*
 * Program a 3DES operation onto the CCP hardware command queue.
 *
 * For CBC mode this first stages the IV into a per-batch LSB scratch
 * buffer and issues a passthru descriptor to load it into the queue's
 * IV LSB slot, then builds one 3DES descriptor that (en/de)crypts the
 * packet data in place (or into m_dst when supplied).  The queue is
 * kicked by writing the new tail offset and re-enabling it with the
 * cached control word.
 *
 * @op      symmetric crypto op carrying session, IV offset and mbuf(s)
 * @cmd_q   CCP hardware queue to submit descriptors to
 * @b_info  batch bookkeeping; supplies the LSB scratch buffer slots
 *
 * Returns 0 on success, -ENOTSUP for DES modes other than CBC.
 */
static int
ccp_perform_3des(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	unsigned char *lsb_buf;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint32_t tail;
	uint8_t *iv;
	phys_addr_t src_addr, dest_addr, key_addr;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					ccp_cryptodev_driver_id);

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	switch (session->cipher.um.des_mode) {
	case CCP_DES_MODE_CBC:
		/* Claim the next 32-byte LSB scratch slot for this op's IV. */
		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
		b_info->lsb_buf_idx++;

		/* IV is right-aligned within the CCP_SB_BYTES word. */
		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
			   iv, session->iv.length);

		/* Passthru copies the staged IV into the queue's IV LSB slot,
		 * byte-swapped as the engine expects.
		 */
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
		break;
	case CCP_DES_MODE_CFB:
	case CCP_DES_MODE_ECB:
		CCP_LOG_ERR("Unsupported DES cipher mode");
		return -ENOTSUP;
	}

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (unlikely(op->sym->m_dst != NULL))
		dest_addr =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						   op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;	/* in-place operation */

	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	memset(desc, 0, Q_DESC_SIZE);

	/* prepare desc for des command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;
	CCP_CMD_PROT(desc) = 0;

	/* Direction, mode and key type come straight from the session. */
	function.raw = 0;
	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	/* Non-zero des_mode means CBC here: point the engine at the IV slot. */
	if (session->cipher.um.des_mode)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

	/* Ensure descriptor stores are visible before the doorbell write. */
	rte_wmb();

	/* Write the new tail address back to the queue register */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
849
850 static inline int
851 ccp_crypto_cipher(struct rte_crypto_op *op,
852                   struct ccp_queue *cmd_q,
853                   struct ccp_batch_info *b_info)
854 {
855         int result = 0;
856         struct ccp_session *session;
857
858         session = (struct ccp_session *)get_session_private_data(
859                                          op->sym->session,
860                                          ccp_cryptodev_driver_id);
861
862         switch (session->cipher.algo) {
863         case CCP_CIPHER_ALGO_AES_CBC:
864                 result = ccp_perform_aes(op, cmd_q, b_info);
865                 b_info->desccnt += 2;
866                 break;
867         case CCP_CIPHER_ALGO_AES_CTR:
868                 result = ccp_perform_aes(op, cmd_q, b_info);
869                 b_info->desccnt += 2;
870                 break;
871         case CCP_CIPHER_ALGO_AES_ECB:
872                 result = ccp_perform_aes(op, cmd_q, b_info);
873                 b_info->desccnt += 1;
874                 break;
875         case CCP_CIPHER_ALGO_3DES_CBC:
876                 result = ccp_perform_3des(op, cmd_q, b_info);
877                 b_info->desccnt += 2;
878                 break;
879         default:
880                 CCP_LOG_ERR("Unsupported cipher algo %d",
881                             session->cipher.algo);
882                 return -ENOTSUP;
883         }
884         return result;
885 }
886
887 static inline int
888 ccp_crypto_auth(struct rte_crypto_op *op,
889                 struct ccp_queue *cmd_q,
890                 struct ccp_batch_info *b_info)
891 {
892
893         int result = 0;
894         struct ccp_session *session;
895
896         session = (struct ccp_session *)get_session_private_data(
897                                          op->sym->session,
898                                         ccp_cryptodev_driver_id);
899
900         switch (session->auth.algo) {
901         case CCP_AUTH_ALGO_AES_CMAC:
902                 result = ccp_perform_aes_cmac(op, cmd_q);
903                 b_info->desccnt += 4;
904                 break;
905         default:
906                 CCP_LOG_ERR("Unsupported auth algo %d",
907                             session->auth.algo);
908                 return -ENOTSUP;
909         }
910
911         return result;
912 }
913
914 static inline int
915 ccp_crypto_aead(struct rte_crypto_op *op,
916                 struct ccp_queue *cmd_q __rte_unused,
917                 struct ccp_batch_info *b_info __rte_unused)
918 {
919         int result = 0;
920         struct ccp_session *session;
921
922         session = (struct ccp_session *)get_session_private_data(
923                                          op->sym->session,
924                                         ccp_cryptodev_driver_id);
925
926         switch (session->aead_algo) {
927         default:
928                 CCP_LOG_ERR("Unsupported aead algo %d",
929                             session->aead_algo);
930                 return -ENOTSUP;
931         }
932         return result;
933 }
934
/*
 * Build hardware descriptors for a burst of crypto ops and submit them
 * to the CCP queue with a single doorbell write.
 *
 * A batch-info object records the burst (ops, descriptor count, head and
 * tail offsets) so the dequeue side can tell when the hardware has
 * consumed it.  @slots_req descriptor slots are reserved up front and
 * the unused remainder is returned if preparation of an op fails.
 *
 * Returns the number of ops accepted into the batch; 0 when no
 * batch-info object could be allocated.
 *
 * NOTE(review): on a mid-burst failure the ops prepared so far are
 * still submitted and the batch-info is still enqueued — presumably the
 * caller retries the rest; confirm against the enqueue_burst caller.
 */
int
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
		       uint16_t nb_ops,
		       int slots_req)
{
	int i, result = 0;
	struct ccp_batch_info *b_info;
	struct ccp_session *session;

	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
		return 0;
	}
	/* populate batch info necessary for dequeue */
	b_info->op_idx = 0;
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	/* Reserve the whole burst's worth of slots up front. */
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						 op[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_AUTH:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
			result = -1;
		}
		if (unlikely(result < 0)) {
			/* Return the reserved slots we did not fill. */
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
			break;
		}
		b_info->op[i] = op[i];
	}

	b_info->opcnt = i;
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);

	/* Descriptor stores must land before the doorbell write. */
	rte_wmb();
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
			      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol | CMD_Q_RUN);

	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

	return i;
}
1015
/*
 * Finalize an authenticated op at dequeue time.
 *
 * The engine deposits the computed digest/context in a scratch area of
 * auth.ctx_len bytes at the end of the source mbuf's last segment.
 * Depending on session mode this either verifies that digest against
 * the one supplied in the op (GENERATE vs VERIFY), or copies it out to
 * the op's digest pointer; the scratch area is then trimmed off the
 * mbuf.  Sets op->status to SUCCESS or AUTH_FAILED.
 */
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					ccp_cryptodev_driver_id);

	/* AEAD and auth ops keep their digest in different op fields. */
	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	/* Engine output lives in the last ctx_len bytes of the last seg. */
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);

	/* Full barrier: make sure the engine's writes are observed. */
	rte_mb();
	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	} else {
		/* NOTE(review): NULL digest pointer falls back to writing at
		 * digest_offset in m_dst — assumes m_dst is set in that case;
		 * confirm against the enqueue-side contract.
		 */
		if (unlikely(digest_data == 0))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}
1079
1080 static int
1081 ccp_prepare_ops(struct rte_crypto_op **op_d,
1082                 struct ccp_batch_info *b_info,
1083                 uint16_t nb_ops)
1084 {
1085         int i, min_ops;
1086         struct ccp_session *session;
1087
1088         min_ops = RTE_MIN(nb_ops, b_info->opcnt);
1089
1090         for (i = 0; i < min_ops; i++) {
1091                 op_d[i] = b_info->op[b_info->op_idx++];
1092                 session = (struct ccp_session *)get_session_private_data(
1093                                                  op_d[i]->sym->session,
1094                                                 ccp_cryptodev_driver_id);
1095                 switch (session->cmd_id) {
1096                 case CCP_CMD_CIPHER:
1097                         op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1098                         break;
1099                 case CCP_CMD_AUTH:
1100                 case CCP_CMD_CIPHER_HASH:
1101                 case CCP_CMD_HASH_CIPHER:
1102                 case CCP_CMD_COMBINED:
1103                         ccp_auth_dq_prepare(op_d[i]);
1104                         break;
1105                 default:
1106                         CCP_LOG_ERR("Unsupported cmd_id");
1107                 }
1108         }
1109
1110         b_info->opcnt -= min_ops;
1111         return min_ops;
1112 }
1113
/*
 * Dequeue up to @nb_ops completed crypto ops for a queue pair.
 *
 * Resumes a partially-drained batch if one is cached on the qp,
 * otherwise pulls the next batch from the processed ring and checks the
 * hardware head pointer to decide whether the batch has fully executed.
 * Completed ops are post-processed and returned; a batch that is only
 * partially drained (caller asked for fewer than opcnt) stays cached.
 *
 * Returns the number of ops placed into @op (0 if nothing is ready).
 */
int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		/* Batch already verified complete on a prior call. */
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);

	/* The descriptor ring is circular: if the hardware head still lies
	 * inside this batch's [head, tail) span (with or without wrap), the
	 * batch is still executing — cache it and report nothing ready.
	 */
	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	} else {
		/* Batch wrapped past the end of the ring. */
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}


success:
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	/* Batch done: release its descriptor slots back to the queue. */
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		/* Caller took fewer ops than completed; keep the rest. */
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}

	return nb_ops;
}