/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include <cryptodev_pmd.h>
#include <rte_crypto.h>

#include "nitrox_sym.h"
#include "nitrox_device.h"
#include "nitrox_sym_capabilities.h"
#include "nitrox_qp.h"
#include "nitrox_sym_reqmgr.h"
#include "nitrox_sym_ctx.h"
#include "nitrox_logs.h"

#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
#define MC_MAC_MISMATCH_ERR_CODE 0x4c
#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
#define FLEXI_CRYPTO_MAX_AAD_LEN 512
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
#define MAX_IV_LEN 16

struct nitrox_sym_device {
	struct rte_cryptodev *cdev;
	struct nitrox_device *ndev;
};

/* Cipher opcodes */
enum flexi_cipher {
	CIPHER_NULL = 0,
	CIPHER_3DES_CBC,
	CIPHER_3DES_ECB,
	CIPHER_AES_CBC,
	CIPHER_AES_ECB,
	CIPHER_AES_CFB,
	CIPHER_AES_CTR,
	CIPHER_AES_GCM,
	CIPHER_AES_XTS,
	CIPHER_AES_CCM,
	CIPHER_AES_CBC_CTS,
	CIPHER_AES_ECB_CTS,
	CIPHER_INVALID
};

/* Auth opcodes */
enum flexi_auth {
	AUTH_NULL = 0,
	AUTH_MD5,
	AUTH_SHA1,
	AUTH_SHA2_SHA224,
	AUTH_SHA2_SHA256,
	AUTH_SHA2_SHA384,
	AUTH_SHA2_SHA512,
	AUTH_GMAC,
	AUTH_INVALID
};

uint8_t nitrox_sym_drv_id;
static const char nitrox_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_NITROX_PMD);
static const struct rte_driver nitrox_rte_sym_drv = {
	.name = nitrox_sym_drv_name,
	.alias = nitrox_sym_drv_name
};

static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
				     uint16_t qp_id);

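/*
 * dev_configure: the only per-device check needed here is that the
 * application does not ask for more queue pairs than the device exposes;
 * the rest of the configuration is handled by the cryptodev framework.
 */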
static int
nitrox_sym_dev_config(struct rte_cryptodev *cdev,
		      struct rte_cryptodev_config *config)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (config->nb_queue_pairs > ndev->nr_queues) {
		NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
			   ndev->nr_queues);
		return -EINVAL;
	}

	return 0;
}

static int
nitrox_sym_dev_start(struct rte_cryptodev *cdev)
{
	/* SE cores initialization is done in PF */
	RTE_SET_USED(cdev);
	return 0;
}

static void
nitrox_sym_dev_stop(struct rte_cryptodev *cdev)
{
	/* SE cores cleanup is done in PF */
	RTE_SET_USED(cdev);
}

static int
nitrox_sym_dev_close(struct rte_cryptodev *cdev)
{
	int i, ret;

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = nitrox_sym_dev_qp_release(cdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static void
nitrox_sym_dev_info_get(struct rte_cryptodev *cdev,
			struct rte_cryptodev_info *info)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (!info)
		return;

	info->max_nb_queue_pairs = ndev->nr_queues;
	info->feature_flags = cdev->feature_flags;
	info->capabilities = nitrox_get_sym_capabilities();
	info->driver_id = nitrox_sym_drv_id;
	info->sym.max_nb_sessions = 0;
}

static void
nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
			 struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

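/*
 * Set up one queue pair: allocate the qp structure on the requested
 * socket, initialize the ring via nitrox_qp_setup() and create the
 * per-qp software request pool. A qp already present at this index is
 * released first.
 */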
static int
nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
			const struct rte_cryptodev_qp_conf *qp_conf,
			int socket_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp = NULL;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	if (cdev->data->queue_pairs[qp_id]) {
		err = nitrox_sym_dev_qp_release(cdev, qp_id);
		if (err)
			return err;
	}

	qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
				RTE_CACHE_LINE_SIZE,
				socket_id);
	if (!qp) {
		NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
		return -ENOMEM;
	}

	qp->qno = qp_id;
	err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
			      qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
			      socket_id);
	if (unlikely(err))
		goto qp_setup_err;

	qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
					       socket_id);
	if (unlikely(!qp->sr_mp)) {
		/* err still holds 0 from the successful nitrox_qp_setup();
		 * set an error code so this failure is reported to the
		 * caller instead of being silently treated as success.
		 */
		err = -ENOMEM;
		goto req_pool_err;
	}

	cdev->data->queue_pairs[qp_id] = qp;
	NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
	return 0;

req_pool_err:
	nitrox_qp_release(qp, ndev->bar_addr);
qp_setup_err:
	rte_free(qp);
	return err;
}

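/*
 * Release a queue pair. Fails with -EAGAIN if requests are still
 * pending in the ring, so callers can drain and retry.
 */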
static int
nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	qp = cdev->data->queue_pairs[qp_id];
	if (!qp) {
		NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
		return 0;
	}

	if (!nitrox_qp_is_empty(qp)) {
		NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
		return -EAGAIN;
	}

	cdev->data->queue_pairs[qp_id] = NULL;
	err = nitrox_qp_release(qp, ndev->bar_addr);
	nitrox_sym_req_pool_free(qp->sr_mp);
	rte_free(qp);
	NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
	return err;
}

static unsigned int
nitrox_sym_dev_sess_get_size(__rte_unused struct rte_cryptodev *cdev)
{
	return sizeof(struct nitrox_crypto_ctx);
}

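/*
 * Map an xform chain onto the chain orders the engine supports:
 * cipher-only, cipher-then-auth (encrypt direction), auth-then-cipher
 * (decrypt direction) and AEAD (combined).
 */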
static enum nitrox_chain
get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
{
	enum nitrox_chain res = NITROX_CHAIN_NOT_SUPPORTED;

	if (unlikely(xform == NULL))
		return res;

	switch (xform->type) {
	case RTE_CRYPTO_SYM_XFORM_AUTH:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_NOT_SUPPORTED;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
			    xform->next->cipher.op ==
			    RTE_CRYPTO_CIPHER_OP_DECRYPT) {
				res = NITROX_CHAIN_AUTH_CIPHER;
			} else {
				NITROX_LOG(ERR, "auth op %d, cipher op %d\n",
				    xform->auth.op, xform->next->cipher.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_CIPHER_ONLY;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			    xform->next->auth.op ==
			    RTE_CRYPTO_AUTH_OP_GENERATE) {
				res = NITROX_CHAIN_CIPHER_AUTH;
			} else {
				NITROX_LOG(ERR, "cipher op %d, auth op %d\n",
				    xform->cipher.op, xform->next->auth.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_AEAD:
		res = NITROX_CHAIN_COMBINED;
		break;
	default:
		break;
	}

	return res;
}

static enum flexi_cipher
get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
{
	enum flexi_cipher type;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		type = CIPHER_AES_CBC;
		*is_aes = true;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		type = CIPHER_3DES_CBC;
		*is_aes = false;
		break;
	default:
		type = CIPHER_INVALID;
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		break;
	}

	return type;
}

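/*
 * Translate an AES key size in bytes to the encoding stored in
 * fctx->w0.aes_keylen (1 = AES-128, 2 = AES-192, 3 = AES-256).
 * Non-AES ciphers encode 0.
 */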
static int
flexi_aes_keylen(size_t keylen, bool is_aes)
{
	int aes_keylen;

	if (!is_aes)
		return 0;

	switch (keylen) {
	case AES_KEYSIZE_128:
		aes_keylen = 1;
		break;
	case AES_KEYSIZE_192:
		aes_keylen = 2;
		break;
	case AES_KEYSIZE_256:
		aes_keylen = 3;
		break;
	default:
		NITROX_LOG(ERR, "Invalid keylen %zu\n", keylen);
		aes_keylen = -EINVAL;
		break;
	}

	return aes_keylen;
}

static bool
crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
		    struct flexi_crypto_context *fctx)
{
	if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
		NITROX_LOG(ERR, "Invalid crypto key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}

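/*
 * Fill the flexi crypto context for a plain cipher xform: cipher type,
 * AES key length encoding and key material. The IV is always taken from
 * the request data pointer (IV_FROM_DPTR).
 */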
static int
configure_cipher_ctx(struct rte_crypto_cipher_xform *xform,
		     struct nitrox_crypto_ctx *ctx)
{
	enum flexi_cipher type;
	bool cipher_is_aes = false;
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_cipher_type(xform->algo, &cipher_is_aes);
	if (unlikely(type == CIPHER_INVALID))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, cipher_is_aes);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!cipher_is_aes && !crypto_key_is_valid(xform, fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = type;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	return 0;
}

static enum flexi_auth
get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
{
	enum flexi_auth type;

	switch (algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		type = AUTH_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		type = AUTH_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		type = AUTH_SHA2_SHA256;
		break;
	default:
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		type = AUTH_INVALID;
		break;
	}

	return type;
}

static bool
auth_key_is_valid(const uint8_t *data, uint16_t length,
		  struct flexi_crypto_context *fctx)
{
	if (unlikely(!data && length)) {
		NITROX_LOG(ERR, "Invalid auth key\n");
		return false;
	}

	if (unlikely(length > sizeof(fctx->auth.opad))) {
		NITROX_LOG(ERR, "Invalid auth key length %d\n",
			   length);
		return false;
	}

	return true;
}

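/*
 * Fill the auth part of the flexi crypto context: hash type, MAC length
 * and the HMAC key, which is copied into the opad area of the context.
 */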
static int
configure_auth_ctx(struct rte_crypto_auth_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	enum flexi_auth type;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_auth_type(xform->algo);
	if (unlikely(type == AUTH_INVALID))
		return -ENOTSUP;

	if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
					fctx)))
		return -EINVAL;

	ctx->digest_length = xform->digest_length;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.hash_type = type;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
	return 0;
}

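/*
 * Fill the flexi crypto context for AES-GCM AEAD: the same key is
 * copied into both the cipher key and the auth opad area, and the AAD
 * length is bounded by FLEXI_CRYPTO_MAX_AAD_LEN.
 */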
static int
configure_aead_ctx(struct rte_crypto_aead_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	if (unlikely(xform->aad_length > FLEXI_CRYPTO_MAX_AAD_LEN)) {
		NITROX_LOG(ERR, "AAD length %d not supported\n",
			   xform->aad_length);
		return -ENOTSUP;
	}

	if (unlikely(xform->algo != RTE_CRYPTO_AEAD_AES_GCM))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, true);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
					fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = CIPHER_AES_GCM;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->w0.hash_type = AUTH_NULL;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	ctx->digest_length = xform->digest_length;
	ctx->aad_length = xform->aad_length;
	return 0;
}

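/*
 * Build the per-session nitrox_crypto_ctx from the xform chain. The
 * context lives in a mempool object so its IO address can be handed to
 * the hardware directly.
 */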
static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
			      struct rte_crypto_sym_xform *xform,
			      struct rte_cryptodev_sym_session *sess,
			      struct rte_mempool *mempool)
{
	void *mp_obj;
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	int ret = -EINVAL;

	if (rte_mempool_get(mempool, &mp_obj)) {
		NITROX_LOG(ERR, "Couldn't allocate context\n");
		return -ENOMEM;
	}

	ctx = mp_obj;
	ctx->nitrox_chain = get_crypto_chain_order(xform);
	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_ONLY:
		cipher_xform = &xform->cipher;
		break;
	case NITROX_CHAIN_CIPHER_AUTH:
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		break;
	case NITROX_CHAIN_AUTH_CIPHER:
		auth_xform = &xform->auth;
		cipher_xform = &xform->next->cipher;
		break;
	case NITROX_CHAIN_COMBINED:
		aead_xform = &xform->aead;
		break;
	default:
		NITROX_LOG(ERR, "Crypto chain not supported\n");
		ret = -ENOTSUP;
		goto err;
	}

	if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure cipher ctx\n");
		goto err;
	}

	if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure auth ctx\n");
		goto err;
	}

	if (aead_xform && unlikely(configure_aead_ctx(aead_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure aead ctx\n");
		goto err;
	}

	ctx->iova = rte_mempool_virt2iova(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, ctx);
	return 0;
err:
	rte_mempool_put(mempool, mp_obj);
	return ret;
}

static void
nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev,
			  struct rte_cryptodev_sym_session *sess)
{
	struct nitrox_crypto_ctx *ctx = get_sym_session_private_data(sess,
							cdev->driver_id);
	struct rte_mempool *sess_mp;

	if (!ctx)
		return;

	memset(ctx, 0, sizeof(*ctx));
	sess_mp = rte_mempool_from_obj(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, NULL);
	rte_mempool_put(sess_mp, ctx);
}

static struct nitrox_crypto_ctx *
get_crypto_ctx(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session))
			return get_sym_session_private_data(op->sym->session,
							    nitrox_sym_drv_id);
	}

	return NULL;
}

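/*
 * Translate one crypto op into a hardware instruction: look up the
 * session context, grab a software request from the per-qp pool and let
 * the request manager build and post the SE request.
 */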
static int
nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
{
	struct nitrox_crypto_ctx *ctx;
	struct nitrox_softreq *sr;
	int err;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	ctx = get_crypto_ctx(op);
	if (unlikely(!ctx)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
		return -ENOMEM;

	err = nitrox_process_se_req(qp->qno, op, ctx, sr);
	if (unlikely(err)) {
		rte_mempool_put(qp->sr_mp, sr);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return err;
	}

	nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
	return 0;
}

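/*
 * Enqueue burst: clamp the burst to the free ring slots, stop on the
 * first failed op and ring the doorbell once for everything submitted.
 */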
static uint16_t
nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t free_slots = 0;
	uint16_t cnt = 0;
	bool err = false;

	free_slots = nitrox_qp_free_count(qp);
	if (nb_ops > free_slots)
		nb_ops = free_slots;

	for (cnt = 0; cnt < nb_ops; cnt++) {
		if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
			err = true;
			break;
		}
	}

	nitrox_ring_dbell(qp, cnt);
	qp->stats.enqueued_count += cnt;
	if (unlikely(err))
		qp->stats.enqueue_err_count++;

	return cnt;
}

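/*
 * Dequeue one completed request. A negative return from
 * nitrox_check_se_req() means the request is still in flight (-EAGAIN);
 * a nonzero completion code is mapped to an op status, with
 * MC_MAC_MISMATCH_ERR_CODE reported as an auth failure.
 */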
static int
nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
{
	struct nitrox_softreq *sr;
	int ret;
	struct rte_crypto_op *op;

	sr = nitrox_qp_get_softreq(qp);
	ret = nitrox_check_se_req(sr, op_ptr);
	if (ret < 0)
		return -EAGAIN;

	op = *op_ptr;
	nitrox_qp_dequeue(qp);
	rte_mempool_put(qp->sr_mp, sr);
	if (!ret) {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		qp->stats.dequeued_count++;

		return 0;
	}

	if (ret == MC_MAC_MISMATCH_ERR_CODE)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.dequeue_err_count++;
	return 0;
}

static uint16_t
nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t filled_slots = nitrox_qp_used_count(qp);
	int cnt = 0;

	if (nb_ops > filled_slots)
		nb_ops = filled_slots;

	for (cnt = 0; cnt < nb_ops; cnt++)
		if (nitrox_deq_single_op(qp, &ops[cnt]))
			break;

	return cnt;
}

static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
	.dev_configure		= nitrox_sym_dev_config,
	.dev_start		= nitrox_sym_dev_start,
	.dev_stop		= nitrox_sym_dev_stop,
	.dev_close		= nitrox_sym_dev_close,
	.dev_infos_get		= nitrox_sym_dev_info_get,
	.stats_get		= nitrox_sym_dev_stats_get,
	.stats_reset		= nitrox_sym_dev_stats_reset,
	.queue_pair_setup	= nitrox_sym_dev_qp_setup,
	.queue_pair_release	= nitrox_sym_dev_qp_release,
	.sym_session_get_size	= nitrox_sym_dev_sess_get_size,
	.sym_session_configure	= nitrox_sym_dev_sess_configure,
	.sym_session_clear	= nitrox_sym_dev_sess_clear
};

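/*
 * Create the symmetric cryptodev for a nitrox device: the device name
 * is the PCI address with an "_n5sym" suffix, and the burst functions
 * and feature flags are wired up here.
 */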
int
nitrox_sym_pmd_create(struct nitrox_device *ndev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev_pmd_init_params init_params = {
			.name = "",
			.socket_id = ndev->pdev->device.numa_node,
			.private_data_size = sizeof(struct nitrox_sym_device)
	};
	struct rte_cryptodev *cdev;

	rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
	snprintf(name + strlen(name), RTE_CRYPTODEV_NAME_MAX_LEN - strlen(name),
		 "_n5sym");
	ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
	ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
	ndev->rte_sym_dev.devargs = NULL;
	cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
					&init_params);
	if (!cdev) {
		NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
		return -ENODEV;
	}

	ndev->rte_sym_dev.name = cdev->data->name;
	cdev->driver_id = nitrox_sym_drv_id;
	cdev->dev_ops = &nitrox_cryptodev_ops;
	cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
	cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
	cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_HW_ACCELERATED |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	ndev->sym_dev = cdev->data->dev_private;
	ndev->sym_dev->cdev = cdev;
	ndev->sym_dev->ndev = ndev;
	NITROX_LOG(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d\n",
		   cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
	return 0;
}

int
nitrox_sym_pmd_destroy(struct nitrox_device *ndev)
{
	return rte_cryptodev_pmd_destroy(ndev->sym_dev->cdev);
}

static struct cryptodev_driver nitrox_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(nitrox_crypto_drv,
		nitrox_rte_sym_drv,
		nitrox_sym_drv_id);