dpdk.git: drivers/crypto/cnxk/cnxk_cryptodev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>

#include "roc_cpt.h"

#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_cryptodev_capabilities.h"
#include "cnxk_se.h"

#define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
#define CNXK_CPT_MAX_ASYM_OP_MOD_LEN    1024

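/*
 * Compute the metadata buffer length needed by a symmetric operation:
 * space for the MAC, the offset control word and IV, and an 8-byte
 * aligned scatter-gather list.
 */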
static int
cnxk_cpt_get_mlen(void)
{
        uint32_t len;

        /* For MAC */
        len = 2 * sizeof(uint64_t);
        len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);

        len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
        len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
                               (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
                                2) * ROC_SE_SG_ENTRY_SIZE),
                              8);

        return len;
}

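/*
 * Compute the metadata buffer length needed by an asymmetric operation:
 * one result pointer (RPTR) plus room for the maximum number of
 * operation parameters at the maximum modulus length.
 */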
static int
cnxk_cpt_asym_get_mlen(void)
{
        uint32_t len;

        /* To hold RPTR */
        len = sizeof(uint64_t);

        /* Get meta len for asymmetric operations */
        len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;

        return len;
}

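/*
 * Configure the device: the number of requested queue pairs must not
 * exceed the CPT LFs available to this VF. For asymmetric crypto the
 * shared FPM and EC group tables are also initialized here.
 *
 * Sketch of the call an application would typically make to reach this
 * op (values illustrative only):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	rte_cryptodev_configure(dev_id, &conf);
 */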
int
cnxk_cpt_dev_config(struct rte_cryptodev *dev,
                    struct rte_cryptodev_config *conf)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        uint16_t nb_lf_avail, nb_lf;
        int ret;

        dev->feature_flags &= ~conf->ff_disable;

        nb_lf_avail = roc_cpt->nb_lf_avail;
        nb_lf = conf->nb_queue_pairs;

        if (nb_lf > nb_lf_avail)
                return -ENOTSUP;

        ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
        if (ret) {
                plt_err("Could not configure device");
                return ret;
        }

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
                /* Initialize shared FPM table */
                ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
                if (ret) {
                        plt_err("Could not get FPM table");
                        return ret;
                }

                /* Init EC grp table */
                ret = roc_ae_ec_grp_get(vf->ec_grp);
                if (ret) {
                        plt_err("Could not get EC grp table");
                        roc_ae_fpm_put();
                        return ret;
                }
        }

        return 0;
}

int
cnxk_cpt_dev_start(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);

        return 0;
}

void
cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);
}

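/*
 * Release all queue pairs, drop the asymmetric crypto tables if they
 * were taken during configure, and clear the underlying CPT device.
 */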
int
cnxk_cpt_dev_close(struct rte_cryptodev *dev)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        uint16_t i;
        int ret;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                ret = cnxk_cpt_queue_pair_release(dev, i);
                if (ret < 0) {
                        plt_err("Could not release queue pair %u", i);
                        return ret;
                }
        }

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
                roc_ae_fpm_put();
                roc_ae_ec_grp_put();
        }

        roc_cpt_dev_clear(&vf->cpt);

        return 0;
}

void
cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
                      struct rte_cryptodev_info *info)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;

        info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
        info->feature_flags = dev->feature_flags;
        info->capabilities = cnxk_crypto_capabilities_get(vf);
        info->sym.max_nb_sessions = 0;
        info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
        info->min_mbuf_tailroom_req = 0;
}

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
        snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
}

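/*
 * Create the per-queue-pair mempool backing the metadata buffers. The
 * element size is the larger of the symmetric and asymmetric metadata
 * requirements for the features enabled on the device.
 */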
static int
cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
                                struct cnxk_cpt_qp *qp, uint8_t qp_id,
                                uint32_t nb_elements)
{
        char mempool_name[RTE_MEMPOOL_NAMESIZE];
        struct cpt_qp_meta_info *meta_info;
        struct rte_mempool *pool;
        uint32_t cache_sz;
        int mlen = 8;

        if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
                /* Get meta len */
                mlen = cnxk_cpt_get_mlen();
        }

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {

                /* Get meta len required for asymmetric operations */
                mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
        }

        cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);

        /* Allocate mempool */

        snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
                 dev->data->dev_id, qp_id);

        pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
                                  NULL, NULL, NULL, NULL, rte_socket_id(), 0);

        if (pool == NULL) {
                plt_err("Could not create mempool for metabuf");
                return rte_errno;
        }

        meta_info = &qp->meta_info;

        meta_info->pool = pool;
        meta_info->mlen = mlen;

        return 0;
}

static void
cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
{
        struct cpt_qp_meta_info *meta_info = &qp->meta_info;

        rte_mempool_free(meta_info->pool);

        meta_info->pool = NULL;
        meta_info->mlen = 0;
}

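/*
 * Allocate a queue pair: the queue pair structure itself, a memzone
 * used as the pending (inflight request) queue, and the metabuf
 * mempool.
 */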
static struct cnxk_cpt_qp *
cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
                   uint32_t iq_len)
{
        const struct rte_memzone *pq_mem;
        char name[RTE_MEMZONE_NAMESIZE];
        struct cnxk_cpt_qp *qp;
        uint32_t len;
        uint8_t *va;
        int ret;

        /* Allocate queue pair */
        qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
                                ROC_ALIGN, 0);
        if (qp == NULL) {
                plt_err("Could not allocate queue pair");
                return NULL;
        }

        /* For pending queue */
        len = iq_len * sizeof(struct cpt_inflight_req);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp_id);

        pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
                                             RTE_MEMZONE_SIZE_HINT_ONLY |
                                                     RTE_MEMZONE_256MB,
                                             RTE_CACHE_LINE_SIZE);
        if (pq_mem == NULL) {
                plt_err("Could not allocate reserved memzone");
                goto qp_free;
        }

        va = pq_mem->addr;

        memset(va, 0, len);

        ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
        if (ret) {
                plt_err("Could not create mempool for metabuf");
                goto pq_mem_free;
        }

        /* Initialize pending queue */
        qp->pend_q.req_queue = pq_mem->addr;
        qp->pend_q.enq_tail = 0;
        qp->pend_q.deq_head = 0;
        qp->pend_q.pending_count = 0;

        return qp;

pq_mem_free:
        rte_memzone_free(pq_mem);
qp_free:
        rte_free(qp);
        return NULL;
}

static int
cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
{
        const struct rte_memzone *pq_mem;
        char name[RTE_MEMZONE_NAMESIZE];
        int ret;

        cnxk_cpt_metabuf_mempool_destroy(qp);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp->lf.lf_id);

        pq_mem = rte_memzone_lookup(name);

        ret = rte_memzone_free(pq_mem);
        if (ret)
                return ret;

        rte_free(qp);

        return 0;
}

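/* Tear down a queue pair and detach its CPT LF from the device. */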
int
cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        struct roc_cpt_lf *lf;
        int ret;

        if (qp == NULL)
                return -EINVAL;

        lf = roc_cpt->lf[qp_id];
        if (lf == NULL)
                return -ENOTSUP;

        roc_cpt_lf_fini(lf);

        ret = cnxk_cpt_qp_destroy(dev, qp);
        if (ret) {
                plt_err("Could not destroy queue pair %d", qp_id);
                return ret;
        }

        roc_cpt->lf[qp_id] = NULL;
        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

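/*
 * Set up a queue pair: any existing queue pair with the same id is
 * released first, then a new one is created and bound to a CPT LF and
 * an LMT line.
 *
 * A typical application call, for illustration only (sess_mp and
 * sess_priv_mp are application-created session mempools):
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 */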
int
cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                          const struct rte_cryptodev_qp_conf *conf,
                          int socket_id __rte_unused)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        struct rte_pci_device *pci_dev;
        struct cnxk_cpt_qp *qp;
        int ret;

        if (dev->data->queue_pairs[qp_id] != NULL)
                cnxk_cpt_queue_pair_release(dev, qp_id);

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[2].addr == NULL) {
                plt_err("Invalid PCI mem address");
                return -EIO;
        }

        qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
        if (qp == NULL) {
                plt_err("Could not create queue pair %d", qp_id);
                return -ENOMEM;
        }

        qp->lf.lf_id = qp_id;
        qp->lf.nb_desc = conf->nb_descriptors;

        ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
        if (ret < 0) {
                plt_err("Could not initialize queue pair %d", qp_id);
                ret = -EINVAL;
                goto exit;
        }

        roc_cpt->lf[qp_id] = &qp->lf;

        ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
        if (ret < 0) {
                roc_cpt->lf[qp_id] = NULL;
                plt_err("Could not init lmtline for queue pair %d", qp_id);
                goto exit;
        }

        qp->sess_mp = conf->mp_session;
        qp->sess_mp_priv = conf->mp_session_private;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;

exit:
        cnxk_cpt_qp_destroy(dev, qp);
        return ret;
}

unsigned int
cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        return sizeof(struct cnxk_se_sess);
}

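/*
 * Validate a symmetric transform chain. Returns a CNXK_CPT_* operation
 * type for supported single transforms and cipher/auth chains, or a
 * negative errno for unsupported combinations (e.g. 3DES-CBC chained
 * with plain SHA1).
 */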
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
            xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
                return -ENOTSUP;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
                return CNXK_CPT_CIPHER;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
                return CNXK_CPT_AUTH;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
                return CNXK_CPT_AEAD;

        if (xform->next == NULL)
                return -EIO;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
                return -ENOTSUP;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
                return -ENOTSUP;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
                return CNXK_CPT_CIPHER_ENC_AUTH_GEN;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
                return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                switch (xform->auth.algo) {
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                        switch (xform->next->cipher.algo) {
                        case RTE_CRYPTO_CIPHER_AES_CBC:
                                return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
                        default:
                                return -ENOTSUP;
                        }
                default:
                        return -ENOTSUP;
                }
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
                switch (xform->cipher.algo) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        switch (xform->next->auth.algo) {
                        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                                return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
                        default:
                                return -ENOTSUP;
                        }
                default:
                        return -ENOTSUP;
                }
        }

        return -ENOTSUP;
}

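/*
 * Build CPT instruction word 7 for a session: the session context
 * pointer plus the engine group (SE when zsk_flag or chacha_poly is
 * set, IE otherwise).
 */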
static uint64_t
cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
{
        union cpt_inst_w7 inst_w7;

        inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;

        /* Set the engine group */
        if (sess->zsk_flag || sess->chacha_poly)
                inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
        else
                inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];

        return inst_w7.u64;
}

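/*
 * Fill the session private data for a verified transform chain and
 * attach it to the generic session. On any failure the private data
 * object is returned to its mempool.
 */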
int
sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
                      struct rte_crypto_sym_xform *xform,
                      struct rte_cryptodev_sym_session *sess,
                      struct rte_mempool *pool)
{
        struct cnxk_se_sess *sess_priv;
        void *priv;
        int ret;

        ret = sym_xform_verify(xform);
        if (unlikely(ret < 0))
                return ret;

        if (unlikely(rte_mempool_get(pool, &priv))) {
                plt_dp_err("Could not allocate session private data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(struct cnxk_se_sess));

        sess_priv = priv;

        switch (ret) {
        case CNXK_CPT_CIPHER:
                ret = fill_sess_cipher(xform, sess_priv);
                break;
        case CNXK_CPT_AUTH:
                if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
                        ret = fill_sess_gmac(xform, sess_priv);
                else
                        ret = fill_sess_auth(xform, sess_priv);
                break;
        case CNXK_CPT_AEAD:
                ret = fill_sess_aead(xform, sess_priv);
                break;
        case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
        case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
                ret = fill_sess_cipher(xform, sess_priv);
                if (ret < 0)
                        break;
                ret = fill_sess_auth(xform->next, sess_priv);
                break;
        case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
        case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
                ret = fill_sess_auth(xform, sess_priv);
                if (ret < 0)
                        break;
                ret = fill_sess_cipher(xform->next, sess_priv);
                break;
        default:
                ret = -1;
        }

        if (ret)
                goto priv_put;

        if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
            cpt_mac_len_verify(&xform->auth)) {
                plt_dp_err("MAC length is not supported");
                ret = -ENOTSUP;
                goto priv_put;
        }

        sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);

        set_sym_session_private_data(sess, driver_id, sess_priv);

        return 0;

priv_put:
        rte_mempool_put(pool, priv);

        return -ENOTSUP;
}

int
cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
                               struct rte_crypto_sym_xform *xform,
                               struct rte_cryptodev_sym_session *sess,
                               struct rte_mempool *pool)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        uint8_t driver_id;

        driver_id = dev->driver_id;

        return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
}

void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
        void *priv = get_sym_session_private_data(sess, driver_id);
        struct rte_mempool *pool;

        if (priv == NULL)
                return;

        memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));

        pool = rte_mempool_from_obj(priv);

        set_sym_session_private_data(sess, driver_id, NULL);

        rte_mempool_put(pool, priv);
}

void
cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
                           struct rte_cryptodev_sym_session *sess)
{
        return sym_session_clear(dev->driver_id, sess);
}

unsigned int
cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
        return sizeof(struct cnxk_ae_sess);
}

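/* Free asymmetric session resources and return the private data to its pool. */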
void
cnxk_ae_session_clear(struct rte_cryptodev *dev,
                      struct rte_cryptodev_asym_session *sess)
{
        struct rte_mempool *sess_mp;
        struct cnxk_ae_sess *priv;

        priv = get_asym_session_private_data(sess, dev->driver_id);
        if (priv == NULL)
                return;

        /* Free resources allocated in session_cfg */
        cnxk_ae_free_session_parameters(priv);

        /* Reset and free object back to pool */
        memset(priv, 0, cnxk_ae_session_size_get(dev));
        sess_mp = rte_mempool_from_obj(priv);
        set_asym_session_private_data(sess, dev->driver_id, NULL);
        rte_mempool_put(sess_mp, priv);
}

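/*
 * Create an asymmetric session: fill the xform parameters, select the
 * AE engine group in instruction word 7, and attach the shared FPM and
 * EC group tables set up during device configure.
 */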
int
cnxk_ae_session_cfg(struct rte_cryptodev *dev,
                    struct rte_crypto_asym_xform *xform,
                    struct rte_cryptodev_asym_session *sess,
                    struct rte_mempool *pool)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        struct cnxk_ae_sess *priv;
        union cpt_inst_w7 w7;
        int ret;

        if (rte_mempool_get(pool, (void **)&priv))
                return -ENOMEM;

        memset(priv, 0, sizeof(struct cnxk_ae_sess));

        ret = cnxk_ae_fill_session_parameters(priv, xform);
        if (ret) {
                rte_mempool_put(pool, priv);
                return ret;
        }

        w7.u64 = 0;
        w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
        priv->cpt_inst_w7 = w7.u64;
        priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
        priv->ec_grp = vf->ec_grp;
        set_asym_session_private_data(sess, dev->driver_id, priv);

        return 0;
}