vdpa/sfc: support device configure and close
drivers/vdpa/sfc/sfc_vdpa_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <sys/ioctl.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_vdpa.h>
#include <rte_vfio.h>
#include <rte_vhost.h>

#include <vdpa_driver.h>

#include "efx.h"
#include "sfc_vdpa_ops.h"
#include "sfc_vdpa.h"

/* These protocol features are needed to enable the host notifier ctrl */
#define SFC_VDPA_PROTOCOL_FEATURES \
                ((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD))

/*
 * Set of features which are enabled by default.
 * The protocol feature bit is needed to enable the notifier ctrl.
 */
#define SFC_VDPA_DEFAULT_FEATURES \
                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define SFC_VDPA_MSIX_IRQ_SET_BUF_LEN \
                (sizeof(struct vfio_irq_set) + \
                sizeof(int) * (SFC_VDPA_MAX_QUEUE_PAIRS * 2 + 1))

/* Target VF id used when the calling function is not a PF */
#define SFC_VDPA_VF_NULL                0xFFFF

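/*
 * Read the virtio features supported by the device through the libefx
 * virtio interface and cache them in the ops data for later negotiation.
 */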
static int
sfc_vdpa_get_device_features(struct sfc_vdpa_ops_data *ops_data)
{
        int rc;
        uint64_t dev_features;
        efx_nic_t *nic;

        nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;

        rc = efx_virtio_get_features(nic, EFX_VIRTIO_DEVICE_TYPE_NET,
                                     &dev_features);
        if (rc != 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "could not read device features: %s",
                             rte_strerror(rc));
                return rc;
        }

        ops_data->dev_features = dev_features;

        sfc_vdpa_info(ops_data->dev_handle,
                      "device supported virtio features : 0x%" PRIx64,
                      ops_data->dev_features);

        return 0;
}

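/*
 * Translate a host virtual address (HVA) to a guest physical address
 * (GPA) by walking the vhost memory table. Returns 0 if no memory
 * region contains the address.
 */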
static uint64_t
hva_to_gpa(int vid, uint64_t hva)
{
        struct rte_vhost_memory *vhost_mem = NULL;
        struct rte_vhost_mem_region *mem_reg = NULL;
        uint32_t i;
        uint64_t gpa = 0;

        if (rte_vhost_get_mem_table(vid, &vhost_mem) < 0)
                goto error;

        for (i = 0; i < vhost_mem->nregions; i++) {
                mem_reg = &vhost_mem->regions[i];

                if (hva >= mem_reg->host_user_addr &&
                                hva < mem_reg->host_user_addr + mem_reg->size) {
                        gpa = (hva - mem_reg->host_user_addr) +
                                mem_reg->guest_phys_addr;
                        break;
                }
        }

error:
        free(vhost_mem);
        return gpa;
}

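/*
 * Bind eventfds to the device MSI-X vectors through VFIO: vector 0 is
 * bound to the PCI device interrupt handle, the following vectors to
 * the per-vring call file descriptors obtained from vhost.
 */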
static int
sfc_vdpa_enable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
{
        int rc;
        int *irq_fd_ptr;
        int vfio_dev_fd;
        uint32_t i, num_vring;
        struct rte_vhost_vring vring;
        struct vfio_irq_set *irq_set;
        struct rte_pci_device *pci_dev;
        char irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];
        void *dev;

        num_vring = rte_vhost_get_vring_num(ops_data->vid);
        dev = ops_data->dev_handle;
        vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
        pci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;

        irq_set = (struct vfio_irq_set *)irq_set_buf;
        irq_set->argsz = sizeof(irq_set_buf);
        irq_set->count = num_vring + 1;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = 0;
        irq_fd_ptr = (int *)&irq_set->data;
        irq_fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] =
                rte_intr_fd_get(pci_dev->intr_handle);

        for (i = 0; i < num_vring; i++) {
                rc = rte_vhost_get_vhost_vring(ops_data->vid, i, &vring);
                if (rc)
                        return -1;

                irq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
        }

        rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
        if (rc) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "error enabling MSI-X interrupts: %s",
                             strerror(errno));
                return -1;
        }

        return 0;
}

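/* Disable all MSI-X vectors previously configured through VFIO */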
static int
sfc_vdpa_disable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
{
        int rc;
        int vfio_dev_fd;
        struct vfio_irq_set irq_set;
        void *dev;

        dev = ops_data->dev_handle;
        vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;

        irq_set.argsz = sizeof(irq_set);
        irq_set.count = 0;
        irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set.index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set.start = 0;

        rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
        if (rc) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "error disabling MSI-X interrupts: %s",
                             strerror(errno));
                return -1;
        }

        return 0;
}

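/*
 * Fetch a virtqueue's configuration from vhost: the ring addresses
 * (translated to guest physical addresses), the ring size and the
 * last available/used indices.
 */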
static int
sfc_vdpa_get_vring_info(struct sfc_vdpa_ops_data *ops_data,
                        int vq_num, struct sfc_vdpa_vring_info *vring)
{
        int rc;
        uint64_t gpa;
        struct rte_vhost_vring vq;

        rc = rte_vhost_get_vhost_vring(ops_data->vid, vq_num, &vq);
        if (rc < 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "get vhost vring failed: %s", rte_strerror(rc));
                return rc;
        }

        gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.desc);
        if (gpa == 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "failed to get GPA for descriptor ring");
                return -1;
        }
        vring->desc = gpa;

        gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.avail);
        if (gpa == 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "failed to get GPA for available ring");
                return -1;
        }
        vring->avail = gpa;

        gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.used);
        if (gpa == 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "failed to get GPA for used ring");
                return -1;
        }
        vring->used = gpa;

        vring->size = vq.size;

        rc = rte_vhost_get_vring_base(ops_data->vid, vq_num,
                                      &vring->last_avail_idx,
                                      &vring->last_used_idx);

        return rc;
}

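/* Configure a virtqueue from its vhost vring info and start it on the NIC */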
static int
sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)
{
        int rc;
        efx_virtio_vq_t *vq;
        struct sfc_vdpa_vring_info vring;
        efx_virtio_vq_cfg_t vq_cfg;
        efx_virtio_vq_dyncfg_t vq_dyncfg;

        vq = ops_data->vq_cxt[vq_num].vq;
        if (vq == NULL)
                return -1;

        rc = sfc_vdpa_get_vring_info(ops_data, vq_num, &vring);
        if (rc < 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "get vring info failed: %s", rte_strerror(rc));
                goto fail_vring_info;
        }

        vq_cfg.evvc_target_vf = SFC_VDPA_VF_NULL;

        /* Even virtqueue numbers are RXQs, odd numbers are TXQs */
        if (vq_num % 2) {
                vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_TXQ;
                sfc_vdpa_info(ops_data->dev_handle,
                              "configure virtqueue # %d (TXQ)", vq_num);
        } else {
                vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_RXQ;
                sfc_vdpa_info(ops_data->dev_handle,
                              "configure virtqueue # %d (RXQ)", vq_num);
        }

        vq_cfg.evvc_vq_num = vq_num;
        vq_cfg.evvc_desc_tbl_addr   = vring.desc;
        vq_cfg.evvc_avail_ring_addr = vring.avail;
        vq_cfg.evvc_used_ring_addr  = vring.used;
        vq_cfg.evvc_vq_size = vring.size;

        vq_dyncfg.evvd_vq_pidx = vring.last_used_idx;
        vq_dyncfg.evvd_vq_cidx = vring.last_avail_idx;

        /* MSI-X vector is function-relative */
        vq_cfg.evvc_msix_vector = RTE_INTR_VEC_RXTX_OFFSET + vq_num;
        if (ops_data->vdpa_context == SFC_VDPA_AS_VF)
                vq_cfg.evvc_pas_id = 0;
        vq_cfg.evcc_features = ops_data->dev_features &
                               ops_data->req_features;

        /* Start virtqueue */
        rc = efx_virtio_qstart(vq, &vq_cfg, &vq_dyncfg);
        if (rc != 0) {
                /* destroy virtqueue */
                sfc_vdpa_err(ops_data->dev_handle,
                             "virtqueue start failed: %s",
                             rte_strerror(rc));
                efx_virtio_qdestroy(vq);
                goto fail_virtio_qstart;
        }

        sfc_vdpa_info(ops_data->dev_handle,
                      "virtqueue started successfully for vq_num %d", vq_num);

        ops_data->vq_cxt[vq_num].enable = B_TRUE;

        return rc;

fail_virtio_qstart:
fail_vring_info:
        return rc;
}

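/*
 * Stop a started virtqueue on the NIC and record the producer/consumer
 * indices it reports in the virtqueue context.
 */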
static int
sfc_vdpa_virtq_stop(struct sfc_vdpa_ops_data *ops_data, int vq_num)
{
        int rc;
        efx_virtio_vq_dyncfg_t vq_idx;
        efx_virtio_vq_t *vq;

        if (ops_data->vq_cxt[vq_num].enable != B_TRUE)
                return -1;

        vq = ops_data->vq_cxt[vq_num].vq;
        if (vq == NULL)
                return -1;

        /* stop the vq */
        rc = efx_virtio_qstop(vq, &vq_idx);
        if (rc == 0) {
                ops_data->vq_cxt[vq_num].cidx = vq_idx.evvd_vq_cidx;
                ops_data->vq_cxt[vq_num].pidx = vq_idx.evvd_vq_pidx;
        }
        ops_data->vq_cxt[vq_num].enable = B_FALSE;

        return rc;
}

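/*
 * Device configuration: validate the vring count against the adapter
 * limit, map guest memory for DMA and create a NIC virtqueue context
 * for each vring.
 */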
static int
sfc_vdpa_configure(struct sfc_vdpa_ops_data *ops_data)
{
        int rc, i;
        int nr_vring;
        int max_vring_cnt;
        efx_virtio_vq_t *vq;
        efx_nic_t *nic;
        void *dev;

        dev = ops_data->dev_handle;
        nic = sfc_vdpa_adapter_by_dev_handle(dev)->nic;

        SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_INITIALIZED);

        ops_data->state = SFC_VDPA_STATE_CONFIGURING;

        nr_vring = rte_vhost_get_vring_num(ops_data->vid);
        max_vring_cnt =
                (sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);

        /* The number of vrings must not exceed the supported max VQ count */
        if (nr_vring > max_vring_cnt) {
                sfc_vdpa_err(dev,
                             "nr_vring (%d) is > max vring count (%d)",
                             nr_vring, max_vring_cnt);
                goto fail_vring_num;
        }

        rc = sfc_vdpa_dma_map(ops_data, true);
        if (rc) {
                sfc_vdpa_err(dev,
                             "DMA map failed: %s", rte_strerror(rc));
                goto fail_dma_map;
        }

        for (i = 0; i < nr_vring; i++) {
                rc = efx_virtio_qcreate(nic, &vq);
                if ((rc != 0) || (vq == NULL)) {
                        sfc_vdpa_err(dev,
                                     "virtqueue create failed: %s",
                                     rte_strerror(rc));
                        goto fail_vq_create;
                }

                /* store created virtqueue context */
                ops_data->vq_cxt[i].vq = vq;
        }

        ops_data->vq_count = i;

        ops_data->state = SFC_VDPA_STATE_CONFIGURED;

        return 0;

fail_vq_create:
        sfc_vdpa_dma_map(ops_data, false);

fail_dma_map:
fail_vring_num:
        ops_data->state = SFC_VDPA_STATE_INITIALIZED;

        return -1;
}

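/* Undo sfc_vdpa_configure(): destroy the virtqueues and unmap guest memory */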
static void
sfc_vdpa_close(struct sfc_vdpa_ops_data *ops_data)
{
        int i;

        if (ops_data->state != SFC_VDPA_STATE_CONFIGURED)
                return;

        ops_data->state = SFC_VDPA_STATE_CLOSING;

        for (i = 0; i < ops_data->vq_count; i++) {
                if (ops_data->vq_cxt[i].vq == NULL)
                        continue;

                efx_virtio_qdestroy(ops_data->vq_cxt[i].vq);
        }

        sfc_vdpa_dma_map(ops_data, false);

        ops_data->state = SFC_VDPA_STATE_INITIALIZED;
}

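/* Stop all virtqueues and disable interrupts, keeping the configuration */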
static void
sfc_vdpa_stop(struct sfc_vdpa_ops_data *ops_data)
{
        int i;

        if (ops_data->state != SFC_VDPA_STATE_STARTED)
                return;

        ops_data->state = SFC_VDPA_STATE_STOPPING;

        for (i = 0; i < ops_data->vq_count; i++)
                sfc_vdpa_virtq_stop(ops_data, i);

        sfc_vdpa_disable_vfio_intr(ops_data);

        ops_data->state = SFC_VDPA_STATE_CONFIGURED;
}

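/*
 * Enable VFIO interrupts and start all virtqueues using the features
 * negotiated with the vhost frontend.
 */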
static int
sfc_vdpa_start(struct sfc_vdpa_ops_data *ops_data)
{
        int i, j;
        int rc;

        SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_CONFIGURED);

        sfc_vdpa_log_init(ops_data->dev_handle, "entry");

        ops_data->state = SFC_VDPA_STATE_STARTING;

        sfc_vdpa_log_init(ops_data->dev_handle, "enable interrupts");
        rc = sfc_vdpa_enable_vfio_intr(ops_data);
        if (rc < 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "vfio intr allocation failed: %s",
                             rte_strerror(rc));
                goto fail_enable_vfio_intr;
        }

        rte_vhost_get_negotiated_features(ops_data->vid,
                                          &ops_data->req_features);

        sfc_vdpa_info(ops_data->dev_handle,
                      "negotiated features : 0x%" PRIx64,
                      ops_data->req_features);

        for (i = 0; i < ops_data->vq_count; i++) {
                sfc_vdpa_log_init(ops_data->dev_handle,
                                  "starting vq# %d", i);
                rc = sfc_vdpa_virtq_start(ops_data, i);
                if (rc != 0)
                        goto fail_vq_start;
        }

        ops_data->state = SFC_VDPA_STATE_STARTED;

        sfc_vdpa_log_init(ops_data->dev_handle, "done");

        return 0;

fail_vq_start:
        /* stop already started virtqueues */
        for (j = 0; j < i; j++)
                sfc_vdpa_virtq_stop(ops_data, j);
        sfc_vdpa_disable_vfio_intr(ops_data);

fail_enable_vfio_intr:
        ops_data->state = SFC_VDPA_STATE_CONFIGURED;

        return rc;
}

static int
sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)
{
        struct sfc_vdpa_ops_data *ops_data;
        void *dev;

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        dev = ops_data->dev_handle;
        *queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;

        sfc_vdpa_info(dev, "vDPA ops get_queue_num :: supported queue num : %u",
                      *queue_num);

        return 0;
}

static int
sfc_vdpa_get_features(struct rte_vdpa_device *vdpa_dev, uint64_t *features)
{
        struct sfc_vdpa_ops_data *ops_data;

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        *features = ops_data->drv_features;

        sfc_vdpa_info(ops_data->dev_handle,
                      "vDPA ops get_feature :: features : 0x%" PRIx64,
                      *features);

        return 0;
}

static int
sfc_vdpa_get_protocol_features(struct rte_vdpa_device *vdpa_dev,
                               uint64_t *features)
{
        struct sfc_vdpa_ops_data *ops_data;

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        *features = SFC_VDPA_PROTOCOL_FEATURES;

        sfc_vdpa_info(ops_data->dev_handle,
                      "vDPA ops get_protocol_feature :: features : 0x%" PRIx64,
                      *features);

        return 0;
}

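/*
 * vDPA .dev_conf callback, invoked when vhost feature negotiation
 * completes: configure and start the datapath, then request host
 * notifier setup (a software relay for notifications is used if
 * that fails).
 */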
static int
sfc_vdpa_dev_config(int vid)
{
        struct rte_vdpa_device *vdpa_dev;
        int rc;
        struct sfc_vdpa_ops_data *ops_data;

        vdpa_dev = rte_vhost_get_vdpa_device(vid);

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL) {
                SFC_VDPA_GENERIC_LOG(ERR,
                             "invalid vDPA device : %p, vid : %d",
                             vdpa_dev, vid);
                return -1;
        }

        sfc_vdpa_log_init(ops_data->dev_handle, "entry");

        ops_data->vid = vid;

        sfc_vdpa_adapter_lock(ops_data->dev_handle);

        sfc_vdpa_log_init(ops_data->dev_handle, "configuring");
        rc = sfc_vdpa_configure(ops_data);
        if (rc != 0)
                goto fail_vdpa_config;

        sfc_vdpa_log_init(ops_data->dev_handle, "starting");
        rc = sfc_vdpa_start(ops_data);
        if (rc != 0)
                goto fail_vdpa_start;

        sfc_vdpa_adapter_unlock(ops_data->dev_handle);

        sfc_vdpa_log_init(ops_data->dev_handle, "vhost notifier ctrl");
        if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
                sfc_vdpa_info(ops_data->dev_handle,
                              "vDPA (%s): software relay for notify is used.",
                              vdpa_dev->device->name);

        sfc_vdpa_log_init(ops_data->dev_handle, "done");

        return 0;

fail_vdpa_start:
        sfc_vdpa_close(ops_data);

fail_vdpa_config:
        sfc_vdpa_adapter_unlock(ops_data->dev_handle);

        return -1;
}

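/* vDPA .dev_close callback: stop the datapath and release its resources */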
static int
sfc_vdpa_dev_close(int vid)
{
        struct rte_vdpa_device *vdpa_dev;
        struct sfc_vdpa_ops_data *ops_data;

        vdpa_dev = rte_vhost_get_vdpa_device(vid);

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL) {
                SFC_VDPA_GENERIC_LOG(ERR,
                             "invalid vDPA device : %p, vid : %d",
                             vdpa_dev, vid);
                return -1;
        }

        sfc_vdpa_adapter_lock(ops_data->dev_handle);

        sfc_vdpa_stop(ops_data);
        sfc_vdpa_close(ops_data);

        sfc_vdpa_adapter_unlock(ops_data->dev_handle);

        return 0;
}

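/* vDPA .set_vring_state callback: per-vring state change is not supported */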
static int
sfc_vdpa_set_vring_state(int vid, int vring, int state)
{
        RTE_SET_USED(vid);
        RTE_SET_USED(vring);
        RTE_SET_USED(state);

        return -1;
}

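/* vDPA .set_features callback: not supported, always reports failure */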
static int
sfc_vdpa_set_features(int vid)
{
        RTE_SET_USED(vid);

        return -1;
}

static int
sfc_vdpa_get_vfio_device_fd(int vid)
{
        struct rte_vdpa_device *vdpa_dev;
        struct sfc_vdpa_ops_data *ops_data;
        int vfio_dev_fd;
        void *dev;

        vdpa_dev = rte_vhost_get_vdpa_device(vid);

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        dev = ops_data->dev_handle;
        vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;

        sfc_vdpa_info(dev, "vDPA ops get_vfio_device_fd :: vfio fd : %d",
                      vfio_dev_fd);

        return vfio_dev_fd;
}

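/* vDPA ops implemented by the sfc driver */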
static struct rte_vdpa_dev_ops sfc_vdpa_ops = {
        .get_queue_num = sfc_vdpa_get_queue_num,
        .get_features = sfc_vdpa_get_features,
        .get_protocol_features = sfc_vdpa_get_protocol_features,
        .dev_conf = sfc_vdpa_dev_config,
        .dev_close = sfc_vdpa_dev_close,
        .set_vring_state = sfc_vdpa_set_vring_state,
        .set_features = sfc_vdpa_set_features,
        .get_vfio_device_fd = sfc_vdpa_get_vfio_device_fd,
};

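/*
 * Allocate the vDPA ops context, register the vDPA device with the
 * vhost library and cache the supported device features.
 */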
struct sfc_vdpa_ops_data *
sfc_vdpa_device_init(void *dev_handle, enum sfc_vdpa_context context)
{
        struct sfc_vdpa_ops_data *ops_data;
        struct rte_pci_device *pci_dev;
        int rc;

        /* Create vDPA ops context */
        ops_data = rte_zmalloc("vdpa", sizeof(struct sfc_vdpa_ops_data), 0);
        if (ops_data == NULL)
                return NULL;

        ops_data->vdpa_context = context;
        ops_data->dev_handle = dev_handle;

        pci_dev = sfc_vdpa_adapter_by_dev_handle(dev_handle)->pdev;

        /* Register vDPA device */
        sfc_vdpa_log_init(dev_handle, "register vDPA device");
        ops_data->vdpa_dev =
                rte_vdpa_register_device(&pci_dev->device, &sfc_vdpa_ops);
        if (ops_data->vdpa_dev == NULL) {
                sfc_vdpa_err(dev_handle, "vDPA device registration failed");
                goto fail_register_device;
        }

        /* Read supported device features */
        sfc_vdpa_log_init(dev_handle, "get device features");
        rc = sfc_vdpa_get_device_features(ops_data);
        if (rc != 0)
                goto fail_get_dev_feature;

        /*
         * Driver features are a superset of the device-supported features
         * plus any additional features supported by the driver.
         */
        ops_data->drv_features =
                ops_data->dev_features | SFC_VDPA_DEFAULT_FEATURES;

        ops_data->state = SFC_VDPA_STATE_INITIALIZED;

        return ops_data;

fail_get_dev_feature:
        rte_vdpa_unregister_device(ops_data->vdpa_dev);

fail_register_device:
        rte_free(ops_data);
        return NULL;
}

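/* Unregister the vDPA device and free the ops context */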
void
sfc_vdpa_device_fini(struct sfc_vdpa_ops_data *ops_data)
{
        rte_vdpa_unregister_device(ops_data->vdpa_dev);

        rte_free(ops_data);
}