/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <pthread.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_vdpa.h>
#include <rte_vfio.h>
#include <rte_vhost.h>

#include <vdpa_driver.h>

#include "efx.h"
#include "sfc_vdpa.h"
#include "sfc_vdpa_ops.h"

/* These protocol features are needed to enable notifier ctrl */
#define SFC_VDPA_PROTOCOL_FEATURES \
		((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
		 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
		 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
		 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
		 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD))
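
/*
 * REPLY_ACK, SLAVE_REQ and SLAVE_SEND_FD let the backend send the
 * host-notifier setup requests to the frontend and have them
 * acknowledged, HOST_NOTIFIER allows doorbell pages to be mapped into
 * the guest, and LOG_SHMFD provides the shared memory fd used for
 * dirty-page logging during live migration.
 */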
/*
 * Set of features which are enabled by default.
 * The protocol features bit is needed to enable the notifier ctrl.
 */
#define SFC_VDPA_DEFAULT_FEATURES \
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define SFC_VDPA_MSIX_IRQ_SET_BUF_LEN \
		(sizeof(struct vfio_irq_set) + \
		 sizeof(int) * (SFC_VDPA_MAX_QUEUE_PAIRS * 2 + 1))
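
/*
 * The IRQ set buffer must hold one eventfd per vring (two vrings per
 * queue pair) plus one for the device-level interrupt at vector zero.
 */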
/* Used as the target VF value when the calling function is not a PF */
#define SFC_VDPA_VF_NULL		0xFFFF

static int
sfc_vdpa_get_device_features(struct sfc_vdpa_ops_data *ops_data)
{
	int rc;
	uint64_t dev_features;
	efx_nic_t *nic;

	nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;

	rc = efx_virtio_get_features(nic, EFX_VIRTIO_DEVICE_TYPE_NET,
				     &dev_features);
	if (rc != 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "could not read device features: %s",
			     rte_strerror(rc));
		return rc;
	}

	ops_data->dev_features = dev_features;

	sfc_vdpa_info(ops_data->dev_handle,
		      "device supported virtio features : 0x%" PRIx64,
		      ops_data->dev_features);

	return 0;
}
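
/*
 * Translate a host virtual address (HVA) into the corresponding guest
 * physical address (GPA) by walking the vhost memory table. Returns 0
 * if the address is not covered by any region.
 */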
static uint64_t
hva_to_gpa(int vid, uint64_t hva)
{
	struct rte_vhost_memory *vhost_mem = NULL;
	struct rte_vhost_mem_region *mem_reg = NULL;
	uint32_t i;
	uint64_t gpa = 0;

	if (rte_vhost_get_mem_table(vid, &vhost_mem) < 0)
		goto error;

	for (i = 0; i < vhost_mem->nregions; i++) {
		mem_reg = &vhost_mem->regions[i];

		if (hva >= mem_reg->host_user_addr &&
		    hva < mem_reg->host_user_addr + mem_reg->size) {
			gpa = (hva - mem_reg->host_user_addr) +
			      mem_reg->guest_phys_addr;
			break;
		}
	}

error:
	free(vhost_mem);
	return gpa;
}
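
/*
 * Attach eventfds to the device's MSI-X vectors: vector zero carries
 * the PCI device interrupt, and each vring's call fd is placed at
 * RTE_INTR_VEC_RXTX_OFFSET + vring index so the hardware can signal
 * used-ring updates to the guest directly.
 */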
static int
sfc_vdpa_enable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
{
	int rc;
	int *irq_fd_ptr;
	int vfio_dev_fd;
	uint32_t i, num_vring;
	struct rte_vhost_vring vring;
	struct vfio_irq_set *irq_set;
	struct rte_pci_device *pci_dev;
	char irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];
	void *dev;

	num_vring = rte_vhost_get_vring_num(ops_data->vid);
	dev = ops_data->dev_handle;
	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
	pci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = num_vring + 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	irq_fd_ptr = (int *)&irq_set->data;
	irq_fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] =
		rte_intr_fd_get(pci_dev->intr_handle);

	for (i = 0; i < num_vring; i++) {
		rc = rte_vhost_get_vhost_vring(ops_data->vid, i, &vring);
		if (rc != 0)
			return -1;

		irq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
	}

	rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (rc != 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "error enabling MSI-X interrupts: %s",
			     strerror(errno));
		return -1;
	}

	return 0;
}
static int
sfc_vdpa_disable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
{
	int rc;
	int vfio_dev_fd;
	struct vfio_irq_set irq_set;
	void *dev;

	dev = ops_data->dev_handle;
	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;

	irq_set.argsz = sizeof(irq_set);
	irq_set.count = 0;
	irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set.index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set.start = 0;

	rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
	if (rc != 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "error disabling MSI-X interrupts: %s",
			     strerror(errno));
		return -1;
	}

	return 0;
}
static int
sfc_vdpa_get_vring_info(struct sfc_vdpa_ops_data *ops_data,
			int vq_num, struct sfc_vdpa_vring_info *vring)
{
	int rc;
	uint64_t gpa;
	struct rte_vhost_vring vq;

	rc = rte_vhost_get_vhost_vring(ops_data->vid, vq_num, &vq);
	if (rc < 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "get vhost vring failed: %s", rte_strerror(rc));
		return -1;
	}

	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.desc);
	if (gpa == 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "failed to get GPA for descriptor ring");
		return -1;
	}
	vring->desc = gpa;

	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.avail);
	if (gpa == 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "failed to get GPA for available ring");
		return -1;
	}
	vring->avail = gpa;

	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.used);
	if (gpa == 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "failed to get GPA for used ring");
		return -1;
	}
	vring->used = gpa;

	vring->size = vq.size;

	rc = rte_vhost_get_vring_base(ops_data->vid, vq_num,
				      &vring->last_avail_idx,
				      &vring->last_used_idx);

	return rc;
}
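
/*
 * A virtqueue is started with both static configuration (ring
 * addresses, size, MSI-X vector) and dynamic state: the producer and
 * consumer indices are seeded from the vhost vring base so a restarted
 * queue resumes where the previous backend left off.
 */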
static int
sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)
{
	int rc;
	efx_virtio_vq_t *vq;
	struct sfc_vdpa_vring_info vring;
	efx_virtio_vq_cfg_t vq_cfg;
	efx_virtio_vq_dyncfg_t vq_dyncfg;

	vq = ops_data->vq_cxt[vq_num].vq;
	if (vq == NULL)
		return -1;

	rc = sfc_vdpa_get_vring_info(ops_data, vq_num, &vring);
	if (rc < 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "get vring info failed: %s", rte_strerror(rc));
		goto fail_vring_info;
	}

	vq_cfg.evvc_target_vf = SFC_VDPA_VF_NULL;

	/* Even virtqueue indices are RX, odd indices are TX */
	if (vq_num % 2) {
		vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_TXQ;
		sfc_vdpa_info(ops_data->dev_handle,
			      "configure virtqueue # %d (TXQ)", vq_num);
	} else {
		vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_RXQ;
		sfc_vdpa_info(ops_data->dev_handle,
			      "configure virtqueue # %d (RXQ)", vq_num);
	}

	vq_cfg.evvc_vq_num = vq_num;
	vq_cfg.evvc_desc_tbl_addr = vring.desc;
	vq_cfg.evvc_avail_ring_addr = vring.avail;
	vq_cfg.evvc_used_ring_addr = vring.used;
	vq_cfg.evvc_vq_size = vring.size;

	vq_dyncfg.evvd_vq_pidx = vring.last_used_idx;
	vq_dyncfg.evvd_vq_cidx = vring.last_avail_idx;

	/* MSI-X vector is function-relative */
	vq_cfg.evvc_msix_vector = RTE_INTR_VEC_RXTX_OFFSET + vq_num;
	if (ops_data->vdpa_context == SFC_VDPA_AS_VF)
		vq_cfg.evvc_pas_id = 0;
	vq_cfg.evcc_features = ops_data->dev_features &
			       ops_data->req_features;

	/* Start virtqueue */
	rc = efx_virtio_qstart(vq, &vq_cfg, &vq_dyncfg);
	if (rc != 0) {
		/* destroy virtqueue */
		sfc_vdpa_err(ops_data->dev_handle,
			     "virtqueue start failed: %s",
			     rte_strerror(rc));
		efx_virtio_qdestroy(vq);
		goto fail_virtio_qstart;
	}

	sfc_vdpa_info(ops_data->dev_handle,
		      "virtqueue started successfully for vq_num %d", vq_num);

	ops_data->vq_cxt[vq_num].enable = B_TRUE;

	return 0;

fail_virtio_qstart:
fail_vring_info:
	return rc;
}
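
/*
 * Stopping a virtqueue returns its final producer/consumer indices;
 * they are saved in the queue context so the ring state is not lost
 * across a stop (e.g. for a later restart or state query).
 */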
static int
sfc_vdpa_virtq_stop(struct sfc_vdpa_ops_data *ops_data, int vq_num)
{
	int rc;
	efx_virtio_vq_t *vq;
	efx_virtio_vq_dyncfg_t vq_idx;

	if (ops_data->vq_cxt[vq_num].enable != B_TRUE)
		return -1;

	vq = ops_data->vq_cxt[vq_num].vq;
	if (vq == NULL)
		return -1;

	/* stop the vq */
	rc = efx_virtio_qstop(vq, &vq_idx);
	if (rc == 0) {
		ops_data->vq_cxt[vq_num].cidx = vq_idx.evvd_vq_cidx;
		ops_data->vq_cxt[vq_num].pidx = vq_idx.evvd_vq_pidx;
	}
	ops_data->vq_cxt[vq_num].enable = B_FALSE;

	return rc;
}
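
/*
 * Device configuration moves the state machine from INITIALIZED via
 * CONFIGURING to CONFIGURED: the vring count is validated against the
 * adapter limit, guest memory is DMA-mapped and one hardware virtqueue
 * context is created per vring.
 */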
static int
sfc_vdpa_configure(struct sfc_vdpa_ops_data *ops_data)
{
	int rc, i;
	int nr_vring, max_vring_cnt;
	efx_virtio_vq_t *vq;
	efx_nic_t *nic;
	void *dev;

	dev = ops_data->dev_handle;
	nic = sfc_vdpa_adapter_by_dev_handle(dev)->nic;

	SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_INITIALIZED);

	ops_data->state = SFC_VDPA_STATE_CONFIGURING;

	nr_vring = rte_vhost_get_vring_num(ops_data->vid);
	max_vring_cnt =
		(sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);

	/* The number of vrings must not exceed the supported max VQ count */
	if (nr_vring > max_vring_cnt) {
		sfc_vdpa_err(dev,
			     "nr_vring (%d) is > max vring count (%d)",
			     nr_vring, max_vring_cnt);
		goto fail_vring_num;
	}

	rc = sfc_vdpa_dma_map(ops_data, true);
	if (rc) {
		sfc_vdpa_err(dev,
			     "DMA map failed: %s", rte_strerror(rc));
		goto fail_dma_map;
	}

	for (i = 0; i < nr_vring; i++) {
		rc = efx_virtio_qcreate(nic, &vq);
		if ((rc != 0) || (vq == NULL)) {
			sfc_vdpa_err(dev,
				     "virtqueue create failed: %s",
				     rte_strerror(rc));
			goto fail_vq_create;
		}

		/* store created virtqueue context */
		ops_data->vq_cxt[i].vq = vq;
	}
	ops_data->vq_count = i;

	ops_data->state = SFC_VDPA_STATE_CONFIGURED;

	return 0;

fail_vq_create:
	sfc_vdpa_dma_map(ops_data, false);

fail_dma_map:
fail_vring_num:
	ops_data->state = SFC_VDPA_STATE_INITIALIZED;

	return -1;
}

static void
sfc_vdpa_close(struct sfc_vdpa_ops_data *ops_data)
{
	int i;

	if (ops_data->state != SFC_VDPA_STATE_CONFIGURED)
		return;

	ops_data->state = SFC_VDPA_STATE_CLOSING;

	for (i = 0; i < ops_data->vq_count; i++) {
		if (ops_data->vq_cxt[i].vq == NULL)
			continue;

		efx_virtio_qdestroy(ops_data->vq_cxt[i].vq);
	}

	sfc_vdpa_dma_map(ops_data, false);

	ops_data->state = SFC_VDPA_STATE_INITIALIZED;
}

static void
sfc_vdpa_stop(struct sfc_vdpa_ops_data *ops_data)
{
	int i;
	int rc;

	if (ops_data->state != SFC_VDPA_STATE_STARTED)
		return;

	ops_data->state = SFC_VDPA_STATE_STOPPING;

	for (i = 0; i < ops_data->vq_count; i++) {
		rc = sfc_vdpa_virtq_stop(ops_data, i);
		if (rc != 0)
			continue;
	}

	sfc_vdpa_disable_vfio_intr(ops_data);

	sfc_vdpa_filter_remove(ops_data);

	ops_data->state = SFC_VDPA_STATE_CONFIGURED;
}
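
/*
 * Start sequence: enable MSI-X interrupts first so the call fds are in
 * place, latch the features negotiated by vhost, start each virtqueue
 * and finally configure MAC filters to steer traffic to the device.
 */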
static int
sfc_vdpa_start(struct sfc_vdpa_ops_data *ops_data)
{
	int i, j;
	int rc;

	SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_CONFIGURED);

	sfc_vdpa_log_init(ops_data->dev_handle, "entry");

	ops_data->state = SFC_VDPA_STATE_STARTING;

	sfc_vdpa_log_init(ops_data->dev_handle, "enable interrupts");
	rc = sfc_vdpa_enable_vfio_intr(ops_data);
	if (rc < 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "vfio intr allocation failed: %s",
			     rte_strerror(rc));
		goto fail_enable_vfio_intr;
	}

	rte_vhost_get_negotiated_features(ops_data->vid,
					  &ops_data->req_features);

	sfc_vdpa_info(ops_data->dev_handle,
		      "negotiated features : 0x%" PRIx64,
		      ops_data->req_features);

	for (i = 0; i < ops_data->vq_count; i++) {
		sfc_vdpa_log_init(ops_data->dev_handle,
				  "starting vq# %d", i);
		rc = sfc_vdpa_virtq_start(ops_data, i);
		if (rc != 0)
			goto fail_vq_start;
	}
	ops_data->vq_count = i;

	sfc_vdpa_log_init(ops_data->dev_handle,
			  "configure MAC filters");
	rc = sfc_vdpa_filter_config(ops_data);
	if (rc != 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "MAC filter config failed: %s",
			     rte_strerror(rc));
		goto fail_filter_cfg;
	}

	ops_data->state = SFC_VDPA_STATE_STARTED;

	sfc_vdpa_log_init(ops_data->dev_handle, "done");

	return 0;

fail_filter_cfg:
	/* remove already created filters */
	sfc_vdpa_filter_remove(ops_data);
fail_vq_start:
	/* stop already started virtqueues */
	for (j = 0; j < i; j++)
		sfc_vdpa_virtq_stop(ops_data, j);
	sfc_vdpa_disable_vfio_intr(ops_data);

fail_enable_vfio_intr:
	ops_data->state = SFC_VDPA_STATE_CONFIGURED;

	return rc;
}
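
/*
 * The callbacks below implement the rte_vdpa_dev_ops interface and are
 * invoked by the vhost library on behalf of the vhost-user frontend;
 * they are wired into the sfc_vdpa_ops structure at the bottom of this
 * file.
 */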
static int
sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)
{
	struct sfc_vdpa_ops_data *ops_data;
	void *dev;

	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL)
		return -1;

	dev = ops_data->dev_handle;
	*queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;

	sfc_vdpa_info(dev, "vDPA ops get_queue_num :: supported queue num : %u",
		      *queue_num);

	return 0;
}

static int
sfc_vdpa_get_features(struct rte_vdpa_device *vdpa_dev, uint64_t *features)
{
	struct sfc_vdpa_ops_data *ops_data;

	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL)
		return -1;

	*features = ops_data->drv_features;

	sfc_vdpa_info(ops_data->dev_handle,
		      "vDPA ops get_feature :: features : 0x%" PRIx64,
		      *features);

	return 0;
}

static int
sfc_vdpa_get_protocol_features(struct rte_vdpa_device *vdpa_dev,
			       uint64_t *features)
{
	struct sfc_vdpa_ops_data *ops_data;

	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL)
		return -1;

	*features = SFC_VDPA_PROTOCOL_FEATURES;

	sfc_vdpa_info(ops_data->dev_handle,
		      "vDPA ops get_protocol_feature :: features : 0x%" PRIx64,
		      *features);

	return 0;
}

static void *
sfc_vdpa_notify_ctrl(void *arg)
{
	struct sfc_vdpa_ops_data *ops_data;
	int vid;

	ops_data = arg;
	if (ops_data == NULL)
		return NULL;

	sfc_vdpa_adapter_lock(ops_data->dev_handle);

	vid = ops_data->vid;

	if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
		sfc_vdpa_info(ops_data->dev_handle,
			      "vDPA (%s): notifier could not be configured",
			      ops_data->vdpa_dev->device->name);

	sfc_vdpa_adapter_unlock(ops_data->dev_handle);

	return NULL;
}

static int
sfc_vdpa_setup_notify_ctrl(struct sfc_vdpa_ops_data *ops_data)
{
	int ret;

	ops_data->is_notify_thread_started = false;

	/*
	 * Use rte_vhost_host_notifier_ctrl in a thread to avoid a
	 * deadlock scenario when multiple VFs are used by a single vdpa
	 * application and passed to a single VM.
	 */
	ret = pthread_create(&ops_data->notify_tid, NULL,
			     sfc_vdpa_notify_ctrl, ops_data);
	if (ret != 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "failed to create notify_ctrl thread: %s",
			     rte_strerror(ret));
		return -1;
	}
	ops_data->is_notify_thread_started = true;

	return 0;
}

static int
sfc_vdpa_dev_config(int vid)
{
	struct rte_vdpa_device *vdpa_dev;
	int rc;
	struct sfc_vdpa_ops_data *ops_data;

	vdpa_dev = rte_vhost_get_vdpa_device(vid);

	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL) {
		SFC_VDPA_GENERIC_LOG(ERR,
				     "invalid vDPA device : %p, vid : %d",
				     vdpa_dev, vid);
		return -1;
	}

	sfc_vdpa_log_init(ops_data->dev_handle, "entry");

	ops_data->vid = vid;

	sfc_vdpa_adapter_lock(ops_data->dev_handle);

	sfc_vdpa_log_init(ops_data->dev_handle, "configuring");
	rc = sfc_vdpa_configure(ops_data);
	if (rc != 0)
		goto fail_vdpa_config;

	sfc_vdpa_log_init(ops_data->dev_handle, "starting");
	rc = sfc_vdpa_start(ops_data);
	if (rc != 0)
		goto fail_vdpa_start;

	rc = sfc_vdpa_setup_notify_ctrl(ops_data);
	if (rc != 0)
		goto fail_vdpa_notify;

	sfc_vdpa_adapter_unlock(ops_data->dev_handle);

	sfc_vdpa_log_init(ops_data->dev_handle, "done");

	return 0;

fail_vdpa_notify:
	sfc_vdpa_stop(ops_data);

fail_vdpa_start:
	sfc_vdpa_close(ops_data);

fail_vdpa_config:
	sfc_vdpa_adapter_unlock(ops_data->dev_handle);

	return -1;
}

static int
sfc_vdpa_dev_close(int vid)
{
	int ret;
	void *status;
	struct rte_vdpa_device *vdpa_dev;
	struct sfc_vdpa_ops_data *ops_data;

	vdpa_dev = rte_vhost_get_vdpa_device(vid);

	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL) {
		SFC_VDPA_GENERIC_LOG(ERR,
				     "invalid vDPA device : %p, vid : %d",
				     vdpa_dev, vid);
		return -1;
	}

	sfc_vdpa_adapter_lock(ops_data->dev_handle);
	if (ops_data->is_notify_thread_started == true) {
		ret = pthread_cancel(ops_data->notify_tid);
		if (ret != 0) {
			sfc_vdpa_err(ops_data->dev_handle,
				     "failed to cancel notify_ctrl thread: %s",
				     rte_strerror(ret));
		}

		ret = pthread_join(ops_data->notify_tid, &status);
		if (ret != 0) {
			sfc_vdpa_err(ops_data->dev_handle,
				     "failed to join terminated notify_ctrl thread: %s",
				     rte_strerror(ret));
		}
	}
	ops_data->is_notify_thread_started = false;

	sfc_vdpa_stop(ops_data);
	sfc_vdpa_close(ops_data);

	sfc_vdpa_adapter_unlock(ops_data->dev_handle);

	return 0;
}

static int
sfc_vdpa_set_vring_state(int vid, int vring, int state)
{
	struct sfc_vdpa_ops_data *ops_data;
	struct rte_vdpa_device *vdpa_dev;
	int rc, vring_max;
	void *dev;

	vdpa_dev = rte_vhost_get_vdpa_device(vid);
	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL)
		return -1;

	dev = ops_data->dev_handle;

	sfc_vdpa_info(dev,
		      "vDPA ops set_vring_state: vid: %d, vring: %d, state: %d",
		      vid, vring, state);

	vring_max = (sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);

	if (vring < 0 || vring >= vring_max) {
		sfc_vdpa_err(dev, "received invalid vring id : %d to set state",
			     vring);
		return -1;
	}

	/*
	 * Skip if the device is not yet started: virtqueue state can be
	 * changed only once the queues are created and the rest of the
	 * configuration is done.
	 */
	if (ops_data->state != SFC_VDPA_STATE_STARTED)
		return 0;

	if (ops_data->vq_cxt[vring].enable == state)
		return 0;

	if (state == 0) {
		rc = sfc_vdpa_virtq_stop(ops_data, vring);
		if (rc != 0)
			sfc_vdpa_err(dev, "virtqueue stop failed: %s",
				     rte_strerror(rc));
	} else {
		rc = sfc_vdpa_virtq_start(ops_data, vring);
		if (rc != 0)
			sfc_vdpa_err(dev, "virtqueue start failed: %s",
				     rte_strerror(rc));
	}

	return rc;
}
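
/*
 * Runtime feature renegotiation is not supported by this driver;
 * the callback below simply reports failure.
 */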
static int
sfc_vdpa_set_features(int vid)
{
	RTE_SET_USED(vid);

	return -1;
}

static int
sfc_vdpa_get_vfio_device_fd(int vid)
{
	struct rte_vdpa_device *vdpa_dev;
	struct sfc_vdpa_ops_data *ops_data;
	int vfio_dev_fd;
	void *dev;

	vdpa_dev = rte_vhost_get_vdpa_device(vid);

	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL)
		return -1;

	dev = ops_data->dev_handle;
	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;

	sfc_vdpa_info(dev, "vDPA ops get_vfio_device_fd :: vfio fd : %d",
		      vfio_dev_fd);

	return vfio_dev_fd;
}

static int
sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	int ret, vfio_dev_fd;
	efx_rc_t rc;
	efx_nic_t *nic;
	unsigned int bar_offset;
	struct rte_vdpa_device *vdpa_dev;
	struct sfc_vdpa_ops_data *ops_data;
	struct vfio_region_info reg = { .argsz = sizeof(reg) };
	const efx_nic_cfg_t *encp;
	int max_vring_cnt;
	uint64_t len;
	void *dev;

	vdpa_dev = rte_vhost_get_vdpa_device(vid);
	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
	if (ops_data == NULL)
		return -1;

	dev = ops_data->dev_handle;
	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
	max_vring_cnt =
		(sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
	nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;
	encp = efx_nic_cfg_get(nic);

	if (qid >= max_vring_cnt) {
		sfc_vdpa_err(dev, "invalid qid : %d", qid);
		return -1;
	}
	if (ops_data->vq_cxt[qid].enable != B_TRUE) {
		sfc_vdpa_err(dev, "vq is not enabled");
		return -1;
	}

	rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
					    &bar_offset);
	if (rc != 0) {
		sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
			     rte_strerror(rc));
		return rc;
	}

	reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
	if (ret != 0) {
		sfc_vdpa_err(dev, "could not get device region info: %s",
			     strerror(errno));
		return ret;
	}

	*offset = reg.offset + bar_offset;

	len = (1U << encp->enc_vi_window_shift) / 2;
	if (len >= sysconf(_SC_PAGESIZE)) {
		*size = sysconf(_SC_PAGESIZE);
	} else {
		sfc_vdpa_err(dev, "invalid VI window size : 0x%" PRIx64, len);
		return -1;
	}

	sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
		      *offset);

	return 0;
}

static struct rte_vdpa_dev_ops sfc_vdpa_ops = {
	.get_queue_num = sfc_vdpa_get_queue_num,
	.get_features = sfc_vdpa_get_features,
	.get_protocol_features = sfc_vdpa_get_protocol_features,
	.dev_conf = sfc_vdpa_dev_config,
	.dev_close = sfc_vdpa_dev_close,
	.set_vring_state = sfc_vdpa_set_vring_state,
	.set_features = sfc_vdpa_set_features,
	.get_vfio_device_fd = sfc_vdpa_get_vfio_device_fd,
	.get_notify_area = sfc_vdpa_get_notify_area,
};

struct sfc_vdpa_ops_data *
sfc_vdpa_device_init(void *dev_handle, enum sfc_vdpa_context context)
{
	struct sfc_vdpa_ops_data *ops_data;
	struct rte_pci_device *pci_dev;
	int rc;

	/* Create vDPA ops context */
	ops_data = rte_zmalloc("vdpa", sizeof(struct sfc_vdpa_ops_data), 0);
	if (ops_data == NULL)
		return NULL;

	ops_data->vdpa_context = context;
	ops_data->dev_handle = dev_handle;

	pci_dev = sfc_vdpa_adapter_by_dev_handle(dev_handle)->pdev;

	/* Register vDPA Device */
	sfc_vdpa_log_init(dev_handle, "register vDPA device");
	ops_data->vdpa_dev =
		rte_vdpa_register_device(&pci_dev->device, &sfc_vdpa_ops);
	if (ops_data->vdpa_dev == NULL) {
		sfc_vdpa_err(dev_handle, "vDPA device registration failed");
		goto fail_register_device;
	}

	/* Read supported device features */
	sfc_vdpa_log_init(dev_handle, "get device features");
	rc = sfc_vdpa_get_device_features(ops_data);
	if (rc != 0)
		goto fail_get_dev_feature;

	/*
	 * Driver features are a superset of the features supported by
	 * the device plus any additional features supported by the
	 * driver.
	 */
	ops_data->drv_features =
		ops_data->dev_features | SFC_VDPA_DEFAULT_FEATURES;

	ops_data->state = SFC_VDPA_STATE_INITIALIZED;

	return ops_data;

fail_get_dev_feature:
	rte_vdpa_unregister_device(ops_data->vdpa_dev);

fail_register_device:
	rte_free(ops_data);
	return NULL;
}

void
sfc_vdpa_device_fini(struct sfc_vdpa_ops_data *ops_data)
{
	rte_vdpa_unregister_device(ops_data->vdpa_dev);

	rte_free(ops_data);
}