vdpa/sfc: support MAC filter config
drivers/vdpa/sfc/sfc_vdpa_ops.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <pthread.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_vdpa.h>
#include <rte_vfio.h>
#include <rte_vhost.h>

#include <vdpa_driver.h>

#include "efx.h"
#include "sfc_vdpa_ops.h"
#include "sfc_vdpa.h"

/* These protocol features are needed to enable notifier ctrl */
#define SFC_VDPA_PROTOCOL_FEATURES \
                ((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
                 (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD))
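
/*
 * SLAVE_REQ and SLAVE_SEND_FD open the backend-to-frontend request
 * channel and allow file descriptors to be passed over it,
 * HOST_NOTIFIER lets the backend ask the frontend to mmap the doorbell
 * region directly into the guest, REPLY_ACK makes those requests
 * acknowledged, and LOG_SHMFD provides the shared memory used for
 * dirty page logging.
 */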

/*
 * Set of features which are enabled by default.
 * The protocol features bit is needed to enable the notifier ctrl.
 */
#define SFC_VDPA_DEFAULT_FEATURES \
                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define SFC_VDPA_MSIX_IRQ_SET_BUF_LEN \
                (sizeof(struct vfio_irq_set) + \
                sizeof(int) * (SFC_VDPA_MAX_QUEUE_PAIRS * 2 + 1))
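
/*
 * The buffer carries a struct vfio_irq_set followed by one eventfd per
 * MSI-X vector: vector 0 for the device/config interrupt plus two
 * vectors (RX and TX) per queue pair; see sfc_vdpa_enable_vfio_intr().
 */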

/* Target VF value used when the calling function is not a PF */
#define SFC_VDPA_VF_NULL                0xFFFF

static int
sfc_vdpa_get_device_features(struct sfc_vdpa_ops_data *ops_data)
{
        int rc;
        uint64_t dev_features;
        efx_nic_t *nic;

        nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;

        rc = efx_virtio_get_features(nic, EFX_VIRTIO_DEVICE_TYPE_NET,
                                     &dev_features);
        if (rc != 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "could not read device features: %s",
                             rte_strerror(rc));
                return rc;
        }

        ops_data->dev_features = dev_features;

        sfc_vdpa_info(ops_data->dev_handle,
                      "device supported virtio features : 0x%" PRIx64,
                      ops_data->dev_features);

        return 0;
}

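/*
 * Walk the vhost memory table and translate a host virtual address into
 * the guest physical address that is programmed into the device.
 * Returns 0 when the address does not fall into any guest memory region.
 */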
static uint64_t
hva_to_gpa(int vid, uint64_t hva)
{
        struct rte_vhost_memory *vhost_mem = NULL;
        struct rte_vhost_mem_region *mem_reg = NULL;
        uint32_t i;
        uint64_t gpa = 0;

        if (rte_vhost_get_mem_table(vid, &vhost_mem) < 0)
                goto error;

        for (i = 0; i < vhost_mem->nregions; i++) {
                mem_reg = &vhost_mem->regions[i];

                if (hva >= mem_reg->host_user_addr &&
                                hva < mem_reg->host_user_addr + mem_reg->size) {
                        gpa = (hva - mem_reg->host_user_addr) +
                                mem_reg->guest_phys_addr;
                        break;
                }
        }

error:
        free(vhost_mem);
        return gpa;
}

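/*
 * Bind the device's MSI-X vectors to eventfds through VFIO: vector 0 is
 * wired to the PCI interrupt handle and vector N + 1 to the call eventfd
 * of vring N, so a device interrupt on a vring directly kicks the guest.
 */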
static int
sfc_vdpa_enable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
{
        int rc;
        int *irq_fd_ptr;
        int vfio_dev_fd;
        uint32_t i, num_vring;
        struct rte_vhost_vring vring;
        struct vfio_irq_set *irq_set;
        struct rte_pci_device *pci_dev;
        char irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];
        void *dev;

        num_vring = rte_vhost_get_vring_num(ops_data->vid);
        dev = ops_data->dev_handle;
        vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
        pci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;

        irq_set = (struct vfio_irq_set *)irq_set_buf;
        irq_set->argsz = sizeof(irq_set_buf);
        irq_set->count = num_vring + 1;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = 0;
        irq_fd_ptr = (int *)&irq_set->data;
        irq_fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] =
                rte_intr_fd_get(pci_dev->intr_handle);

        for (i = 0; i < num_vring; i++) {
                rc = rte_vhost_get_vhost_vring(ops_data->vid, i, &vring);
                if (rc)
                        return -1;

                irq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
        }

        rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
        if (rc) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "error enabling MSI-X interrupts: %s",
                             strerror(errno));
                return -1;
        }

        return 0;
}

static int
sfc_vdpa_disable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
{
        int rc;
        int vfio_dev_fd;
        struct vfio_irq_set irq_set;
        void *dev;

        dev = ops_data->dev_handle;
        vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;

        irq_set.argsz = sizeof(irq_set);
        irq_set.count = 0;
        irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set.index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set.start = 0;

        rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
        if (rc) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "error disabling MSI-X interrupts: %s",
                             strerror(errno));
                return -1;
        }

        return 0;
}

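/*
 * Collect everything needed to program one virtqueue on the device:
 * the guest physical addresses of the descriptor table, available ring
 * and used ring, the ring size and the last available/used indexes.
 */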
static int
sfc_vdpa_get_vring_info(struct sfc_vdpa_ops_data *ops_data,
                        int vq_num, struct sfc_vdpa_vring_info *vring)
{
        int rc;
        uint64_t gpa;
        struct rte_vhost_vring vq;

        rc = rte_vhost_get_vhost_vring(ops_data->vid, vq_num, &vq);
        if (rc < 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "get vhost vring failed: %s", rte_strerror(rc));
                return rc;
        }

        gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.desc);
        if (gpa == 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "failed to get GPA for descriptor ring");
                return -1;
        }
        vring->desc = gpa;

        gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.avail);
        if (gpa == 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "failed to get GPA for available ring");
                return -1;
        }
        vring->avail = gpa;

        gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.used);
        if (gpa == 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "failed to get GPA for used ring");
                return -1;
        }
        vring->used = gpa;

        vring->size = vq.size;

        rc = rte_vhost_get_vring_base(ops_data->vid, vq_num,
                                      &vring->last_avail_idx,
                                      &vring->last_used_idx);

        return rc;
}

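/*
 * Configure and start one virtqueue on the device using the ring
 * addresses and indexes obtained from vhost, so the hardware resumes
 * exactly where the guest driver left off.
 */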
static int
sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)
{
        int rc;
        efx_virtio_vq_t *vq;
        struct sfc_vdpa_vring_info vring;
        efx_virtio_vq_cfg_t vq_cfg;
        efx_virtio_vq_dyncfg_t vq_dyncfg;

        vq = ops_data->vq_cxt[vq_num].vq;
        if (vq == NULL)
                return -1;

        rc = sfc_vdpa_get_vring_info(ops_data, vq_num, &vring);
        if (rc < 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "get vring info failed: %s", rte_strerror(rc));
                goto fail_vring_info;
        }

        vq_cfg.evvc_target_vf = SFC_VDPA_VF_NULL;

        /* Even virtqueue indices are RX queues, odd indices are TX queues */
        if (vq_num % 2) {
                vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_TXQ;
                sfc_vdpa_info(ops_data->dev_handle,
                              "configure virtqueue # %d (TXQ)", vq_num);
        } else {
                vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_RXQ;
                sfc_vdpa_info(ops_data->dev_handle,
                              "configure virtqueue # %d (RXQ)", vq_num);
        }

        vq_cfg.evvc_vq_num = vq_num;
        vq_cfg.evvc_desc_tbl_addr   = vring.desc;
        vq_cfg.evvc_avail_ring_addr = vring.avail;
        vq_cfg.evvc_used_ring_addr  = vring.used;
        vq_cfg.evvc_vq_size = vring.size;

        vq_dyncfg.evvd_vq_pidx = vring.last_used_idx;
        vq_dyncfg.evvd_vq_cidx = vring.last_avail_idx;

        /* MSI-X vector is function-relative */
        vq_cfg.evvc_msix_vector = RTE_INTR_VEC_RXTX_OFFSET + vq_num;
        if (ops_data->vdpa_context == SFC_VDPA_AS_VF)
                vq_cfg.evvc_pas_id = 0;
        vq_cfg.evcc_features = ops_data->dev_features &
                               ops_data->req_features;

        /* Start the virtqueue */
        rc = efx_virtio_qstart(vq, &vq_cfg, &vq_dyncfg);
        if (rc != 0) {
                /* Destroy the virtqueue on start failure */
                sfc_vdpa_err(ops_data->dev_handle,
                             "virtqueue start failed: %s",
                             rte_strerror(rc));
                efx_virtio_qdestroy(vq);
                goto fail_virtio_qstart;
        }

        sfc_vdpa_info(ops_data->dev_handle,
                      "virtqueue started successfully for vq_num %d", vq_num);

        ops_data->vq_cxt[vq_num].enable = B_TRUE;

        return rc;

fail_virtio_qstart:
fail_vring_info:
        return rc;
}

static int
sfc_vdpa_virtq_stop(struct sfc_vdpa_ops_data *ops_data, int vq_num)
{
        int rc;
        efx_virtio_vq_dyncfg_t vq_idx;
        efx_virtio_vq_t *vq;

        if (ops_data->vq_cxt[vq_num].enable != B_TRUE)
                return -1;

        vq = ops_data->vq_cxt[vq_num].vq;
        if (vq == NULL)
                return -1;

        /* stop the vq */
        rc = efx_virtio_qstop(vq, &vq_idx);
        if (rc == 0) {
                ops_data->vq_cxt[vq_num].cidx = vq_idx.evvd_vq_cidx;
                ops_data->vq_cxt[vq_num].pidx = vq_idx.evvd_vq_pidx;
        }
        ops_data->vq_cxt[vq_num].enable = B_FALSE;

        return rc;
}

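/*
 * First half of the dev_conf work: validate the vring count against the
 * device limit, map the guest memory for DMA and create a device
 * virtqueue context for every vring reported by vhost.
 */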
static int
sfc_vdpa_configure(struct sfc_vdpa_ops_data *ops_data)
{
        int rc, i;
        int nr_vring;
        int max_vring_cnt;
        efx_virtio_vq_t *vq;
        efx_nic_t *nic;
        void *dev;

        dev = ops_data->dev_handle;
        nic = sfc_vdpa_adapter_by_dev_handle(dev)->nic;

        SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_INITIALIZED);

        ops_data->state = SFC_VDPA_STATE_CONFIGURING;

        nr_vring = rte_vhost_get_vring_num(ops_data->vid);
        max_vring_cnt =
                (sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);

        /* The number of vrings must not exceed the supported max VQ count */
        if (nr_vring > max_vring_cnt) {
                sfc_vdpa_err(dev,
                             "nr_vring (%d) is > max vring count (%d)",
                             nr_vring, max_vring_cnt);
                goto fail_vring_num;
        }

        rc = sfc_vdpa_dma_map(ops_data, true);
        if (rc) {
                sfc_vdpa_err(dev,
                             "DMA map failed: %s", rte_strerror(rc));
                goto fail_dma_map;
        }

        for (i = 0; i < nr_vring; i++) {
                rc = efx_virtio_qcreate(nic, &vq);
                if ((rc != 0) || (vq == NULL)) {
                        sfc_vdpa_err(dev,
                                     "virtqueue create failed: %s",
                                     rte_strerror(rc));
                        goto fail_vq_create;
                }

                /* Store the created virtqueue context */
                ops_data->vq_cxt[i].vq = vq;
        }

        ops_data->vq_count = i;

        ops_data->state = SFC_VDPA_STATE_CONFIGURED;

        return 0;

fail_vq_create:
        sfc_vdpa_dma_map(ops_data, false);

fail_dma_map:
fail_vring_num:
        ops_data->state = SFC_VDPA_STATE_INITIALIZED;

        return -1;
}

static void
sfc_vdpa_close(struct sfc_vdpa_ops_data *ops_data)
{
        int i;

        if (ops_data->state != SFC_VDPA_STATE_CONFIGURED)
                return;

        ops_data->state = SFC_VDPA_STATE_CLOSING;

        for (i = 0; i < ops_data->vq_count; i++) {
                if (ops_data->vq_cxt[i].vq == NULL)
                        continue;

                efx_virtio_qdestroy(ops_data->vq_cxt[i].vq);
        }

        sfc_vdpa_dma_map(ops_data, false);

        ops_data->state = SFC_VDPA_STATE_INITIALIZED;
}

static void
sfc_vdpa_stop(struct sfc_vdpa_ops_data *ops_data)
{
        int i;

        if (ops_data->state != SFC_VDPA_STATE_STARTED)
                return;

        ops_data->state = SFC_VDPA_STATE_STOPPING;

        for (i = 0; i < ops_data->vq_count; i++)
                sfc_vdpa_virtq_stop(ops_data, i);

        sfc_vdpa_disable_vfio_intr(ops_data);

        sfc_vdpa_filter_remove(ops_data);

        ops_data->state = SFC_VDPA_STATE_CONFIGURED;
}

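/*
 * Second half of the dev_conf work: route interrupts through VFIO,
 * start every virtqueue with the features negotiated over vhost-user
 * and configure the MAC filters for the device.
 */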
static int
sfc_vdpa_start(struct sfc_vdpa_ops_data *ops_data)
{
        int i, j;
        int rc;

        SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_CONFIGURED);

        sfc_vdpa_log_init(ops_data->dev_handle, "entry");

        ops_data->state = SFC_VDPA_STATE_STARTING;

        sfc_vdpa_log_init(ops_data->dev_handle, "enable interrupts");
        rc = sfc_vdpa_enable_vfio_intr(ops_data);
        if (rc < 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "vfio intr allocation failed: %s",
                             rte_strerror(rc));
                goto fail_enable_vfio_intr;
        }

        rte_vhost_get_negotiated_features(ops_data->vid,
                                          &ops_data->req_features);

        sfc_vdpa_info(ops_data->dev_handle,
                      "negotiated features : 0x%" PRIx64,
                      ops_data->req_features);

        for (i = 0; i < ops_data->vq_count; i++) {
                sfc_vdpa_log_init(ops_data->dev_handle,
                                  "starting vq# %d", i);
                rc = sfc_vdpa_virtq_start(ops_data, i);
                if (rc != 0)
                        goto fail_vq_start;
        }

        ops_data->vq_count = i;

        sfc_vdpa_log_init(ops_data->dev_handle,
                          "configure MAC filters");
        rc = sfc_vdpa_filter_config(ops_data);
        if (rc != 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "MAC filter config failed: %s",
                             rte_strerror(rc));
                goto fail_filter_cfg;
        }

        ops_data->state = SFC_VDPA_STATE_STARTED;

        sfc_vdpa_log_init(ops_data->dev_handle, "done");

        return 0;

fail_filter_cfg:
        /* Remove the already created filters */
        sfc_vdpa_filter_remove(ops_data);
fail_vq_start:
        /* Stop the already started virtqueues */
        for (j = 0; j < i; j++)
                sfc_vdpa_virtq_stop(ops_data, j);
        sfc_vdpa_disable_vfio_intr(ops_data);

fail_enable_vfio_intr:
        ops_data->state = SFC_VDPA_STATE_CONFIGURED;

        return rc;
}

static int
sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)
{
        struct sfc_vdpa_ops_data *ops_data;
        void *dev;

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        dev = ops_data->dev_handle;
        *queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;

        sfc_vdpa_info(dev, "vDPA ops get_queue_num :: supported queue num : %u",
                      *queue_num);

        return 0;
}

static int
sfc_vdpa_get_features(struct rte_vdpa_device *vdpa_dev, uint64_t *features)
{
        struct sfc_vdpa_ops_data *ops_data;

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        *features = ops_data->drv_features;

        sfc_vdpa_info(ops_data->dev_handle,
                      "vDPA ops get_feature :: features : 0x%" PRIx64,
                      *features);

        return 0;
}

static int
sfc_vdpa_get_protocol_features(struct rte_vdpa_device *vdpa_dev,
                               uint64_t *features)
{
        struct sfc_vdpa_ops_data *ops_data;

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        *features = SFC_VDPA_PROTOCOL_FEATURES;

        sfc_vdpa_info(ops_data->dev_handle,
                      "vDPA ops get_protocol_feature :: features : 0x%" PRIx64,
                      *features);

        return 0;
}

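/*
 * Thread entry point: relay the host notifier setup for all queues to
 * vhost under the adapter lock. A failure here is only logged; queue
 * notifications are then relayed in software rather than written
 * straight to the hardware doorbell.
 */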
static void *
sfc_vdpa_notify_ctrl(void *arg)
{
        struct sfc_vdpa_ops_data *ops_data;
        int vid;

        ops_data = arg;
        if (ops_data == NULL)
                return NULL;

        sfc_vdpa_adapter_lock(ops_data->dev_handle);

        vid = ops_data->vid;

        if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
                sfc_vdpa_info(ops_data->dev_handle,
                              "vDPA (%s): notifier could not be configured",
                              ops_data->vdpa_dev->device->name);

        sfc_vdpa_adapter_unlock(ops_data->dev_handle);

        return NULL;
}

static int
sfc_vdpa_setup_notify_ctrl(struct sfc_vdpa_ops_data *ops_data)
{
        int ret;

        ops_data->is_notify_thread_started = false;

        /*
         * Call rte_vhost_host_notifier_ctrl in a dedicated thread to avoid
         * a deadlock when multiple VFs are used by a single vDPA
         * application and multiple VFs are passed through to a single VM.
         */
        ret = pthread_create(&ops_data->notify_tid, NULL,
                             sfc_vdpa_notify_ctrl, ops_data);
        if (ret != 0) {
                sfc_vdpa_err(ops_data->dev_handle,
                             "failed to create notify_ctrl thread: %s",
                             rte_strerror(ret));
                return -1;
        }
        ops_data->is_notify_thread_started = true;

        return 0;
}

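/*
 * vhost-user dev_conf callback: called once the guest driver is ready,
 * to configure and start the device for this vid.
 */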
static int
sfc_vdpa_dev_config(int vid)
{
        struct rte_vdpa_device *vdpa_dev;
        int rc;
        struct sfc_vdpa_ops_data *ops_data;

        vdpa_dev = rte_vhost_get_vdpa_device(vid);

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL) {
                /* No valid dev_handle to log through when the lookup fails */
                return -1;
        }

        sfc_vdpa_log_init(ops_data->dev_handle, "entry");

        ops_data->vid = vid;

        sfc_vdpa_adapter_lock(ops_data->dev_handle);

        sfc_vdpa_log_init(ops_data->dev_handle, "configuring");
        rc = sfc_vdpa_configure(ops_data);
        if (rc != 0)
                goto fail_vdpa_config;

        sfc_vdpa_log_init(ops_data->dev_handle, "starting");
        rc = sfc_vdpa_start(ops_data);
        if (rc != 0)
                goto fail_vdpa_start;

        rc = sfc_vdpa_setup_notify_ctrl(ops_data);
        if (rc != 0)
                goto fail_vdpa_notify;

        sfc_vdpa_adapter_unlock(ops_data->dev_handle);

        sfc_vdpa_log_init(ops_data->dev_handle, "done");

        return 0;

fail_vdpa_notify:
        sfc_vdpa_stop(ops_data);

fail_vdpa_start:
        sfc_vdpa_close(ops_data);

fail_vdpa_config:
        sfc_vdpa_adapter_unlock(ops_data->dev_handle);

        return -1;
}

static int
sfc_vdpa_dev_close(int vid)
{
        int ret;
        struct rte_vdpa_device *vdpa_dev;
        struct sfc_vdpa_ops_data *ops_data;

        vdpa_dev = rte_vhost_get_vdpa_device(vid);

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL) {
                /* No valid dev_handle to log through when the lookup fails */
                return -1;
        }

        sfc_vdpa_adapter_lock(ops_data->dev_handle);
        if (ops_data->is_notify_thread_started == true) {
                void *status;
                ret = pthread_cancel(ops_data->notify_tid);
                if (ret != 0) {
                        sfc_vdpa_err(ops_data->dev_handle,
                                     "failed to cancel notify_ctrl thread: %s",
                                     rte_strerror(ret));
                }

                ret = pthread_join(ops_data->notify_tid, &status);
                if (ret != 0) {
                        sfc_vdpa_err(ops_data->dev_handle,
                                     "failed to join terminated notify_ctrl thread: %s",
                                     rte_strerror(ret));
                }
        }
        ops_data->is_notify_thread_started = false;

        sfc_vdpa_stop(ops_data);
        sfc_vdpa_close(ops_data);

        sfc_vdpa_adapter_unlock(ops_data->dev_handle);

        return 0;
}

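/* Per-vring state changes and runtime feature updates are not supported */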
static int
sfc_vdpa_set_vring_state(int vid, int vring, int state)
{
        RTE_SET_USED(vid);
        RTE_SET_USED(vring);
        RTE_SET_USED(state);

        return -1;
}

static int
sfc_vdpa_set_features(int vid)
{
        RTE_SET_USED(vid);

        return -1;
}

static int
sfc_vdpa_get_vfio_device_fd(int vid)
{
        struct rte_vdpa_device *vdpa_dev;
        struct sfc_vdpa_ops_data *ops_data;
        int vfio_dev_fd;
        void *dev;

        vdpa_dev = rte_vhost_get_vdpa_device(vid);

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        dev = ops_data->dev_handle;
        vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;

        sfc_vdpa_info(dev, "vDPA ops get_vfio_device_fd :: vfio fd : %d",
                      vfio_dev_fd);

        return vfio_dev_fd;
}

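/*
 * Report the VFIO region offset and size of the virtqueue doorbell so
 * the vhost frontend can mmap it into the guest as the host notifier.
 */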
static int
sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
        int ret;
        efx_nic_t *nic;
        int vfio_dev_fd;
        efx_rc_t rc;
        unsigned int bar_offset;
        struct rte_vdpa_device *vdpa_dev;
        struct sfc_vdpa_ops_data *ops_data;
        struct vfio_region_info reg = { .argsz = sizeof(reg) };
        const efx_nic_cfg_t *encp;
        int max_vring_cnt;
        int64_t len;
        void *dev;

        vdpa_dev = rte_vhost_get_vdpa_device(vid);

        ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
        if (ops_data == NULL)
                return -1;

        dev = ops_data->dev_handle;

        vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
        max_vring_cnt =
                (sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);

        nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;
        encp = efx_nic_cfg_get(nic);

        if (qid >= max_vring_cnt) {
                sfc_vdpa_err(dev, "invalid qid : %d", qid);
                return -1;
        }

        if (ops_data->vq_cxt[qid].enable != B_TRUE) {
                sfc_vdpa_err(dev, "vq is not enabled");
                return -1;
        }

        rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
                                            &bar_offset);
        if (rc != 0) {
                sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
                             rte_strerror(rc));
                return rc;
        }

        reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
        ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
        if (ret != 0) {
                sfc_vdpa_err(dev, "could not get device region info: %s",
                             strerror(errno));
                return ret;
        }

        *offset = reg.offset + bar_offset;

        len = (1U << encp->enc_vi_window_shift) / 2;
        if (len >= sysconf(_SC_PAGESIZE)) {
                *size = sysconf(_SC_PAGESIZE);
        } else {
                sfc_vdpa_err(dev, "invalid VI window size : 0x%" PRIx64, len);
                return -1;
        }

        sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
                      *offset);

        return 0;
}

static struct rte_vdpa_dev_ops sfc_vdpa_ops = {
        .get_queue_num = sfc_vdpa_get_queue_num,
        .get_features = sfc_vdpa_get_features,
        .get_protocol_features = sfc_vdpa_get_protocol_features,
        .dev_conf = sfc_vdpa_dev_config,
        .dev_close = sfc_vdpa_dev_close,
        .set_vring_state = sfc_vdpa_set_vring_state,
        .set_features = sfc_vdpa_set_features,
        .get_vfio_device_fd = sfc_vdpa_get_vfio_device_fd,
        .get_notify_area = sfc_vdpa_get_notify_area,
};
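
/*
 * The vhost layer drives this table: dev_conf maps to configure + start
 * + notifier setup and dev_close to stop + close; the remaining ops are
 * simple queries answered from the adapter context.
 */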

struct sfc_vdpa_ops_data *
sfc_vdpa_device_init(void *dev_handle, enum sfc_vdpa_context context)
{
        struct sfc_vdpa_ops_data *ops_data;
        struct rte_pci_device *pci_dev;
        int rc;

        /* Create vDPA ops context */
        ops_data = rte_zmalloc("vdpa", sizeof(struct sfc_vdpa_ops_data), 0);
        if (ops_data == NULL)
                return NULL;

        ops_data->vdpa_context = context;
        ops_data->dev_handle = dev_handle;

        pci_dev = sfc_vdpa_adapter_by_dev_handle(dev_handle)->pdev;

        /* Register vDPA device */
        sfc_vdpa_log_init(dev_handle, "register vDPA device");
        ops_data->vdpa_dev =
                rte_vdpa_register_device(&pci_dev->device, &sfc_vdpa_ops);
        if (ops_data->vdpa_dev == NULL) {
                sfc_vdpa_err(dev_handle, "vDPA device registration failed");
                goto fail_register_device;
        }

        /* Read supported device features */
        sfc_vdpa_log_init(dev_handle, "get device features");
        rc = sfc_vdpa_get_device_features(ops_data);
        if (rc != 0)
                goto fail_get_dev_feature;

        /*
         * Driver features are a superset of the device supported features
         * plus any additional features supported by the driver.
         */
        ops_data->drv_features =
                ops_data->dev_features | SFC_VDPA_DEFAULT_FEATURES;

        ops_data->state = SFC_VDPA_STATE_INITIALIZED;

        return ops_data;

fail_get_dev_feature:
        rte_vdpa_unregister_device(ops_data->vdpa_dev);

fail_register_device:
        rte_free(ops_data);
        return NULL;
}

void
sfc_vdpa_device_fini(struct sfc_vdpa_ops_data *ops_data)
{
        rte_vdpa_unregister_device(ops_data->vdpa_dev);

        rte_free(ops_data);
}