lib/librte_vhost/vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device-specific vhost lib (vDPA)
 */

#include <stdbool.h>

#include <rte_malloc.h>
#include "rte_vdpa.h"
#include "vhost.h"

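/*
 * Global registry of vdpa devices; a slot's index doubles as the
 * device id (did) handed back to callers.
 */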
static struct rte_vdpa_device *vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;

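/*
 * Compare two vdpa device addresses. Only PCI addresses are compared
 * field by field; any other address type matches on type alone.
 */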
static bool
is_same_vdpa_device(struct rte_vdpa_dev_addr *a,
		struct rte_vdpa_dev_addr *b)
{
	bool ret = true;

	if (a->type != b->type)
		return false;

	switch (a->type) {
	case VDPA_ADDR_PCI:
		if (a->pci_addr.domain != b->pci_addr.domain ||
				a->pci_addr.bus != b->pci_addr.bus ||
				a->pci_addr.devid != b->pci_addr.devid ||
				a->pci_addr.function != b->pci_addr.function)
			ret = false;
		break;
	default:
		break;
	}

	return ret;
}

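/*
 * Register a vdpa device and return its device id, or -1 if the table is
 * full, an argument is NULL, or a device with the same address is already
 * registered.
 *
 * A minimal usage sketch ("my_vdpa_ops" is a hypothetical driver-provided
 * ops table and the PCI address values are placeholders):
 *
 *	struct rte_vdpa_dev_addr addr = {
 *		.type = VDPA_ADDR_PCI,
 *		.pci_addr = { .domain = 0, .bus = 5, .devid = 0, .function = 1 },
 *	};
 *	int did = rte_vdpa_register_device(&addr, &my_vdpa_ops);
 *	if (did < 0)
 *		goto err;
 */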
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	char device_name[MAX_VDPA_NAME_LEN];
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE || addr == NULL || ops == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i] == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return -1;

	snprintf(device_name, sizeof(device_name), "vdpa-dev-%d", i);
	dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
			RTE_CACHE_LINE_SIZE);
	if (!dev)
		return -1;

	memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
	dev->ops = ops;
	vdpa_devices[i] = dev;
	vdpa_device_num++;

	return i;
}

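/*
 * Unregister the vdpa device with id "did" and free its slot; returns the
 * id on success, or -1 if the id is out of range or not in use.
 */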
int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did] == NULL)
		return -1;

	rte_free(vdpa_devices[did]);
	vdpa_devices[did] = NULL;
	vdpa_device_num--;

	return did;
}

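/*
 * Find the device id of a registered vdpa device by address; returns -1
 * if no registered device matches.
 */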
int
rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
{
	struct rte_vdpa_device *dev;
	int i;

	if (addr == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return i;
	}

	return -1;
}

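/* Return the device handle for id "did", or NULL for an invalid or
 * unused id.
 */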
struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return vdpa_devices[did];
}

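/* Return the number of currently registered vdpa devices. */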
int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}

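/*
 * Relay new used-ring entries from the mediated (shadow) vring "vring_m",
 * which the vdpa hardware updates, into the guest-visible used ring of
 * queue "qid", logging every DMA-writable buffer as dirty so that live
 * migration captures the pages written by the device. Only split rings
 * are handled; returns the number of entries relayed, or -1 on error.
 */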
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

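	/*
	 * Both indices are free-running 16-bit counters, so the uint16_t
	 * subtraction above gives the number of entries to relay even
	 * across wrap-around.
	 */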
	while (idx != idx_m) {
		/* copy the used entry; used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

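		/*
		 * An indirect descriptor points at a separate table of
		 * descriptors. Map that table and, if the guest-physical
		 * mapping is not contiguous, fall back to a bounce copy
		 * (idesc) before walking it from index 0.
		 */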
		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writable buffers */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						     desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

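	/*
	 * Make the relayed used entries globally visible before publishing
	 * the new used index to the guest.
	 */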
	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}