/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
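
/*
 * Rough control flow of this file: virtio_user_dev_init() probes the
 * backend (vhost-user socket or vhost-kernel), virtio_user_start_device()
 * pushes features, memory regions and vring setup to it, and
 * virtio_user_handle_cq() emulates the control queue locally.
 */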
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
        /* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL comes
         * first, because vhost depends on this msg to allocate the virtqueue
         * pair.
         */
        int callfd;
        struct vhost_vring_file file;

        /* May use an invalid flag, but some backends use kickfd and callfd as
         * criteria to judge if the dev is alive, so we use a real eventfd.
         */
        callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if (callfd < 0) {
                PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
                return -1;
        }
        file.index = queue_sel;
        file.fd = callfd;
        dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);
        dev->callfds[queue_sel] = callfd;

        return 0;
}
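
/*
 * Tell the backend about one vring: its size, base index and the addresses
 * of the descriptor, avail and used rings, then hand over the kick eventfd.
 */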
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
        int kickfd;
        struct vhost_vring_file file;
        struct vhost_vring_state state;
        struct vring *vring = &dev->vrings[queue_sel];
        struct vhost_vring_addr addr = {
                .index = queue_sel,
                .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
                .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
                .used_user_addr = (uint64_t)(uintptr_t)vring->used,
                .flags = 0, /* disable log */
        };

        state.index = queue_sel;
        state.num = vring->num;
        dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

        state.index = queue_sel;
        state.num = 0; /* no reservation */
        dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

        dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

        /* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes
         * last, because vhost depends on this msg to judge if
         * virtio is ready.
         */
        kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if (kickfd < 0) {
                PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
                return -1;
        }
        file.index = queue_sel;
        file.fd = kickfd;
        dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);
        dev->kickfds[queue_sel] = kickfd;

        return 0;
}
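
/* Run fn() on every rx virtqueue, then on every tx virtqueue. */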
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
                        int (*fn)(struct virtio_user_dev *, uint32_t))
{
        uint32_t i, queue_sel;

        for (i = 0; i < dev->max_queue_pairs; ++i) {
                queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
                if (fn(dev, queue_sel) < 0) {
                        PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
                        return -1;
                }
        }
        for (i = 0; i < dev->max_queue_pairs; ++i) {
                queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
                if (fn(dev, queue_sel) < 0) {
                        PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
                        return -1;
                }
        }

        return 0;
}
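
/*
 * Device start sequence: create queues, negotiate features, share the
 * memory regions, kick the queues, and enable the first queue pair.
 */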
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
        uint64_t features;
        int ret;

        /* Step 0: tell vhost to create queues */
        if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
                goto error;

        /* Step 1: set features */
        features = dev->features;
        /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
        features &= ~(1ull << VIRTIO_NET_F_MAC);
        /* Strip VIRTIO_NET_F_CTRL_VQ: the control queue is handled locally,
         * so the backend does not need to know about it.
         */
        features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
        ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
        if (ret < 0)
                goto error;
        PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);

        /* Step 2: share memory regions */
        ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
        if (ret < 0)
                goto error;

        /* Step 3: kick queues */
        if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
                goto error;

        /* Step 4: enable queues
         * we enable the 1st queue pair by default.
         */
        dev->ops->enable_qp(dev, 0, 1);

        return 0;

error:
        /* TODO: free resource here or caller to check */
        return -1;
}
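
/* Close all per-queue eventfds and disable every queue pair. */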
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
        uint32_t i;

        for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
                close(dev->callfds[i]);
                close(dev->kickfds[i]);
        }

        for (i = 0; i < dev->max_queue_pairs; ++i)
                dev->ops->enable_qp(dev, i, 0);

        return 0;
}
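
/*
 * Parse an "aa:bb:cc:dd:ee:ff" string into dev->mac_addr; a malformed
 * string is ignored and a random MAC will be used instead.
 */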
static void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
        int i, r;
        uint32_t tmp[ETHER_ADDR_LEN];

        if (!mac)
                return;

        r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
                        &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
        if (r == ETHER_ADDR_LEN) {
                for (i = 0; i < ETHER_ADDR_LEN; ++i)
                        dev->mac_addr[i] = (uint8_t)tmp[i];
                dev->mac_specified = 1;
        } else {
                /* ignore the wrong mac, use random mac */
                PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
        }
}
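
/*
 * A unix socket at the given path selects the vhost-user backend; any
 * other file (e.g. a vhost-kernel char device) selects the kernel backend.
 */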
int
is_vhost_user_by_type(const char *path)
{
        struct stat sb;

        if (stat(path, &sb) == -1)
                return 0;

        return S_ISSOCK(sb.st_mode);
}
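
/*
 * Pick the backend ops from the path type and, for the kernel backend,
 * pre-allocate one vhost fd and one tap fd per queue pair.
 */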
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
        uint32_t q, i;

        for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
                dev->kickfds[i] = -1;
                dev->callfds[i] = -1;
        }

        dev->vhostfds = NULL;
        dev->tapfds = NULL;

        if (is_vhost_user_by_type(dev->path)) {
                dev->ops = &ops_user;
        } else {
                dev->ops = &ops_kernel;

                dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
                dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
                if (!dev->vhostfds || !dev->tapfds) {
                        PMD_INIT_LOG(ERR, "Failed to malloc");
                        return -1;
                }

                for (q = 0; q < dev->max_queue_pairs; ++q) {
                        dev->vhostfds[q] = -1;
                        dev->tapfds[q] = -1;
                }
        }

        return dev->ops->setup(dev);
}
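
/*
 * One-time init: record the requested parameters, set up the backend,
 * claim ownership and fetch the feature bits the backend supports.
 */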
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                     int cq, int queue_size, const char *mac)
{
        snprintf(dev->path, PATH_MAX, "%s", path);
        dev->max_queue_pairs = queues;
        dev->queue_pairs = 1; /* mq disabled by default */
        dev->queue_size = queue_size;
        dev->mac_specified = 0;
        parse_mac(dev, mac);

        if (virtio_user_dev_setup(dev) < 0) {
                PMD_INIT_LOG(ERR, "backend set up fails");
                return -1;
        }

        if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
                PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
                return -1;
        }

        if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
                                   &dev->device_features) < 0) {
                PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
                return -1;
        }

        if (dev->mac_specified)
                dev->device_features |= (1ull << VIRTIO_NET_F_MAC);

        if (cq) {
                /* device does not really need to know anything about CQ,
                 * so if necessary, we just claim to support CQ
                 */
                dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
        } else {
                dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
                /* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
                dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
                dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
                dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
                dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
                dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
        }

        return 0;
}
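
/* Tear down the device and release the backend file descriptors. */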
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
        uint32_t i;

        virtio_user_stop_device(dev);

        if (dev->vhostfds) {
                for (i = 0; i < dev->max_queue_pairs; ++i)
                        close(dev->vhostfds[i]);
                free(dev->vhostfds);
                free(dev->tapfds);
        }
}
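
/*
 * Handle VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET: enable the first q_pairs queue
 * pairs and disable the remaining ones.
 */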
static uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
        uint16_t i;
        uint8_t ret = 0;

        if (q_pairs > dev->max_queue_pairs) {
                PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
                             q_pairs, dev->max_queue_pairs);
                return -1;
        }

        for (i = 0; i < q_pairs; ++i)
                ret |= dev->ops->enable_qp(dev, i, 1);
        for (i = q_pairs; i < dev->max_queue_pairs; ++i)
                ret |= dev->ops->enable_qp(dev, i, 0);

        dev->queue_pairs = q_pairs;

        return ret;
}
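
/*
 * Process one control-queue descriptor chain (header, data, status):
 * execute the request, write the ack into the status descriptor and
 * return how many descriptors the chain used.
 */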
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
                            uint16_t idx_hdr)
{
        struct virtio_net_ctrl_hdr *hdr;
        virtio_net_ctrl_ack status = ~0;
        uint16_t i, idx_data, idx_status;
        uint32_t n_descs = 0;

        /* locate desc for header, data, and status */
        idx_data = vring->desc[idx_hdr].next;
        n_descs++;

        i = idx_data;
        while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
                i = vring->desc[i].next;
                n_descs++;
        }

        /* locate desc for status */
        idx_status = i;
        n_descs++;

        hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
        if (hdr->class == VIRTIO_NET_CTRL_MQ &&
            hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
                uint16_t queues;

                queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
                status = virtio_user_handle_mq(dev, queues);
        }

        /* Update status */
        *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

        return n_descs;
}
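
/*
 * The control queue is never exposed to the vhost backend; it is drained
 * here instead: consume the avail ring and fill in the used ring for each
 * control message.
 */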
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
        uint16_t avail_idx, desc_idx;
        struct vring_used_elem *uep;
        uint32_t n_descs;
        struct vring *vring = &dev->vrings[queue_idx];

        /* Consume avail ring, using used ring idx as first one */
        while (vring->used->idx != vring->avail->idx) {
                avail_idx = (vring->used->idx) & (vring->num - 1);
                desc_idx = vring->avail->ring[avail_idx];

                n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

                /* Update used ring */
                uep = &vring->used->ring[avail_idx];