/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018, Microsoft Corporation.
 */
#include <sys/uio.h>

#include <rte_tailq.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>

#include "private.h"
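
/*
 * Atomically OR mask into *addr. The interrupt and monitor pages are
 * shared with the hypervisor, so a plain read-modify-write is not safe.
 */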
static inline void
vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
{
	/* Use the GCC builtin, which performs an atomic OR operation */
	__sync_or_and_fetch(addr, mask);
}
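
/* Mark the channel's bit in the shared interrupt page to signal the host. */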
static inline void
vmbus_send_interrupt(const struct rte_vmbus_device *dev, uint32_t relid)
{
	uint32_t *int_addr;
	uint32_t int_mask;

	int_addr = dev->int_page + relid / 32;
	int_mask = 1u << (relid % 32);

	vmbus_sync_set_bit(int_addr, int_mask);
}
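
/* Set the channel's pending bit in the monitor trigger page. */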
static inline void
vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
{
	uint32_t *monitor_addr, monitor_mask;
	unsigned int trigger_index;

	trigger_index = monitor_id / HV_MON_TRIG_LEN;
	monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);

	monitor_addr = &dev->monitor_page->trigs[trigger_index].pending;
	vmbus_sync_set_bit(monitor_addr, monitor_mask);
}
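
/* Signal the host: raise the interrupt bit, then trip the monitor. */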
static void
vmbus_set_event(const struct rte_vmbus_device *dev,
		const struct vmbus_channel *chan)
{
	vmbus_send_interrupt(dev, chan->relid);
	vmbus_set_monitor(dev, chan->monitor_id);
}

/*
 * Set the delay before the hypervisor examines this channel's
 * monitor trigger.
 */
void
rte_vmbus_set_latency(const struct rte_vmbus_device *dev,
		      const struct vmbus_channel *chan,
		      uint32_t latency)
{
	uint32_t trig_idx = chan->monitor_id / VMBUS_MONTRIG_LEN;
	uint32_t trig_offs = chan->monitor_id % VMBUS_MONTRIG_LEN;

	if (latency >= UINT16_MAX * 100) {
		VMBUS_LOG(ERR, "invalid latency value %u", latency);
		return;
	}

	if (trig_idx >= VMBUS_MONTRIGS_MAX) {
		VMBUS_LOG(ERR, "invalid monitor trigger %u", trig_idx);
		return;
	}

	/* The host value is expressed in 100 nanosecond units */
	dev->monitor_page->lat[trig_idx][trig_offs] = latency / 100;
}

/*
 * Notify the host that there is data pending on our TX bufring.
 *
 * Since this runs in userspace, it relies on the monitor page;
 * a hypercall cannot be made from userspace.
 */
void
rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
{
	const struct rte_vmbus_device *dev = chan->device;
	const struct vmbus_br *tbr = &chan->txbr;

	/* Make sure all ring updates are done before signaling the host */
	rte_smp_wmb();

	/* Skip the event if the host has masked interrupts */
	if (tbr->vbr->imask)
		return;

	vmbus_set_event(dev, chan);
}

/* Do a simple send directly using the transmit ring. */
int rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
			void *data, uint32_t dlen,
			uint64_t xactid, uint32_t flags, bool *need_sig)
{
	struct vmbus_chanpkt pkt;
	unsigned int pktlen, pad_pktlen;
	const uint32_t hlen = sizeof(pkt);
	bool send_evt = false;
	uint64_t pad = 0;
	struct iovec iov[3];
	int error;

	pktlen = hlen + dlen;
	pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));

	pkt.hdr.type = type;
	pkt.hdr.flags = flags;
	pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.xactid = xactid;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = hlen;
	iov[1].iov_base = data;
	iov[1].iov_len = dlen;
	iov[2].iov_base = &pad;
	iov[2].iov_len = pad_pktlen - pktlen;

	error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt);

	/*
	 * The caller passes a non-NULL need_sig if it will handle any
	 * required signaling later; if need_sig is NULL, signal the
	 * host now when needed.
	 */
	if (need_sig)
		*need_sig |= send_evt;
	else if (error == 0 && send_evt)
		rte_vmbus_chan_signal_tx(chan);

	return error;
}
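
/*
 * Illustrative batching pattern (a sketch, not code from this driver):
 * the arrays pkts[], lens[] and ids[] are hypothetical caller state.
 * Passing a non-NULL need_sig defers the host notification so a burst
 * of sends costs at most one signal:
 *
 *	bool need_sig = false;
 *	unsigned int i;
 *
 *	for (i = 0; i < n; i++)
 *		rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
 *				    pkts[i], lens[i], ids[i], 0, &need_sig);
 *	if (need_sig)
 *		rte_vmbus_chan_signal_tx(chan);
 */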

/* Do a scatter/gather send where the descriptors point at the data. */
int rte_vmbus_chan_send_sglist(struct vmbus_channel *chan,
			       struct vmbus_gpa sg[], uint32_t sglen,
			       void *data, uint32_t dlen,
			       uint64_t xactid, bool *need_sig)
{
	struct vmbus_chanpkt_sglist pkt;
	unsigned int pktlen, pad_pktlen, hlen;
	bool send_evt = false;
	struct iovec iov[4];
	uint64_t pad = 0;
	int error;

	hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]);
	pktlen = hlen + dlen;
	pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));

	pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA;
	pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC;
	pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.xactid = xactid;
	pkt.rsvd = 0;
	pkt.gpa_cnt = sglen;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);
	iov[1].iov_base = sg;
	iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
	iov[2].iov_base = data;
	iov[2].iov_len = dlen;
	iov[3].iov_base = &pad;
	iov[3].iov_len = pad_pktlen - pktlen;

	error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt);

	/* If the caller is batching, just propagate the signal status */
	if (need_sig)
		*need_sig |= send_evt;
	else if (error == 0 && send_evt)
		rte_vmbus_chan_signal_tx(chan);

	return error;
}
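
/* True when the receive ring is empty (read index has caught up to write). */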
bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel)
{
	const struct vmbus_br *br = &channel->rxbr;

	return br->vbr->rindex == br->vbr->windex;
}

/* Signal the host, if needed, after the guest has read bytes_read bytes */
void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
{
	struct vmbus_br *rbr = &chan->rxbr;
	uint32_t write_sz, pending_sz;

	/* No need for read-side signaling on older host versions */
	if (!rbr->vbr->feature_bits.feat_pending_send_sz)
		return;

	/* Make sure the read of pending_send happens after the new read index */
	rte_mb();

	pending_sz = rbr->vbr->pending_send;
	if (!pending_sz)
		return;

	rte_smp_rmb();
	write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);

	/* If there was already enough space before this read,
	 * then the host was never blocked.
	 */
	if (write_sz - bytes_read > pending_sz)
		return;

	/* If the host's pending write still will not fit, don't signal yet */
	if (write_sz <= pending_sz)
		return;

	vmbus_set_event(chan->device, chan);
}
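
/*
 * Receive one packet, copying its payload (header stripped) into the
 * caller's buffer. On input *len is the buffer size; on success it is
 * set to the payload length and *request_id to the transaction id.
 */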
int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len,
			uint64_t *request_id)
{
	struct vmbus_chanpkt_hdr pkt;
	uint32_t dlen, hlen, bufferlen = *len;
	int error;

	error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
	if (error)
		return error;

	if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
		/* XXX the channel is effectively dead at this point. */
		return -EIO;
	}

	if (unlikely(pkt.hlen > pkt.tlen)) {
		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u",
			  pkt.hlen, pkt.tlen);
		return -EIO;
	}

	/* Lengths are expressed in quadwords (8 bytes) */
	hlen = pkt.hlen << VMBUS_CHANPKT_SIZE_SHIFT;
	dlen = (pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT) - hlen;
	*len = dlen;

	/* If the caller's buffer is not large enough */
	if (unlikely(dlen > bufferlen))
		return -ENOBUFS;

	*request_id = pkt.xactid;

	/* Read the data, skipping over the packet header */
	error = vmbus_rxbr_read(&chan->rxbr, data, dlen, hlen);
	if (error)
		return error;

	rte_vmbus_chan_signal_read(chan, dlen + hlen + sizeof(uint64_t));

	return 0;
}
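
/*
 * Illustrative receive loop (a sketch, not code from this driver):
 * buf is a hypothetical caller buffer and process() a hypothetical
 * handler. *len must be reset before each call since it is used as
 * the buffer size on input:
 *
 *	uint8_t buf[2048];
 *	uint32_t len;
 *	uint64_t xactid;
 *
 *	while (!rte_vmbus_chan_rx_empty(chan)) {
 *		len = sizeof(buf);
 *		if (rte_vmbus_chan_recv(chan, buf, &len, &xactid) == 0)
 *			process(buf, len, xactid);
 *	}
 */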

/* TODO: replace this with in-place (zero copy) ring buffer access */
int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
			    void *data, uint32_t *len)
{
	struct vmbus_chanpkt_hdr pkt;
	uint32_t dlen, bufferlen = *len;
	int error;

	error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
	if (error)
		return error;

	if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
		/* XXX the channel is effectively dead at this point. */
		return -EIO;
	}

	if (unlikely(pkt.hlen > pkt.tlen)) {
		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u",
			  pkt.hlen, pkt.tlen);
		return -EIO;
	}

	/* Lengths are expressed in quadwords (8 bytes) */
	dlen = pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT;
	*len = dlen;

	/* If the caller's buffer is not large enough */
	if (unlikely(dlen > bufferlen))
		return -ENOBUFS;

	/* Read the whole packet, header included */
	error = vmbus_rxbr_read(&chan->rxbr, data, dlen, 0);
	if (error)
		return error;

	/* Return the number of bytes read (including the per-packet trailer) */
	return dlen + sizeof(uint64_t);
}
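
/*
 * Allocate and initialize a channel structure on the device's NUMA
 * node, then map its ring buffers through the UIO driver.
 */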
int vmbus_chan_create(const struct rte_vmbus_device *device,
		      uint16_t relid, uint16_t subid, uint8_t monitor_id,
		      struct vmbus_channel **new_chan)
{
	struct vmbus_channel *chan;
	int err;

	chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE,
				  device->device.numa_node);
	if (!chan)
		return -ENOMEM;

	STAILQ_INIT(&chan->subchannel_list);
	chan->device = device;
	chan->subchannel_id = subid;
	chan->relid = relid;
	chan->monitor_id = monitor_id;
	*new_chan = chan;

	err = vmbus_uio_map_rings(chan);
	if (err) {
		rte_free(chan);
		return err;
	}

	return 0;
}

/* Set up the primary channel */
int rte_vmbus_chan_open(struct rte_vmbus_device *device,
			struct vmbus_channel **new_chan)
{
	int err;

	err = vmbus_chan_create(device, device->relid, 0,
				device->monitor_id, new_chan);
	if (!err)
		device->primary = *new_chan;

	return err;
}
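
/*
 * Report how many channels the device may use; more than one is only
 * possible when the kernel UIO driver exposes subchannel support.
 */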
int rte_vmbus_max_channels(const struct rte_vmbus_device *device)
{
	if (vmbus_uio_subchannels_supported(device, device->primary))
		return VMBUS_MAX_CHANNELS;
	else
		return 1;
}

/* Set up a secondary channel */
int rte_vmbus_subchan_open(struct vmbus_channel *primary,
			   struct vmbus_channel **new_chan)
{
	struct vmbus_channel *chan;
	int err;

	err = vmbus_uio_get_subchan(primary, &chan);
	if (err)
		return err;

	STAILQ_INSERT_TAIL(&primary->subchannel_list, chan, next);
	*new_chan = chan;

	return 0;
}
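
/* Return the subchannel index; 0 means the primary channel. */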
uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan)
{
	return chan->subchannel_id;
}
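
/*
 * Close a channel: a subchannel is unlinked from the primary's list
 * before its memory is released.
 */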
void rte_vmbus_chan_close(struct vmbus_channel *chan)
{
	const struct rte_vmbus_device *device = chan->device;
	struct vmbus_channel *primary = device->primary;

	if (chan != primary)
		STAILQ_REMOVE(&primary->subchannel_list, chan,
			      vmbus_channel, next);

	rte_free(chan);
}
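
/* Dump one ring buffer's state and, if present, the next packet header. */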
static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br)
{
	const struct vmbus_bufring *vbr = br->vbr;
	struct vmbus_chanpkt_hdr pkt;

	fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n",
		id, vbr->windex, vbr->rindex, vbr->imask,
		vbr->pending_send, vbr->feature_bits.value);
	fprintf(f, " size=%u avail write=%u read=%u\n",
		br->dsize, vmbus_br_availwrite(br, vbr->windex),
		vmbus_br_availread(br));

	if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0)
		fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n",
			pkt.type,
			pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT,
			pkt.flags, pkt.xactid);
}
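
/* Dump a channel's identity and both of its rings, for debugging. */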
void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan)
{
	fprintf(f, "channel[%u] relid=%u monitor=%u\n",
		chan->subchannel_id, chan->relid, chan->monitor_id);
	vmbus_dump_ring(f, "rxbr", &chan->rxbr);
	vmbus_dump_ring(f, "txbr", &chan->txbr);
}