1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2018 Microsoft Corp.
3 * Copyright (c) 2010-2012 Citrix Inc.
4 * Copyright (c) 2012 NetApp Inc.
9 * Network Virtualization Service.
19 #include <rte_ethdev.h>
20 #include <rte_string_fns.h>
21 #include <rte_memzone.h>
22 #include <rte_malloc.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_ether.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
32 #include <rte_bus_vmbus.h>
/* NVS protocol versions offered to the host, tried in array order by
 * hn_nvs_init().  NOTE(review): the initializer entries are elided in
 * this extract.
 */
38 static const uint32_t hn_nvs_version[] = {
/*
 * Send an NVS request on the primary channel without waiting for a
 * response.  Returns rte_vmbus_chan_send()'s status (0 on success,
 * negative on failure).
 * NOTE(review): the payload arguments of the send call are partly
 * elided in this extract.
 */
47 static int hn_nvs_req_send(struct hn_data *hv,
48 void *req, uint32_t reqlen)
50 return rte_vmbus_chan_send(hn_primary_chan(hv),
51 VMBUS_CHANPKT_TYPE_INBAND,
53 VMBUS_CHANPKT_FLAG_NONE, NULL);
/*
 * Send a request and poll the primary channel for the matching NVS
 * response of the given type, copying it into resp.  The caller must
 * already hold the ring lock (see hn_nvs_execute()).
 * NOTE(review): local declarations, braces, and the retry control flow
 * between the visible statements are elided in this extract.
 */
57 __hn_nvs_execute(struct hn_data *hv,
58 void *req, uint32_t reqlen,
59 void *resp, uint32_t resplen,
62 struct vmbus_channel *chan = hn_primary_chan(hv);
63 char buffer[NVS_RESPSIZE_MAX];
64 const struct hn_nvs_hdr *hdr;
69 /* Send request to ring buffer */
/* VMBUS_CHANPKT_FLAG_RC requests a completion (response) from the host */
70 ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
72 VMBUS_CHANPKT_FLAG_RC, NULL);
75 PMD_DRV_LOG(ERR, "send request failed: %d", ret);
/* Poll for the response; the delay below paces the retry loop so the
 * channel is not spun on.  NOTE(review): the retry branch itself is
 * partly elided here — confirm against the full source. */
81 ret = rte_vmbus_chan_recv(chan, buffer, &len, &xactid);
83 rte_delay_us(HN_CHAN_INTERVAL_US);
88 PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
/* A valid response must at least contain the common NVS header */
92 if (len < sizeof(*hdr)) {
93 PMD_DRV_LOG(ERR, "response missing NVS header");
97 hdr = (struct hn_nvs_hdr *)buffer;
99 /* Silently drop received packets while waiting for response */
100 if (hdr->type == NVS_TYPE_RNDIS) {
/* Ack the rx buffer so the hypervisor can recycle it, keep waiting */
101 hn_nvs_ack_rxbuf(chan, xactid);
105 if (hdr->type != type) {
106 PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
113 "invalid NVS resp len %u (expect %u)",
/* Hand exactly resplen bytes of the response back to the caller */
118 memcpy(resp, buffer, resplen);
126 * Execute one control command and get the response.
127 * Only one command can be active on a channel at once.
128 * Unlike BSD, DPDK does not have an interrupt context,
129 * so polling is required to wait for the response.
132 hn_nvs_execute(struct hn_data *hv,
133 void *req, uint32_t reqlen,
134 void *resp, uint32_t resplen,
/* The primary RX queue's ring lock serializes NVS transactions
 * against the normal receive path. */
137 struct hn_rx_queue *rxq = hv->primary;
140 rte_spinlock_lock(&rxq->ring_lock);
141 ret = __hn_nvs_execute(hv, req, reqlen, resp, resplen, type);
142 rte_spinlock_unlock(&rxq->ring_lock);
/*
 * Attempt NVS initialization at exactly one protocol version
 * (ver_min == ver_max == nvs_ver).  A non-OK status is reported at
 * DEBUG level only since hn_nvs_init() retries with other versions.
 * NOTE(review): the tail of this function (return values) is elided
 * in this extract.
 */
148 hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
150 struct hn_nvs_init init;
151 struct hn_nvs_init_resp resp;
155 memset(&init, 0, sizeof(init));
156 init.type = NVS_TYPE_INIT;
/* Offer a single version: both ends of the accepted range are nvs_ver */
157 init.ver_min = nvs_ver;
158 init.ver_max = nvs_ver;
160 error = hn_nvs_execute(hv, &init, sizeof(init),
166 status = resp.status;
167 if (status != NVS_STATUS_OK) {
168 /* Not fatal, try other versions */
169 PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
/*
 * Connect the kernel-established receive buffer to NVS and allocate
 * the primary queue's per-slot bookkeeping from the section count the
 * host returns.
 * NOTE(review): the memset of conn and several error-return paths are
 * elided in this extract.
 */
178 hn_nvs_conn_rxbuf(struct hn_data *hv)
180 struct hn_nvs_rxbuf_conn conn;
181 struct hn_nvs_rxbuf_connresp resp;
185 /* Kernel has already setup RXBUF on primary channel. */
188 * Connect RXBUF to NVS.
190 conn.type = NVS_TYPE_RXBUF_CONN;
/* gpadl comes from the rxbuf resource's phys_addr field — presumably
 * the GPADL handle established by the bus layer; confirm. */
191 conn.gpadl = hv->rxbuf_res->phys_addr;
192 conn.sig = NVS_RXBUF_SIG;
193 PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64,
195 hv->rxbuf_res->phys_addr);
197 error = hn_nvs_execute(hv, &conn, sizeof(conn),
199 NVS_TYPE_RXBUF_CONNRESP);
202 "exec nvs rxbuf conn failed: %d",
207 status = resp.status;
208 if (status != NVS_STATUS_OK) {
210 "nvs rxbuf conn failed: %x", status);
/* Exactly one section is expected for the receive buffer */
213 if (resp.nsect != 1) {
215 "nvs rxbuf response num sections %u != 1",
221 "receive buffer size %u count %u",
222 resp.nvs_sect[0].slotsz,
223 resp.nvs_sect[0].slotcnt);
224 hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
227 * Primary queue's rxbuf_info is not allocated at creation time.
228 * Now we can allocate it after we figure out the slotcnt.
230 hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
231 hv->rxbuf_section_cnt,
232 sizeof(*hv->primary->rxbuf_info),
233 RTE_CACHE_LINE_SIZE);
234 if (!hv->primary->rxbuf_info) {
236 "could not allocate rxbuf info");
/*
 * Disconnect the receive buffer from NVS.  The request has no
 * response; afterwards the code lingers briefly so the host can
 * finish the teardown (see the comment below).
 */
244 hn_nvs_disconn_rxbuf(struct hn_data *hv)
246 struct hn_nvs_rxbuf_disconn disconn;
250 * Disconnect RXBUF from NVS.
252 memset(&disconn, 0, sizeof(disconn));
253 disconn.type = NVS_TYPE_RXBUF_DISCONN;
254 disconn.sig = NVS_RXBUF_SIG;
256 /* NOTE: No response. */
257 error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
260 "send nvs rxbuf disconn failed: %d",
265 * Linger long enough for NVS to disconnect RXBUF.
/*
 * Disconnect the chimney (send) buffer from NVS, but only if one was
 * actually connected (chim_cnt != 0).  Fire-and-forget like the rxbuf
 * disconnect, followed by a linger delay.
 */
271 hn_nvs_disconn_chim(struct hn_data *hv)
275 if (hv->chim_cnt != 0) {
276 struct hn_nvs_chim_disconn disconn;
278 /* Disconnect chimney sending buffer from NVS. */
279 memset(&disconn, 0, sizeof(disconn));
280 disconn.type = NVS_TYPE_CHIM_DISCONN;
281 disconn.sig = NVS_CHIM_SIG;
283 /* NOTE: No response. */
284 error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
288 "send nvs chim disconn failed: %d", error);
293 * Linger long enough for NVS to disconnect chimney
/*
 * Connect the chimney (send) buffer to NVS and derive the per-section
 * size (chim_szmax) and count (chim_cnt) used by the transmit path.
 * NOTE(review): several branches and return paths are elided in this
 * extract.
 */
301 hn_nvs_conn_chim(struct hn_data *hv)
303 struct hn_nvs_chim_conn chim;
304 struct hn_nvs_chim_connresp resp;
306 unsigned long len = hv->chim_res->len;
309 /* Connect chimney sending buffer to NVS */
310 memset(&chim, 0, sizeof(chim));
311 chim.type = NVS_TYPE_CHIM_CONN;
312 chim.gpadl = hv->chim_res->phys_addr;
313 chim.sig = NVS_CHIM_SIG;
314 PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
316 hv->chim_res->phys_addr);
318 error = hn_nvs_execute(hv, &chim, sizeof(chim),
320 NVS_TYPE_CHIM_CONNRESP);
322 PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
326 if (resp.status != NVS_STATUS_OK) {
327 PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
/* Section size must be non-zero and 32-bit aligned to be usable */
332 sectsz = resp.sectsz;
333 if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
334 /* Can't use chimney sending buffer; done! */
336 "invalid chimney sending buffer section size: %u",
342 hv->chim_szmax = sectsz;
343 hv->chim_cnt = len / sectsz;
345 PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
346 len, hv->chim_szmax, hv->chim_cnt);
/* NOTE(review): cleanup path — the condition that leads here is
 * elided in this extract. */
352 hn_nvs_disconn_chim(hv);
357 * Configure MTU and enable VLAN.
/*
 * Sent only on NVS version 2 or later (see hn_nvs_attach()); no
 * response is expected.  The MTU reported to the host includes the
 * Ethernet header.
 */
360 hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
362 struct hn_nvs_ndis_conf conf;
365 memset(&conf, 0, sizeof(conf));
366 conf.type = NVS_TYPE_NDIS_CONF;
367 conf.mtu = mtu + RTE_ETHER_HDR_LEN;
368 conf.caps = NVS_NDIS_CONF_VLAN;
/* SR-IOV (VF) capability is only advertised on NVS 5+ */
371 if (hv->nvs_ver >= NVS_VERSION_5)
372 conf.caps |= NVS_NDIS_CONF_SRIOV;
374 /* NOTE: No response. */
375 error = hn_nvs_req_send(hv, &conf, sizeof(conf));
378 "send nvs ndis conf failed: %d", error);
/*
 * Tell NVS which NDIS version the driver will use (hv->ndis_ver,
 * chosen in hn_nvs_init()).  Fire-and-forget: no response expected.
 */
386 hn_nvs_init_ndis(struct hn_data *hv)
388 struct hn_nvs_ndis_init ndis;
391 memset(&ndis, 0, sizeof(ndis));
392 ndis.type = NVS_TYPE_NDIS_INIT;
393 ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
394 ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);
396 /* NOTE: No response. */
397 error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
400 "send nvs ndis init failed: %d", error);
/*
 * Negotiate the NVS protocol version by trying each entry of
 * hn_nvs_version[] in order, then select the NDIS version that
 * matches the negotiated NVS version.
 */
406 hn_nvs_init(struct hn_data *hv)
412 * Find the supported NVS version and set NDIS version accordingly.
414 for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
415 error = hn_nvs_doinit(hv, hn_nvs_version[i]);
/* Failure at one version is not fatal; log and try the next */
417 PMD_INIT_LOG(DEBUG, "version %#x error %d",
418 hn_nvs_version[i], error);
422 hv->nvs_ver = hn_nvs_version[i];
424 /* Set NDIS version according to NVS version. */
425 hv->ndis_ver = NDIS_VERSION_6_30;
426 if (hv->nvs_ver <= NVS_VERSION_4)
427 hv->ndis_ver = NDIS_VERSION_6_1;
430 "NVS version %#x, NDIS version %u.%u",
431 hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
432 NDIS_VERSION_MINOR(hv->ndis_ver));
/* Every offered version failed */
437 "no NVS compatible version available");
/*
 * Bring up the NVS layer: negotiate the NVS version, configure NDIS,
 * initialize NDIS, then connect the receive and chimney buffers.
 * A chimney-connect failure rolls back the rxbuf connection.
 */
442 hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
449 error = hn_nvs_init(hv);
453 /* Configure NDIS before initializing it. */
454 if (hv->nvs_ver >= NVS_VERSION_2) {
455 error = hn_nvs_conf_ndis(hv, mtu);
463 error = hn_nvs_init_ndis(hv);
470 error = hn_nvs_conn_rxbuf(hv);
475 * Connect chimney sending buffer.
477 error = hn_nvs_conn_chim(hv);
/* Roll back the rxbuf connection if the chimney connect failed */
479 hn_nvs_disconn_rxbuf(hv);
/*
 * Tear down NVS state.  There is no NVS "stop" request; just
 * disconnect the receive and chimney (send) buffers that
 * hn_nvs_attach() connected.
 *
 * Fix: the parameter was annotated __rte_unused, but hv IS used by
 * both disconnect calls below — the annotation was wrong and is
 * removed.
 */
hn_nvs_detach(struct hn_data *hv)
{
	PMD_INIT_FUNC_TRACE();

	/* NOTE: there are no requests to stop the NVS. */
	hn_nvs_disconn_rxbuf(hv);
	hn_nvs_disconn_chim(hv);
}
497 * Ack the consumed RXBUF associated w/ this channel packet,
498 * so that this RXBUF can be recycled by the hypervisor.
501 hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
503 unsigned int retries = 0;
504 struct hn_nvs_rndis_ack ack = {
505 .type = NVS_TYPE_RNDIS_ACK,
506 .status = NVS_STATUS_OK,
510 PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);
/* Send the ack as a completion packet for transaction id `tid` */
513 error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
514 &ack, sizeof(ack), tid,
515 VMBUS_CHANPKT_FLAG_NONE, NULL);
/* -EAGAIN: host ring momentarily full — retry a bounded number of
 * times before giving up. */
520 if (error == -EAGAIN) {
523 * This should _not_ happen in real world, since the
524 * consumption of the TX bufring from the TX path is
527 PMD_RX_LOG(NOTICE, "RXBUF ack retry");
528 if (++retries < 10) {
/* NOTE(review): retry delay and loop body are elided in this extract */
534 PMD_DRV_LOG(ERR, "RXBUF ack failed");
/*
 * Ask the host to allocate *nsubch sub-channels.  On success *nsubch
 * is updated to the count the host actually granted (which may differ
 * from the request; more than requested only draws a warning).
 */
538 hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
540 struct hn_nvs_subch_req req;
541 struct hn_nvs_subch_resp resp;
544 memset(&req, 0, sizeof(req));
545 req.type = NVS_TYPE_SUBCH_REQ;
546 req.op = NVS_SUBCH_OP_ALLOC;
547 req.nsubch = *nsubch;
549 error = hn_nvs_execute(hv, &req, sizeof(req),
551 NVS_TYPE_SUBCH_RESP);
555 if (resp.status != NVS_STATUS_OK) {
557 "nvs subch alloc failed: %#x",
/* More than requested is unexpected but tolerated (warn only) */
562 if (resp.nsubch > *nsubch) {
564 "%u subchans are allocated, requested %u",
565 resp.nsubch, *nsubch);
/* Report the granted count back to the caller */
567 *nsubch = resp.nsubch;
/*
 * Switch the host datapath between the synthetic device (path == 0,
 * "Synthetic") and the VF (non-zero, "VF").  No response is expected.
 * NOTE(review): this function continues past the end of this extract.
 */
573 hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
575 struct hn_nvs_datapath dp;
578 PMD_DRV_LOG(DEBUG, "set datapath %s",
579 path ? "VF" : "Synthetic");
581 memset(&dp, 0, sizeof(dp));
582 dp.type = NVS_TYPE_SET_DATAPATH;
583 dp.active_path = path;
585 error = hn_nvs_req_send(hv, &dp, sizeof(dp));
588 "send set datapath failed: %d",