1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2018 Microsoft Corp.
3 * Copyright (c) 2010-2012 Citrix Inc.
4 * Copyright (c) 2012 NetApp Inc.
9 * Network Virtualization Service.
19 #include <rte_ethdev.h>
20 #include <rte_string_fns.h>
21 #include <rte_memzone.h>
22 #include <rte_malloc.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_ether.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
32 #include <rte_bus_vmbus.h>
/*
 * Table of NVS protocol versions to offer to the host; iterated by
 * hn_nvs_init() until one succeeds.  Presumably ordered newest-first
 * so the highest mutually-supported version wins — TODO confirm
 * against the (elided) initializer list.
 */
38 static const uint32_t hn_nvs_version[] = {
/*
 * Post a fire-and-forget NVS request on the primary VMBus channel.
 * The packet is sent INBAND with VMBUS_CHANPKT_FLAG_NONE, so the host
 * generates no completion for it (callers annotate "NOTE: No response").
 * Returns the rte_vmbus_chan_send() result: 0 on success, negative on
 * failure.
 */
47 static int hn_nvs_req_send(struct hn_data *hv,
48 void *req, uint32_t reqlen)
50 return rte_vmbus_chan_send(hn_primary_chan(hv),
51 VMBUS_CHANPKT_TYPE_INBAND,
53 VMBUS_CHANPKT_FLAG_NONE, NULL);
/*
 * Internal helper for hn_nvs_execute(): send one NVS request with the
 * request-completion flag set (VMBUS_CHANPKT_FLAG_RC) and poll the
 * primary channel until the matching response arrives.  The response
 * payload is validated (header present, expected type/length) and
 * copied into 'resp'.  Incoming RNDIS data packets seen while polling
 * are acked back to the host and dropped.  Caller must already hold
 * the receive ring lock — see hn_nvs_execute().
 */
57 __hn_nvs_execute(struct hn_data *hv,
58 void *req, uint32_t reqlen,
59 void *resp, uint32_t resplen,
62 struct vmbus_channel *chan = hn_primary_chan(hv);
63 char buffer[NVS_RESPSIZE_MAX];
64 const struct hn_nvs_hdr *hdr;
69 /* Send request to ring buffer */
70 ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
72 VMBUS_CHANPKT_FLAG_RC, NULL);
75 PMD_DRV_LOG(ERR, "send request failed: %d", ret);
/*
 * Poll for the response: retry the receive, backing off by
 * HN_CHAN_INTERVAL_US between empty reads (DPDK has no interrupt
 * context to wake us — see the comment above hn_nvs_execute()).
 */
81 ret = rte_vmbus_chan_recv(chan, buffer, &len, &xactid);
83 rte_delay_us(HN_CHAN_INTERVAL_US);
88 PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
/* A packet shorter than the NVS header cannot be a valid response. */
92 if (len < sizeof(*hdr)) {
93 PMD_DRV_LOG(ERR, "response missing NVS header");
97 hdr = (struct hn_nvs_hdr *)buffer;
99 /* Silently drop received packets while waiting for response */
100 if (hdr->type == NVS_TYPE_RNDIS) {
/* Ack so the host can recycle the RXBUF slot we just consumed. */
101 hn_nvs_ack_rxbuf(chan, xactid);
102 --hv->rxbuf_outstanding;
106 if (hdr->type != type) {
107 PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
114 "invalid NVS resp len %u (expect %u)",
/* Hand the validated response back to the caller. */
119 memcpy(resp, buffer, resplen);
127 * Execute one control command and get the response.
128 * Only one command can be active on a channel at a time.
129 * Unlike BSD, DPDK does not have an interrupt context,
130 * so polling is required to wait for the response.
133 hn_nvs_execute(struct hn_data *hv,
134 void *req, uint32_t reqlen,
135 void *resp, uint32_t resplen,
138 struct hn_rx_queue *rxq = hv->primary;
/*
 * Take the primary RX queue's ring lock so the control exchange in
 * __hn_nvs_execute() is serialized against the receive path (and
 * against any concurrent control command).
 */
141 rte_spinlock_lock(&rxq->ring_lock);
142 ret = __hn_nvs_execute(hv, req, reqlen, resp, resplen, type);
143 rte_spinlock_unlock(&rxq->ring_lock);
/*
 * Attempt NVS initialization at exactly one protocol version
 * (ver_min == ver_max == nvs_ver).  A non-OK status from the host is
 * not fatal: hn_nvs_init() simply tries the next entry in
 * hn_nvs_version[].
 */
149 hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
151 struct hn_nvs_init init;
152 struct hn_nvs_init_resp resp;
156 memset(&init, 0, sizeof(init));
157 init.type = NVS_TYPE_INIT;
/* Offer a single version: min and max are both the candidate. */
158 init.ver_min = nvs_ver;
159 init.ver_max = nvs_ver;
161 error = hn_nvs_execute(hv, &init, sizeof(init),
167 status = resp.status;
168 if (status != NVS_STATUS_OK) {
169 /* Not fatal, try other versions */
170 PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
/*
 * Connect the receive buffer (RXBUF) to NVS.  The GPADL itself was
 * already established by the kernel on the primary channel; here we
 * only tell the host to start using it, then record the section
 * geometry from the response and allocate the per-slot bookkeeping
 * array (hv->rxbuf_info).
 */
179 hn_nvs_conn_rxbuf(struct hn_data *hv)
181 struct hn_nvs_rxbuf_conn conn;
182 struct hn_nvs_rxbuf_connresp resp;
186 /* Kernel has already setup RXBUF on primary channel. */
189 * Connect RXBUF to NVS.
191 conn.type = NVS_TYPE_RXBUF_CONN;
192 conn.gpadl = hv->rxbuf_res->phys_addr;
193 conn.sig = NVS_RXBUF_SIG;
194 PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64,
196 hv->rxbuf_res->phys_addr);
198 error = hn_nvs_execute(hv, &conn, sizeof(conn),
200 NVS_TYPE_RXBUF_CONNRESP);
203 "exec nvs rxbuf conn failed: %d",
208 status = resp.status;
209 if (status != NVS_STATUS_OK) {
211 "nvs rxbuf conn failed: %x", status);
/* This driver only supports a single RXBUF section. */
214 if (resp.nsect != 1) {
216 "nvs rxbuf response num sections %u != 1",
222 "receive buffer size %u count %u",
223 resp.nvs_sect[0].slotsz,
224 resp.nvs_sect[0].slotcnt,
225 hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
/* One hn_rx_bufinfo-style entry per RXBUF slot, zero-initialized. */
227 hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
228 sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
229 if (!hv->rxbuf_info) {
231 "could not allocate rxbuf info");
/*
 * Disconnect the receive buffer from NVS.  The request is one-way
 * (no response packet); afterwards the per-slot info array is freed
 * and we linger briefly so the host-side teardown can complete.
 * NOTE(review): hv->rxbuf_info is freed but not set to NULL here —
 * confirm nothing dereferences it after this call.
 */
239 hn_nvs_disconn_rxbuf(struct hn_data *hv)
241 struct hn_nvs_rxbuf_disconn disconn;
245 * Disconnect RXBUF from NVS.
247 memset(&disconn, 0, sizeof(disconn));
248 disconn.type = NVS_TYPE_RXBUF_DISCONN;
249 disconn.sig = NVS_RXBUF_SIG;
251 /* NOTE: No response. */
252 error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
255 "send nvs rxbuf disconn failed: %d",
259 rte_free(hv->rxbuf_info);
261 * Linger long enough for NVS to disconnect RXBUF.
/*
 * Disconnect the chimney (send-copy) buffer from NVS, if one was ever
 * connected (hv->chim_cnt != 0).  Like the RXBUF disconnect, this is
 * a one-way request with no response, followed by a linger so the
 * host can finish tearing the buffer down.
 */
267 hn_nvs_disconn_chim(struct hn_data *hv)
271 if (hv->chim_cnt != 0) {
272 struct hn_nvs_chim_disconn disconn;
274 /* Disconnect chimney sending buffer from NVS. */
275 memset(&disconn, 0, sizeof(disconn));
276 disconn.type = NVS_TYPE_CHIM_DISCONN;
277 disconn.sig = NVS_CHIM_SIG;
279 /* NOTE: No response. */
280 error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
284 "send nvs chim disconn failed: %d", error);
289 * Linger long enough for NVS to disconnect chimney
/*
 * Connect the chimney sending buffer to NVS and compute the section
 * geometry (hv->chim_szmax / hv->chim_cnt) from the host's reply.
 * A section size of zero or one not aligned to 4 bytes makes the
 * buffer unusable; in that case the buffer is disconnected again
 * (error path, line 348 below).
 */
297 hn_nvs_conn_chim(struct hn_data *hv)
299 struct hn_nvs_chim_conn chim;
300 struct hn_nvs_chim_connresp resp;
302 unsigned long len = hv->chim_res->len;
305 /* Connect chimney sending buffer to NVS */
306 memset(&chim, 0, sizeof(chim));
307 chim.type = NVS_TYPE_CHIM_CONN;
308 chim.gpadl = hv->chim_res->phys_addr;
309 chim.sig = NVS_CHIM_SIG;
310 PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
312 hv->chim_res->phys_addr);
314 error = hn_nvs_execute(hv, &chim, sizeof(chim),
316 NVS_TYPE_CHIM_CONNRESP);
318 PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
322 if (resp.status != NVS_STATUS_OK) {
323 PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
/* Section size must be nonzero and a multiple of 4 bytes. */
328 sectsz = resp.sectsz;
329 if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
330 /* Can't use chimney sending buffer; done! */
332 "invalid chimney sending buffer section size: %u",
338 hv->chim_szmax = sectsz;
339 hv->chim_cnt = len / sectsz;
341 PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
342 len, hv->chim_szmax, hv->chim_cnt);
/* Error path: undo the connect if the geometry was unusable. */
348 hn_nvs_disconn_chim(hv);
353 * Configure MTU and enable VLAN.
356 hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
358 struct hn_nvs_ndis_conf conf;
361 memset(&conf, 0, sizeof(conf));
362 conf.type = NVS_TYPE_NDIS_CONF;
/* NVS wants the full frame size, so add the Ethernet header. */
363 conf.mtu = mtu + RTE_ETHER_HDR_LEN;
364 conf.caps = NVS_NDIS_CONF_VLAN;
/* SR-IOV (VF datapath) capability exists only from NVS version 5. */
367 if (hv->nvs_ver >= NVS_VERSION_5)
368 conf.caps |= NVS_NDIS_CONF_SRIOV;
370 /* NOTE: No response. */
371 error = hn_nvs_req_send(hv, &conf, sizeof(conf));
374 "send nvs ndis conf failed: %d", error);
/*
 * Tell the host which NDIS version the driver will speak.
 * hv->ndis_ver was chosen by hn_nvs_init() based on the negotiated
 * NVS version.  One-way request; no response expected.
 */
382 hn_nvs_init_ndis(struct hn_data *hv)
384 struct hn_nvs_ndis_init ndis;
387 memset(&ndis, 0, sizeof(ndis));
388 ndis.type = NVS_TYPE_NDIS_INIT;
389 ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
390 ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);
392 /* NOTE: No response. */
393 error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
396 "send nvs ndis init failed: %d", error);
/*
 * Negotiate the NVS protocol version: walk hn_nvs_version[] until
 * hn_nvs_doinit() succeeds, then derive the NDIS version from the
 * accepted NVS version.  Fails only when no entry is accepted.
 */
402 hn_nvs_init(struct hn_data *hv)
408 * Find the supported NVS version and set NDIS version accordingly.
410 for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
411 error = hn_nvs_doinit(hv, hn_nvs_version[i]);
413 PMD_INIT_LOG(DEBUG, "version %#x error %d",
414 hn_nvs_version[i], error);
418 hv->nvs_ver = hn_nvs_version[i];
420 /* Set NDIS version according to NVS version. */
421 hv->ndis_ver = NDIS_VERSION_6_30;
/* Older hosts (NVS <= 4) only support NDIS 6.1. */
422 if (hv->nvs_ver <= NVS_VERSION_4)
423 hv->ndis_ver = NDIS_VERSION_6_1;
426 "NVS version %#x, NDIS version %u.%u",
427 hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
428 NDIS_VERSION_MINOR(hv->ndis_ver));
/* Fell through the whole table without a successful init. */
433 "no NVS compatible version available");
/*
 * Full NVS bring-up sequence for the device:
 *   1. negotiate NVS/NDIS versions (hn_nvs_init),
 *   2. send the NDIS configuration (MTU/VLAN/SRIOV) when the host
 *      speaks NVS >= 2,
 *   3. initialize NDIS,
 *   4. connect the receive buffer,
 *   5. connect the chimney sending buffer — rolling back the RXBUF
 *      connection if this last step fails.
 */
438 hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
445 error = hn_nvs_init(hv);
449 /** Configure NDIS before initializing it. */
450 if (hv->nvs_ver >= NVS_VERSION_2) {
451 error = hn_nvs_conf_ndis(hv, mtu);
459 error = hn_nvs_init_ndis(hv);
466 error = hn_nvs_conn_rxbuf(hv);
471 * Connect chimney sending buffer.
473 error = hn_nvs_conn_chim(hv);
/* Chimney connect failed: undo the RXBUF connection. */
475 hn_nvs_disconn_rxbuf(hv);
/*
 * Tear down the NVS connections (RXBUF and chimney buffer).
 * NOTE(review): the parameter is annotated __rte_unused but is in
 * fact used by both disconnect calls below — the attribute is stale
 * and could be dropped.
 */
483 hn_nvs_detach(struct hn_data *hv __rte_unused)
485 PMD_INIT_FUNC_TRACE();
487 /* NOTE: there are no requests to stop the NVS. */
488 hn_nvs_disconn_rxbuf(hv);
489 hn_nvs_disconn_chim(hv);
493 * Ack the consumed RXBUF associated w/ this channel packet,
494 * so that this RXBUF can be recycled by the hypervisor.
497 hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
499 unsigned int retries = 0;
500 struct hn_nvs_rndis_ack ack = {
501 .type = NVS_TYPE_RNDIS_ACK,
502 .status = NVS_STATUS_OK,
506 PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);
/* Completion packet, keyed by the transaction id of the RX packet. */
509 error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
510 &ack, sizeof(ack), tid,
511 VMBUS_CHANPKT_FLAG_NONE, NULL);
/* -EAGAIN means the TX bufring is momentarily full; retry. */
516 if (error == -EAGAIN) {
519 * This should _not_ happen in real world, since the
520 * consumption of the TX bufring from the TX path is
/* Bounded retry: give up (with an error log) after 10 attempts. */
523 PMD_RX_LOG(NOTICE, "RXBUF ack retry");
524 if (++retries < 10) {
530 PMD_DRV_LOG(ERR, "RXBUF ack failed");
/*
 * Ask the host to allocate sub-channels for multi-queue operation.
 * On input *nsubch is the number requested; on success it is updated
 * to the number the host actually granted (the host granting *more*
 * than requested is logged as unexpected).
 */
534 hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
536 struct hn_nvs_subch_req req;
537 struct hn_nvs_subch_resp resp;
540 memset(&req, 0, sizeof(req));
541 req.type = NVS_TYPE_SUBCH_REQ;
542 req.op = NVS_SUBCH_OP_ALLOC;
543 req.nsubch = *nsubch;
545 error = hn_nvs_execute(hv, &req, sizeof(req),
547 NVS_TYPE_SUBCH_RESP);
551 if (resp.status != NVS_STATUS_OK) {
553 "nvs subch alloc failed: %#x",
/* Host should never grant more channels than we asked for. */
558 if (resp.nsubch > *nsubch) {
560 "%u subchans are allocated, requested %u",
561 resp.nsubch, *nsubch);
/* Report the granted count back to the caller. */
563 *nsubch = resp.nsubch;
/*
 * Switch the active datapath between the synthetic device (path == 0)
 * and the SR-IOV VF (path != 0).  One-way request; no response.
 */
569 hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
571 struct hn_nvs_datapath dp;
574 PMD_DRV_LOG(DEBUG, "set datapath %s",
575 path ? "VF" : "Synthetic");
577 memset(&dp, 0, sizeof(dp));
578 dp.type = NVS_TYPE_SET_DATAPATH;
579 dp.active_path = path;
581 error = hn_nvs_req_send(hv, &dp, sizeof(dp));
584 "send set datapath failed: %d",