1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2018 Microsoft Corp.
3 * Copyright (c) 2010-2012 Citrix Inc.
4 * Copyright (c) 2012 NetApp Inc.
9 * Network Virtualization Service.
19 #include <rte_ethdev.h>
20 #include <rte_string_fns.h>
21 #include <rte_memzone.h>
22 #include <rte_malloc.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_ether.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
32 #include <rte_bus_vmbus.h>
/*
 * NVS protocol versions to try, walked in order by hn_nvs_init()
 * until hn_nvs_doinit() accepts one.  (Initializer entries are
 * elided in this excerpt.)
 */
38 static const uint32_t hn_nvs_version[] = {
/*
 * Send a one-shot NVS request on the primary channel as an inband
 * packet with no completion requested (VMBUS_CHANPKT_FLAG_NONE).
 * Used for requests that get no response from the host (see the
 * callers' "NOTE: No response." comments).  Returns the result of
 * rte_vmbus_chan_send().
 * NOTE(review): some body lines are elided in this excerpt.
 */
47 static int hn_nvs_req_send(struct hn_data *hv,
48 void *req, uint32_t reqlen)
50 return rte_vmbus_chan_send(hn_primary_chan(hv),
51 VMBUS_CHANPKT_TYPE_INBAND,
53 VMBUS_CHANPKT_FLAG_NONE, NULL);
/*
 * Send one NVS request and busy-wait for its matching response on the
 * primary channel.  The caller (hn_nvs_execute()) holds the primary
 * RX ring lock, so only one command is outstanding at a time.
 * Unrelated packets received while waiting are acked and discarded.
 * NOTE(review): many interior lines are elided in this excerpt;
 * the comments below annotate only what is visible.
 */
57 __hn_nvs_execute(struct hn_data *hv,
58 void *req, uint32_t reqlen,
59 void *resp, uint32_t resplen,
62 struct vmbus_channel *chan = hn_primary_chan(hv);
63 char buffer[NVS_RESPSIZE_MAX];
64 const struct hn_nvs_hdr *hdr;
69 /* Send request to ring buffer */
/* FLAG_RC asks the host for a completion, i.e. a response packet. */
70 ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
72 VMBUS_CHANPKT_FLAG_RC, NULL);
75 PMD_DRV_LOG(ERR, "send request failed: %d", ret);
/* Poll the channel for a packet; DPDK has no interrupt context. */
81 ret = rte_vmbus_chan_recv(chan, buffer, &len, &xactid);
/* Nothing available yet: short delay before polling again. */
83 rte_delay_us(HN_CHAN_INTERVAL_US);
88 PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
/* Packet must be at least as large as the NVS header to decode. */
92 if (len < sizeof(*hdr)) {
93 PMD_DRV_LOG(ERR, "response missing NVS header");
97 hdr = (struct hn_nvs_hdr *)buffer;
99 /* Silently drop received packets while waiting for response */
/* Ack so the hypervisor can recycle the RXBUF slot. */
102 hn_nvs_ack_rxbuf(chan, xactid);
105 case NVS_TYPE_TXTBL_NOTE:
106 PMD_DRV_LOG(DEBUG, "discard packet type 0x%x", hdr->type);
/* Response type must match what the caller expects. */
110 if (hdr->type != type) {
111 PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
118 "invalid NVS resp len %u (expect %u)",
/* Validated: copy the response out to the caller's buffer. */
123 memcpy(resp, buffer, resplen);
131  * Execute one control command and get the response.
132  * Only one command can be active on a channel at once
133  * Unlike BSD, DPDK does not have an interrupt context
134  * so the polling is required to wait for response.
137 hn_nvs_execute(struct hn_data *hv,
138 void *req, uint32_t reqlen,
139 void *resp, uint32_t resplen,
142 struct hn_rx_queue *rxq = hv->primary;
/* Serialize with the RX path: the response arrives on the same ring. */
145 rte_spinlock_lock(&rxq->ring_lock);
146 ret = __hn_nvs_execute(hv, req, reqlen, resp, resplen, type);
147 rte_spinlock_unlock(&rxq->ring_lock);
/*
 * Attempt NVS initialization at exactly one protocol version
 * (ver_min == ver_max == nvs_ver).  A non-OK status is not fatal:
 * hn_nvs_init() iterates over hn_nvs_version[] trying each entry.
 */
153 hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
155 struct hn_nvs_init init;
156 struct hn_nvs_init_resp resp;
160 memset(&init, 0, sizeof(init));
161 init.type = NVS_TYPE_INIT;
/* Propose a single version rather than a range. */
162 init.ver_min = nvs_ver;
163 init.ver_max = nvs_ver;
165 error = hn_nvs_execute(hv, &init, sizeof(init),
171 status = resp.status;
172 if (status != NVS_STATUS_OK) {
173 /* Not fatal, try other versions */
174 PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
/*
 * Connect the receive buffer (already set up by the kernel on the
 * primary channel) to NVS, then record the section count reported by
 * the host and allocate the primary queue's per-slot rxbuf_info
 * array sized from it.
 * NOTE(review): interior lines are elided in this excerpt.
 */
183 hn_nvs_conn_rxbuf(struct hn_data *hv)
185 struct hn_nvs_rxbuf_conn conn;
186 struct hn_nvs_rxbuf_connresp resp;
190 /* Kernel has already setup RXBUF on primary channel. */
193 * Connect RXBUF to NVS.
195 conn.type = NVS_TYPE_RXBUF_CONN;
/* GPADL handle for the RXBUF region is its physical address here. */
196 conn.gpadl = hv->rxbuf_res->phys_addr;
197 conn.sig = NVS_RXBUF_SIG;
198 PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64,
200 hv->rxbuf_res->phys_addr);
202 error = hn_nvs_execute(hv, &conn, sizeof(conn),
204 NVS_TYPE_RXBUF_CONNRESP);
207 "exec nvs rxbuf conn failed: %d",
212 status = resp.status;
213 if (status != NVS_STATUS_OK) {
215 "nvs rxbuf conn failed: %x", status);
/* This driver only supports a single RXBUF section. */
218 if (resp.nsect != 1) {
220 "nvs rxbuf response num sections %u != 1",
226 "receive buffer size %u count %u",
227 resp.nvs_sect[0].slotsz,
228 resp.nvs_sect[0].slotcnt;
229 hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
232 * Primary queue's rxbuf_info is not allocated at creation time.
233 * Now we can allocate it after we figure out the slotcnt.
235 hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
236 hv->rxbuf_section_cnt,
237 sizeof(*hv->primary->rxbuf_info),
238 RTE_CACHE_LINE_SIZE);
239 if (!hv->primary->rxbuf_info) {
241 "could not allocate rxbuf info");
/*
 * Tell NVS to disconnect the receive buffer.  The host sends no
 * response, so the request goes out via hn_nvs_req_send() and the
 * code then lingers briefly to let the host finish the teardown.
 */
249 hn_nvs_disconn_rxbuf(struct hn_data *hv)
251 struct hn_nvs_rxbuf_disconn disconn;
255 * Disconnect RXBUF from NVS.
257 memset(&disconn, 0, sizeof(disconn));
258 disconn.type = NVS_TYPE_RXBUF_DISCONN;
259 disconn.sig = NVS_RXBUF_SIG;
261 /* NOTE: No response. */
262 error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
265 "send nvs rxbuf disconn failed: %d",
270 * Linger long enough for NVS to disconnect RXBUF.
/*
 * Disconnect the chimney (send) buffer from NVS, but only if one was
 * actually connected (chim_cnt != 0).  Like the RXBUF disconnect,
 * the host sends no response and the code lingers afterwards.
 */
276 hn_nvs_disconn_chim(struct hn_data *hv)
280 if (hv->chim_cnt != 0) {
281 struct hn_nvs_chim_disconn disconn;
283 /* Disconnect chimney sending buffer from NVS. */
284 memset(&disconn, 0, sizeof(disconn));
285 disconn.type = NVS_TYPE_CHIM_DISCONN;
286 disconn.sig = NVS_CHIM_SIG;
288 /* NOTE: No response. */
289 error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
293 "send nvs chim disconn failed: %d", error);
298 * Linger long enough for NVS to disconnect chimney
/*
 * Connect the chimney (send) buffer to NVS and record its geometry:
 * the host returns the per-section size, from which the section
 * count is derived (chim_cnt = total length / section size).
 * NOTE(review): interior lines are elided in this excerpt; the
 * hn_nvs_disconn_chim() call at the end appears to be a failure
 * cleanup path — confirm against the full source.
 */
306 hn_nvs_conn_chim(struct hn_data *hv)
308 struct hn_nvs_chim_conn chim;
309 struct hn_nvs_chim_connresp resp;
311 unsigned long len = hv->chim_res->len;
314 /* Connect chimney sending buffer to NVS */
315 memset(&chim, 0, sizeof(chim));
316 chim.type = NVS_TYPE_CHIM_CONN;
/* GPADL handle for the send buffer region. */
317 chim.gpadl = hv->chim_res->phys_addr;
318 chim.sig = NVS_CHIM_SIG;
319 PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
321 hv->chim_res->phys_addr);
323 error = hn_nvs_execute(hv, &chim, sizeof(chim),
325 NVS_TYPE_CHIM_CONNRESP);
327 PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
331 if (resp.status != NVS_STATUS_OK) {
332 PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
/* Section size must be non-zero and 32-bit aligned to be usable. */
337 sectsz = resp.sectsz;
338 if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
339 /* Can't use chimney sending buffer; done! */
341 "invalid chimney sending buffer section size: %u",
347 hv->chim_szmax = sectsz;
348 hv->chim_cnt = len / sectsz;
350 PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
351 len, hv->chim_szmax, hv->chim_cnt);
357 hn_nvs_disconn_chim(hv);
362  * Configure MTU and enable VLAN.
/*
 * Sends an NDIS_CONF request carrying the frame size (MTU plus the
 * Ethernet header) and capability flags; SR-IOV capability is only
 * advertised on NVS version 5 and later.  No response from the host.
 */
365 hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
367 struct hn_nvs_ndis_conf conf;
370 memset(&conf, 0, sizeof(conf));
371 conf.type = NVS_TYPE_NDIS_CONF;
/* NVS wants the full frame size, not just the L3 MTU. */
372 conf.mtu = mtu + RTE_ETHER_HDR_LEN;
373 conf.caps = NVS_NDIS_CONF_VLAN;
376 if (hv->nvs_ver >= NVS_VERSION_5)
377 conf.caps |= NVS_NDIS_CONF_SRIOV;
379 /* NOTE: No response. */
380 error = hn_nvs_req_send(hv, &conf, sizeof(conf));
383 "send nvs ndis conf failed: %d", error);
/*
 * Ask NVS to initialize NDIS at the major/minor version previously
 * chosen in hn_nvs_init() (hv->ndis_ver).  No response from the host.
 */
391 hn_nvs_init_ndis(struct hn_data *hv)
393 struct hn_nvs_ndis_init ndis;
396 memset(&ndis, 0, sizeof(ndis));
397 ndis.type = NVS_TYPE_NDIS_INIT;
398 ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
399 ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);
401 /* NOTE: No response. */
402 error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
405 "send nvs ndis init failed: %d", error);
/*
 * Negotiate the NVS protocol version by trying each entry of
 * hn_nvs_version[] in order; on success, derive the NDIS version
 * from the accepted NVS version (6.1 for NVS <= 4, else 6.30).
 * NOTE(review): interior control-flow lines are elided in this
 * excerpt (the success return inside the loop is not visible).
 */
411 hn_nvs_init(struct hn_data *hv)
417 * Find the supported NVS version and set NDIS version accordingly.
419 for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
420 error = hn_nvs_doinit(hv, hn_nvs_version[i]);
422 PMD_INIT_LOG(DEBUG, "version %#x error %d",
423 hn_nvs_version[i], error);
427 hv->nvs_ver = hn_nvs_version[i];
429 /* Set NDIS version according to NVS version. */
430 hv->ndis_ver = NDIS_VERSION_6_30;
431 if (hv->nvs_ver <= NVS_VERSION_4)
432 hv->ndis_ver = NDIS_VERSION_6_1;
435 "NVS version %#x, NDIS version %u.%u",
436 hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
437 NDIS_VERSION_MINOR(hv->ndis_ver));
/* Loop exhausted: no version was accepted by the host. */
442 "no NVS compatible version available");
/*
 * Full NVS bring-up sequence: negotiate the protocol version,
 * configure NDIS (NVS >= 2 only), initialize NDIS, connect the
 * receive buffer, and finally connect the chimney send buffer.
 * If the chimney connect fails, the receive buffer is disconnected
 * again so no half-attached state is left behind.
 */
447 hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
454 error = hn_nvs_init(hv);
458 /** Configure NDIS before initializing it. */
459 if (hv->nvs_ver >= NVS_VERSION_2) {
460 error = hn_nvs_conf_ndis(hv, mtu);
468 error = hn_nvs_init_ndis(hv);
475 error = hn_nvs_conn_rxbuf(hv);
480 * Connect chimney sending buffer.
482 error = hn_nvs_conn_chim(hv);
/* Unwind the RXBUF connection on chimney failure. */
484 hn_nvs_disconn_rxbuf(hv);
/*
 * Tear down the NVS attachment: disconnect the receive buffer and
 * the chimney send buffer.  There is no explicit "stop NVS" request.
 * NOTE(review): hv is marked __rte_unused yet is used below; the
 * attribute is "maybe unused" so this is harmless, but the
 * annotation could be dropped.
 */
492 hn_nvs_detach(struct hn_data *hv __rte_unused)
494 PMD_INIT_FUNC_TRACE();
496 /* NOTE: there are no requests to stop the NVS. */
497 hn_nvs_disconn_rxbuf(hv);
498 hn_nvs_disconn_chim(hv);
502  * Ack the consumed RXBUF associated w/ this channel packet,
503  * so that this RXBUF can be recycled by the hypervisor.
/*
 * Sends a completion packet carrying the transaction id (tid) back
 * on the channel.  A full TX bufring (-EAGAIN) is retried up to 10
 * times before the ack is given up on with an error log.
 * NOTE(review): interior lines (loop construct, delay between
 * retries) are elided in this excerpt.
 */
506 hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
508 unsigned int retries = 0;
509 struct hn_nvs_rndis_ack ack = {
510 .type = NVS_TYPE_RNDIS_ACK,
511 .status = NVS_STATUS_OK,
515 PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);
/* Completion packet echoes the tid so the host can match it. */
518 error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
519 &ack, sizeof(ack), tid,
520 VMBUS_CHANPKT_FLAG_NONE, NULL);
525 if (error == -EAGAIN) {
528 * This should _not_ happen in real world, since the
529 * consumption of the TX bufring from the TX path is
532 PMD_RX_LOG(NOTICE, "RXBUF ack retry");
533 if (++retries < 10) {
539 PMD_DRV_LOG(ERR, "RXBUF ack failed");
/*
 * Request *nsubch subchannels from the host.  On success *nsubch is
 * updated to the number actually granted; the host may also grant
 * more than requested, which is logged before the count is stored.
 */
543 hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
545 struct hn_nvs_subch_req req;
546 struct hn_nvs_subch_resp resp;
549 memset(&req, 0, sizeof(req));
550 req.type = NVS_TYPE_SUBCH_REQ;
551 req.op = NVS_SUBCH_OP_ALLOC;
552 req.nsubch = *nsubch;
554 error = hn_nvs_execute(hv, &req, sizeof(req),
556 NVS_TYPE_SUBCH_RESP);
560 if (resp.status != NVS_STATUS_OK) {
562 "nvs subch alloc failed: %#x",
/* Host granted more than asked for; log the discrepancy. */
567 if (resp.nsubch > *nsubch) {
569 "%u subchans are allocated, requested %u",
570 resp.nsubch, *nsubch);
/* Report back the count the host actually provided. */
572 *nsubch = resp.nsubch;
578 hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
580 struct hn_nvs_datapath dp;
583 PMD_DRV_LOG(DEBUG, "set datapath %s",
584 path ? "VF" : "Synthetic");
586 memset(&dp, 0, sizeof(dp));
587 dp.type = NVS_TYPE_SET_DATAPATH;
588 dp.active_path = path;
590 error = hn_nvs_req_send(hv, &dp, sizeof(dp));
593 "send set datapath failed: %d",