/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 */

/*
 * Network Virtualization Service.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* NVS protocol versions, tried in order by hn_nvs_init() */
static const uint32_t hn_nvs_version[] = {
	NVS_VERSION_61,
	NVS_VERSION_6,
	NVS_VERSION_5,
	NVS_VERSION_4,
	NVS_VERSION_2,
	NVS_VERSION_1
};
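
/* Post an NVS request on the primary channel; no completion is requested. */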
static int hn_nvs_req_send(struct hn_data *hv,
			   void *req, uint32_t reqlen)
{
	return rte_vmbus_chan_send(hn_primary_chan(hv),
				   VMBUS_CHANPKT_TYPE_INBAND,
				   req, reqlen, 0,
				   VMBUS_CHANPKT_FLAG_NONE, NULL);
}
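
/*
 * Send an NVS request and poll the primary channel until the
 * matching response arrives, then copy it to the caller's buffer.
 */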
static int
hn_nvs_execute(struct hn_data *hv,
	       void *req, uint32_t reqlen,
	       void *resp, uint32_t resplen,
	       uint32_t type)
{
	struct vmbus_channel *chan = hn_primary_chan(hv);
	char buffer[NVS_RESPSIZE_MAX];
	const struct hn_nvs_hdr *hdr;
	uint32_t len;
	int ret;

	/* Send request to ring buffer */
	ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
				  req, reqlen, 0,
				  VMBUS_CHANPKT_FLAG_RC, NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "send request failed: %d", ret);
		return ret;
	}

retry:
	len = sizeof(buffer);
	ret = rte_vmbus_chan_recv(chan, buffer, &len, NULL);
	if (ret == -EAGAIN) {
		rte_delay_us(HN_CHAN_INTERVAL_US);
		goto retry;
	}

	if (ret < 0) {
		PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
		return ret;
	}

	hdr = (struct hn_nvs_hdr *)buffer;
	if (hdr->type != type) {
		PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
			    hdr->type, type);
		return -EINVAL;
	}

	if (len < resplen) {
		PMD_DRV_LOG(ERR,
			    "invalid NVS resp len %u (expect %u)",
			    len, resplen);
		return -EINVAL;
	}

	memcpy(resp, buffer, resplen);

	return 0;
}
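
/* Negotiate a single NVS protocol version with the host. */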
static int
hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
{
	struct hn_nvs_init init;
	struct hn_nvs_init_resp resp;
	uint32_t status;
	int error;

	memset(&init, 0, sizeof(init));
	init.type = NVS_TYPE_INIT;
	init.ver_min = nvs_ver;
	init.ver_max = nvs_ver;

	error = hn_nvs_execute(hv, &init, sizeof(init),
			       &resp, sizeof(resp),
			       NVS_TYPE_INIT_RESP);
	if (error)
		return error;

	status = resp.status;
	if (status != NVS_STATUS_OK) {
		/* Not fatal, try other versions */
		PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
			     nvs_ver);
		return -EINVAL;
	}

	return 0;
}
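
/* Connect the receive buffer (RXBUF) to NVS and record its section layout. */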
static int
hn_nvs_conn_rxbuf(struct hn_data *hv)
{
	struct hn_nvs_rxbuf_conn conn;
	struct hn_nvs_rxbuf_connresp resp;
	uint32_t status;
	int error;

	/* Kernel has already setup RXBUF on primary channel. */

	/*
	 * Connect RXBUF to NVS.
	 */
	memset(&conn, 0, sizeof(conn));
	conn.type = NVS_TYPE_RXBUF_CONN;
	conn.gpadl = hv->rxbuf_res->phys_addr;
	conn.sig = NVS_RXBUF_SIG;
	PMD_DRV_LOG(DEBUG, "connect rxbuf va=%p gpad=%#" PRIx64,
		    hv->rxbuf_res->addr,
		    hv->rxbuf_res->phys_addr);

	error = hn_nvs_execute(hv, &conn, sizeof(conn),
			       &resp, sizeof(resp),
			       NVS_TYPE_RXBUF_CONNRESP);
	if (error) {
		PMD_DRV_LOG(ERR,
			    "exec nvs rxbuf conn failed: %d",
			    error);
		return error;
	}

	status = resp.status;
	if (status != NVS_STATUS_OK) {
		PMD_DRV_LOG(ERR,
			    "nvs rxbuf conn failed: %x", status);
		return -EIO;
	}
	if (resp.nsect != 1) {
		PMD_DRV_LOG(ERR,
			    "nvs rxbuf response num sections %u != 1",
			    resp.nsect);
		return -EIO;
	}

	PMD_DRV_LOG(INFO,
		    "receive buffer size %u count %u",
		    resp.nvs_sect[0].slotsz,
		    resp.nvs_sect[0].slotcnt);
	hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;

	hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
				    sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
	if (!hv->rxbuf_info) {
		PMD_DRV_LOG(ERR,
			    "could not allocate rxbuf info");
		return -ENOMEM;
	}

	return 0;
}
static void
hn_nvs_disconn_rxbuf(struct hn_data *hv)
{
	struct hn_nvs_rxbuf_disconn disconn;
	int error;

	/*
	 * Disconnect RXBUF from NVS.
	 */
	memset(&disconn, 0, sizeof(disconn));
	disconn.type = NVS_TYPE_RXBUF_DISCONN;
	disconn.sig = NVS_RXBUF_SIG;

	/* NOTE: No response. */
	error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
	if (error) {
		PMD_DRV_LOG(ERR,
			    "send nvs rxbuf disconn failed: %d",
			    error);
	}

	rte_free(hv->rxbuf_info);

	/*
	 * Linger long enough for NVS to disconnect RXBUF.
	 */
	rte_delay_ms(200);
}
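
/* Disconnect the chimney (send) buffer from NVS, if it was connected. */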
static void
hn_nvs_disconn_chim(struct hn_data *hv)
{
	int error;

	if (hv->chim_cnt != 0) {
		struct hn_nvs_chim_disconn disconn;

		/* Disconnect chimney sending buffer from NVS. */
		memset(&disconn, 0, sizeof(disconn));
		disconn.type = NVS_TYPE_CHIM_DISCONN;
		disconn.sig = NVS_CHIM_SIG;

		/* NOTE: No response. */
		error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
		if (error) {
			PMD_DRV_LOG(ERR,
				    "send nvs chim disconn failed: %d", error);
		}

		hv->chim_cnt = 0;

		/*
		 * Linger long enough for NVS to disconnect chimney
		 * sending buffer.
		 */
		rte_delay_ms(200);
	}
}
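
/* Connect the chimney (send) buffer to NVS and compute its section layout. */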
static int
hn_nvs_conn_chim(struct hn_data *hv)
{
	struct hn_nvs_chim_conn chim;
	struct hn_nvs_chim_connresp resp;
	uint32_t sectsz;
	unsigned long len = hv->chim_res->len;
	int error;

	/* Connect chimney sending buffer to NVS */
	memset(&chim, 0, sizeof(chim));
	chim.type = NVS_TYPE_CHIM_CONN;
	chim.gpadl = hv->chim_res->phys_addr;
	chim.sig = NVS_CHIM_SIG;
	PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
		    hv->chim_res->addr,
		    hv->chim_res->phys_addr);

	error = hn_nvs_execute(hv, &chim, sizeof(chim),
			       &resp, sizeof(resp),
			       NVS_TYPE_CHIM_CONNRESP);
	if (error) {
		PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
		return error;
	}

	if (resp.status != NVS_STATUS_OK) {
		PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
			    resp.status);
		error = -EIO;
		goto cleanup;
	}

	sectsz = resp.sectsz;
	if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
		/* Can't use chimney sending buffer; done! */
		PMD_DRV_LOG(NOTICE,
			    "invalid chimney sending buffer section size: %u",
			    sectsz);
		error = -EINVAL;
		goto cleanup;
	}

	hv->chim_szmax = sectsz;
	hv->chim_cnt = len / sectsz;

	PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
		    len, hv->chim_szmax, hv->chim_cnt);

	return 0;

cleanup:
	hn_nvs_disconn_chim(hv);
	return error;
}

/*
 * Configure MTU and enable VLAN.
 */
static int
hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
{
	struct hn_nvs_ndis_conf conf;
	int error;

	memset(&conf, 0, sizeof(conf));
	conf.type = NVS_TYPE_NDIS_CONF;
	conf.mtu = mtu + ETHER_HDR_LEN;
	conf.caps = NVS_NDIS_CONF_VLAN;

	/*
	 * TODO enable SRIOV:
	 *	if (hv->nvs_ver >= NVS_VERSION_5)
	 *		conf.caps |= NVS_NDIS_CONF_SRIOV;
	 */

	/* NOTE: No response. */
	error = hn_nvs_req_send(hv, &conf, sizeof(conf));
	if (error)
		PMD_DRV_LOG(ERR,
			    "send nvs ndis conf failed: %d", error);

	return error;
}
static int
hn_nvs_init_ndis(struct hn_data *hv)
{
	struct hn_nvs_ndis_init ndis;
	int error;

	memset(&ndis, 0, sizeof(ndis));
	ndis.type = NVS_TYPE_NDIS_INIT;
	ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
	ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);

	/* NOTE: No response. */
	error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
	if (error)
		PMD_DRV_LOG(ERR,
			    "send nvs ndis init failed: %d", error);

	return error;
}
static int
hn_nvs_init(struct hn_data *hv)
{
	unsigned int i;
	int error;

	/*
	 * Find the supported NVS version and set NDIS version accordingly.
	 */
	for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
		error = hn_nvs_doinit(hv, hn_nvs_version[i]);
		if (error) {
			PMD_INIT_LOG(DEBUG, "version %#x error %d",
				     hn_nvs_version[i], error);
			continue;
		}

		hv->nvs_ver = hn_nvs_version[i];

		/* Set NDIS version according to NVS version. */
		hv->ndis_ver = NDIS_VERSION_6_30;
		if (hv->nvs_ver <= NVS_VERSION_4)
			hv->ndis_ver = NDIS_VERSION_6_1;

		PMD_INIT_LOG(DEBUG,
			     "NVS version %#x, NDIS version %u.%u",
			     hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
			     NDIS_VERSION_MINOR(hv->ndis_ver));
		return 0;
	}

	PMD_DRV_LOG(ERR,
		    "no NVS compatible version available");
	return -ENXIO;
}
int
hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
{
	int error;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Initialize NVS.
	 */
	error = hn_nvs_init(hv);
	if (error)
		return error;

	/* Configure NDIS before initializing it. */
	if (hv->nvs_ver >= NVS_VERSION_2) {
		error = hn_nvs_conf_ndis(hv, mtu);
		if (error)
			return error;
	}

	/*
	 * Initialize NDIS.
	 */
	error = hn_nvs_init_ndis(hv);
	if (error)
		return error;

	/*
	 * Connect RXBUF.
	 */
	error = hn_nvs_conn_rxbuf(hv);
	if (error)
		return error;

	/*
	 * Connect chimney sending buffer.
	 */
	error = hn_nvs_conn_chim(hv);
	if (error) {
		hn_nvs_disconn_rxbuf(hv);
		return error;
	}

	return 0;
}
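
/* Tear down NVS state; the protocol has no explicit shutdown request. */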
void
hn_nvs_detach(struct hn_data *hv)
{
	PMD_INIT_FUNC_TRACE();

	/* NOTE: there are no requests to stop the NVS. */
	hn_nvs_disconn_rxbuf(hv);
	hn_nvs_disconn_chim(hv);
}

/*
 * Ack the consumed RXBUF associated w/ this channel packet,
 * so that this RXBUF can be recycled by the hypervisor.
 */
void
hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
{
	unsigned int retries = 0;
	struct hn_nvs_rndis_ack ack = {
		.type = NVS_TYPE_RNDIS_ACK,
		.status = NVS_STATUS_OK,
	};
	int error;

	PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);

again:
	error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
				    &ack, sizeof(ack), tid,
				    VMBUS_CHANPKT_FLAG_NONE, NULL);
	if (error == 0)
		return;

	if (error == -EAGAIN) {
		/*
		 * NOTE:
		 * This should _not_ happen in real world, since the
		 * consumption of the TX bufring from the TX path is
		 * controlled.
		 */
		PMD_RX_LOG(NOTICE, "RXBUF ack retry");
		if (++retries < 10) {
			rte_delay_ms(1);
			goto again;
		}
	}

	/* RXBUF leaks! */
	PMD_DRV_LOG(ERR, "RXBUF ack failed");
}
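
/*
 * Ask the host to allocate subchannels; on return *nsubch holds
 * the number actually granted.
 */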
int
hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
{
	struct hn_nvs_subch_req req;
	struct hn_nvs_subch_resp resp;
	int error;

	memset(&req, 0, sizeof(req));
	req.type = NVS_TYPE_SUBCH_REQ;
	req.op = NVS_SUBCH_OP_ALLOC;
	req.nsubch = *nsubch;

	error = hn_nvs_execute(hv, &req, sizeof(req),
			       &resp, sizeof(resp),
			       NVS_TYPE_SUBCH_RESP);
	if (error)
		return error;

	if (resp.status != NVS_STATUS_OK) {
		PMD_INIT_LOG(ERR,
			     "nvs subch alloc failed: %#x",
			     resp.status);
		return -EIO;
	}

	if (resp.nsubch > *nsubch) {
		PMD_INIT_LOG(NOTICE,
			     "%u subchans are allocated, requested %u",
			     resp.nsubch, *nsubch);
	}
	*nsubch = resp.nsubch;

	return 0;
}
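
/* Select the active datapath (synthetic NIC or SR-IOV VF) on the host. */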
void
hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
{
	struct hn_nvs_datapath dp;

	memset(&dp, 0, sizeof(dp));
	dp.type = NVS_TYPE_SET_DATAPATH;
	dp.active_path = path;

	/* NOTE: No response. */
	hn_nvs_req_send(hv, &dp, sizeof(dp));
}