/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "ice_dcf.h"

#define ICE_DCF_AQ_LEN     32
#define ICE_DCF_AQ_BUF_SZ  4096

#define ICE_DCF_ARQ_MAX_RETRIES 200
#define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */

#define ICE_DCF_VF_RES_BUF_SZ   \
        (sizeof(struct virtchnl_vf_resource) +  \
         IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))
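
/* Send a virtchnl request to the PF over the admin send queue directly,
 * without going through the interrupt-driven command path. Used during
 * initialization, before the IRQ0 handler is registered.
 */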
static __rte_always_inline int
ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
                            uint8_t *req_msg, uint16_t req_msglen)
{
        return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
                                      req_msg, req_msglen, NULL);
}
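
/* Poll the admin receive queue for the response to @op and copy the
 * payload into @rsp_msgbuf, retrying for up to
 * ICE_DCF_ARQ_MAX_RETRIES * ICE_DCF_ARQ_CHECK_TIME milliseconds.
 */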
static int
ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
                            uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
                            uint16_t *rsp_msglen)
{
        struct iavf_arq_event_info event;
        enum virtchnl_ops v_op;
        int i = 0;
        int err;

        event.buf_len = rsp_buflen;
        event.msg_buf = rsp_msgbuf;

        do {
                err = iavf_clean_arq_element(&hw->avf, &event, NULL);
                if (err != IAVF_SUCCESS)
                        goto again;

                v_op = rte_le_to_cpu_32(event.desc.cookie_high);
                if (v_op != op)
                        goto again;

                if (rsp_msglen != NULL)
                        *rsp_msglen = event.msg_len;
                return rte_le_to_cpu_32(event.desc.cookie_low);

again:
                rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
        } while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

        return -EIO;
}
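
/* Commands issued through the interrupt-driven path are tracked on
 * hw->vc_cmd_queue; the helpers below add, remove and transmit entries
 * under vc_cmd_queue_lock.
 */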
static __rte_always_inline void
ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
        rte_spinlock_lock(&hw->vc_cmd_queue_lock);

        TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);

        rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

static __rte_always_inline void
ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
        cmd->v_ret = IAVF_ERR_NOT_READY;
        cmd->rsp_msglen = 0;
        cmd->pending = 1;

        rte_spinlock_lock(&hw->vc_cmd_queue_lock);

        TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);

        rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

static __rte_always_inline int
ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
        return iavf_aq_send_msg_to_pf(&hw->avf,
                                      cmd->v_op, IAVF_SUCCESS,
                                      cmd->req_msg, cmd->req_msglen, NULL);
}
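
/* Dispatch one admin receive queue event: deliver VIRTCHNL_OP_EVENT
 * messages to the registered event callback, otherwise match the reply
 * against a pending command on the queue and copy the response back.
 */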
static __rte_always_inline void
ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
{
        struct dcf_virtchnl_cmd *cmd;
        enum virtchnl_ops v_op;
        enum iavf_status v_ret;
        uint16_t aq_op;

        aq_op = rte_le_to_cpu_16(info->desc.opcode);
        if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
                PMD_DRV_LOG(ERR,
                            "Request %u is not supported yet", aq_op);
                return;
        }

        v_op = rte_le_to_cpu_32(info->desc.cookie_high);
        if (v_op == VIRTCHNL_OP_EVENT) {
                if (hw->vc_event_msg_cb != NULL)
                        hw->vc_event_msg_cb(hw,
                                            info->msg_buf,
                                            info->msg_len);
                return;
        }

        v_ret = rte_le_to_cpu_32(info->desc.cookie_low);

        rte_spinlock_lock(&hw->vc_cmd_queue_lock);

        TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
                if (cmd->v_op == v_op && cmd->pending) {
                        cmd->v_ret = v_ret;
                        cmd->rsp_msglen = RTE_MIN(info->msg_len,
                                                  cmd->rsp_buflen);
                        if (likely(cmd->rsp_msglen != 0))
                                rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
                                           cmd->rsp_msglen);

                        /* prevent compiler reordering */
                        rte_compiler_barrier();
                        cmd->pending = 0;
                        break;
                }
        }

        rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}
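
/* Drain the admin receive queue and hand each event to
 * ice_dcf_aq_cmd_handle(); called from the IRQ0 interrupt handler.
 */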
static void
ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
{
        struct iavf_arq_event_info info;
        uint16_t pending = 1;
        int ret;

        info.buf_len = ICE_DCF_AQ_BUF_SZ;
        info.msg_buf = hw->arq_buf;

        while (pending) {
                ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
                if (ret != IAVF_SUCCESS)
                        break;

                ice_dcf_aq_cmd_handle(hw, &info);
        }
}
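
/* Negotiate the virtchnl API version with the PF and verify that it is
 * in the range this driver supports.
 */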
static int
ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
{
#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START    1
#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START    1
        struct virtchnl_version_info version, *pver;
        int err;

        version.major = VIRTCHNL_VERSION_MAJOR;
        version.minor = VIRTCHNL_VERSION_MINOR;
        err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
                                          (uint8_t *)&version, sizeof(version));
        if (err) {
                PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
                return err;
        }

        pver = &hw->virtchnl_version;
        err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
                                          (uint8_t *)pver, sizeof(*pver), NULL);
        if (err) {
                PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
                return -1;
        }

        PMD_INIT_LOG(DEBUG,
                     "Peer PF API version: %u.%u", pver->major, pver->minor);

        if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
            (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
             pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
                PMD_INIT_LOG(ERR,
                             "VIRTCHNL API version should not be lower than (%u.%u)",
                             ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
                             ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
                return -1;
        } else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
                   (pver->major == VIRTCHNL_VERSION_MAJOR &&
                    pver->minor > VIRTCHNL_VERSION_MINOR)) {
                PMD_INIT_LOG(ERR,
                             "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
                             pver->major, pver->minor,
                             VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
                return -1;
        }

        PMD_INIT_LOG(DEBUG, "Peer is supported PF host");

        return 0;
}
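
/* Request the VF resources (including the DCF capability) from the PF
 * and locate the SR-IOV LAN VSI that this DCF instance controls.
 */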
static int
ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
{
        uint32_t caps;
        int err, i;

        caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
               VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
               VF_BASE_MODE_OFFLOADS;

        err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
                                          (uint8_t *)&caps, sizeof(caps));
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
                return err;
        }

        err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
                                          (uint8_t *)hw->vf_res,
                                          ICE_DCF_VF_RES_BUF_SZ, NULL);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
                return -1;
        }

        iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);

        hw->vsi_res = NULL;
        for (i = 0; i < hw->vf_res->num_vsis; i++) {
                if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
                        hw->vsi_res = &hw->vf_res->vsi_res[i];
        }

        if (!hw->vsi_res) {
                PMD_DRV_LOG(ERR, "no LAN VSI found");
                return -1;
        }

        hw->vsi_id = hw->vsi_res->vsi_id;
        PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);

        return 0;
}
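
/* Fetch the VF ID to VSI ID mapping from the PF. Returns 0 when the map
 * was updated, 1 when it is unchanged and a negative value on error.
 */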
static int
ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
{
        struct virtchnl_dcf_vsi_map *vsi_map;
        uint32_t valid_msg_len;
        uint16_t len;
        int err;

        err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
                                          NULL, 0);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
                return err;
        }

        err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
                                          hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
                                          &len);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
                return err;
        }

        vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
        valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
                        sizeof(*vsi_map);
        if (len != valid_msg_len) {
                PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
                            len);
                return -EINVAL;
        }

        if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
                PMD_DRV_LOG(ERR, "The number of VSIs in the map (%u) doesn't match the number of VFs (%u)",
                            vsi_map->num_vfs, hw->num_vfs);
                return -EINVAL;
        }

        len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
        if (!hw->vf_vsi_map) {
                hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
                if (!hw->vf_vsi_map) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
                        return -ENOMEM;
                }

                hw->num_vfs = vsi_map->num_vfs;
        }

        if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
                PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
                return 1;
        }

        rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);

        return 0;
}
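
/* Ask the PF to take this VF out of DCF mode; used on teardown and when
 * initialization fails after the DCF capability was granted.
 */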
static int
ice_dcf_mode_disable(struct ice_dcf_hw *hw)
{
        int err;

        err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
                                          NULL, 0);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
                return err;
        }

        err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
                                          hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
        if (err) {
                PMD_DRV_LOG(ERR,
                            "Failed to get response of OP_DCF_DISABLE %d",
                            err);
                return -1;
        }

        return 0;
}
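
/* Poll VFGEN_RSTAT until the VF reports that its reset has completed,
 * or give up after ICE_DCF_RESET_WAIT_CNT polls.
 */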
static int
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
{
#define ICE_DCF_RESET_WAIT_CNT       50
        struct iavf_hw *avf = &hw->avf;
        int i, reset;

        for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
                reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
                        IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
                reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;

                if (reset == VIRTCHNL_VFR_VFACTIVE ||
                    reset == VIRTCHNL_VFR_COMPLETED)
                        break;

                rte_delay_ms(20);
        }

        if (i >= ICE_DCF_RESET_WAIT_CNT)
                return -1;

        return 0;
}

static inline void
ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
{
        struct iavf_hw *avf = &hw->avf;

        /* Enable admin queue interrupt trigger */
        IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
                       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
        IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
                       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

        IAVF_WRITE_FLUSH(avf);
}

static inline void
ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
{
        struct iavf_hw *avf = &hw->avf;

        /* Disable all interrupt types */
        IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
        IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

        IAVF_WRITE_FLUSH(avf);
}

static void
ice_dcf_dev_interrupt_handler(void *param)
{
        struct ice_dcf_hw *hw = param;

        ice_dcf_disable_irq0(hw);

        ice_dcf_handle_virtchnl_msg(hw);

        ice_dcf_enable_irq0(hw);
}
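
/* Send one tracked virtchnl command and wait (sleeping between polls)
 * until the interrupt handler marks it complete or the retry budget is
 * exhausted.
 */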
int
ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
                             struct dcf_virtchnl_cmd *cmd)
{
        int i = 0;
        int err;

        if ((cmd->req_msg && !cmd->req_msglen) ||
            (!cmd->req_msg && cmd->req_msglen) ||
            (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
            (!cmd->rsp_msgbuf && cmd->rsp_buflen))
                return -EINVAL;

        rte_spinlock_lock(&hw->vc_cmd_send_lock);
        ice_dcf_vc_cmd_set(hw, cmd);

        err = ice_dcf_vc_cmd_send(hw, cmd);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
                goto ret;
        }

        do {
                if (!cmd->pending)
                        break;

                rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
        } while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

        if (cmd->v_ret != IAVF_SUCCESS) {
                err = -1;
                PMD_DRV_LOG(ERR,
                            "No response (%d times) or return failure (%d) for cmd %d",
                            i, cmd->v_ret, cmd->v_op);
        }

ret:
        ice_dcf_aq_cmd_clear(hw, cmd);
        rte_spinlock_unlock(&hw->vc_cmd_send_lock);
        return err;
}
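
/* Forward an AdminQ command from the ice base code to the PF: the
 * descriptor always goes in OP_DCF_CMD_DESC and, when present, the data
 * buffer is sent separately in OP_DCF_CMD_BUFF.
 */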
int
ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
                    void *buf, uint16_t buf_size)
{
        struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
        struct ice_dcf_hw *hw = dcf_hw;
        int err = 0;
        int i = 0;

        if ((buf && !buf_size) || (!buf && buf_size) ||
            buf_size > ICE_DCF_AQ_BUF_SZ)
                return -EINVAL;

        desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
        desc_cmd.req_msglen = sizeof(*desc);
        desc_cmd.req_msg = (uint8_t *)desc;
        desc_cmd.rsp_buflen = sizeof(*desc);
        desc_cmd.rsp_msgbuf = (uint8_t *)desc;

        if (buf == NULL)
                return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);

        desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);

        buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
        buff_cmd.req_msglen = buf_size;
        buff_cmd.req_msg = buf;
        buff_cmd.rsp_buflen = buf_size;
        buff_cmd.rsp_msgbuf = buf;

        rte_spinlock_lock(&hw->vc_cmd_send_lock);
        ice_dcf_vc_cmd_set(hw, &desc_cmd);
        ice_dcf_vc_cmd_set(hw, &buff_cmd);

        if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
            ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
                err = -1;
                PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
                goto ret;
        }

        do {
                if ((!desc_cmd.pending && !buff_cmd.pending) ||
                    (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
                    (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
                        break;

                rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
        } while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

        if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
                err = -1;
                PMD_DRV_LOG(ERR,
                            "No response (%d times) or return failure (desc: %d / buff: %d)",
                            i, desc_cmd.v_ret, buff_cmd.v_ret);
        }

ret:
        ice_dcf_aq_cmd_clear(hw, &desc_cmd);
        ice_dcf_aq_cmd_clear(hw, &buff_cmd);
        rte_spinlock_unlock(&hw->vc_cmd_send_lock);
        return err;
}
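
/* Refresh the VF resources and VSI map after the PF signals a VSI change
 * event; IRQ0 is masked while the no-irq request/response path is in use.
 */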
int
ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
        int err = 0;

        rte_spinlock_lock(&hw->vc_cmd_send_lock);

        rte_intr_disable(&pci_dev->intr_handle);
        ice_dcf_disable_irq0(hw);

        if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw) < 0)
                err = -1;

        rte_intr_enable(&pci_dev->intr_handle);
        ice_dcf_enable_irq0(hw);

        rte_spinlock_unlock(&hw->vc_cmd_send_lock);

        return err;
}
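
/* One-time DCF hardware setup: map BAR0 registers, bring up the admin
 * queue, negotiate the API version, fetch the VF resources and VSI map,
 * then register and enable the interrupt handler.
 */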
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        int ret;

        hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
        hw->avf.back = hw;

        hw->avf.bus.bus_id = pci_dev->addr.bus;
        hw->avf.bus.device = pci_dev->addr.devid;
        hw->avf.bus.func = pci_dev->addr.function;

        hw->avf.device_id = pci_dev->id.device_id;
        hw->avf.vendor_id = pci_dev->id.vendor_id;
        hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

        hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
        hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
        hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
        hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;

        rte_spinlock_init(&hw->vc_cmd_send_lock);
        rte_spinlock_init(&hw->vc_cmd_queue_lock);
        TAILQ_INIT(&hw->vc_cmd_queue);

        hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
        if (hw->arq_buf == NULL) {
                PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
                goto err;
        }

        ret = iavf_set_mac_type(&hw->avf);
        if (ret) {
                PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
                goto err;
        }

        ret = ice_dcf_check_reset_done(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "VF is still resetting");
                goto err;
        }

        ret = iavf_init_adminq(&hw->avf);
        if (ret) {
                PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
                goto err;
        }

        if (ice_dcf_init_check_api_version(hw)) {
                PMD_INIT_LOG(ERR, "check_api version failed");
                goto err_api;
        }

        hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
        if (hw->vf_res == NULL) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
                goto err_api;
        }

        if (ice_dcf_get_vf_resource(hw)) {
                PMD_INIT_LOG(ERR, "Failed to get VF resource");
                goto err_alloc;
        }

        if (ice_dcf_get_vf_vsi_map(hw) < 0) {
                PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
                ice_dcf_mode_disable(hw);
                goto err_alloc;
        }

        hw->eth_dev = eth_dev;
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   ice_dcf_dev_interrupt_handler, hw);
        rte_intr_enable(&pci_dev->intr_handle);
        ice_dcf_enable_irq0(hw);

        return 0;

err_alloc:
        rte_free(hw->vf_res);
err_api:
        iavf_shutdown_adminq(&hw->avf);
err:
        rte_free(hw->arq_buf);

        return -1;
}

void
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        ice_dcf_disable_irq0(hw);
        rte_intr_disable(intr_handle);
        rte_intr_callback_unregister(intr_handle,
                                     ice_dcf_dev_interrupt_handler, hw);

        ice_dcf_mode_disable(hw);
        iavf_shutdown_adminq(&hw->avf);

        rte_free(hw->arq_buf);
        rte_free(hw->vf_vsi_map);
        rte_free(hw->vf_res);
}