}
v_op = rte_le_to_cpu_32(info->desc.cookie_high);
- if (unlikely(v_op == VIRTCHNL_OP_EVENT))
+ if (v_op == VIRTCHNL_OP_EVENT) {
+ if (hw->vc_event_msg_cb != NULL)
+ hw->vc_event_msg_cb(hw,
+ info->msg_buf,
+ info->msg_len);
return;
+ }
v_ret = rte_le_to_cpu_32(info->desc.cookie_low);
int err, i;
caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
- VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
VF_BASE_MODE_OFFLOADS;
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
return 0;
}
+/*
+ * Query the PF for the VF ID -> VSI ID mapping table via
+ * VIRTCHNL_OP_DCF_GET_VSI_MAP and cache it in hw->vf_vsi_map
+ * (allocated on first use).
+ *
+ * Returns 0 when a new/changed map was stored, 1 when the map is
+ * unchanged from the cached copy, or a negative errno on failure.
+ */
+static int
+ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_dcf_vsi_map *vsi_map;
+	uint32_t valid_msg_len;
+	uint16_t len;
+	int err;
+
+	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
+					  NULL, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
+		return err;
+	}
+
+	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
+					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
+					  &len);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
+		return err;
+	}
+
+	/*
+	 * The reply is a struct virtchnl_dcf_vsi_map header followed by
+	 * num_vfs entries of vf_vsi[]; the struct presumably already
+	 * contains one entry, hence the (num_vfs - 1) term - confirm
+	 * against the virtchnl header.
+	 * NOTE(review): if num_vfs were 0 this expression underflows,
+	 * but the resulting bogus length then fails the check below, so
+	 * it fails safe.
+	 */
+	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
+	valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
+			sizeof(*vsi_map);
+	if (len != valid_msg_len) {
+		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
+			    len);
+		return -EINVAL;
+	}
+
+	/* Once latched (num_vfs != 0), the VF count must not change
+	 * across refreshes.
+	 */
+	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
+		PMD_DRV_LOG(ERR, "The number VSI map (%u) doesn't match the number of VFs (%u)",
+			    vsi_map->num_vfs, hw->num_vfs);
+		return -EINVAL;
+	}
+
+	/* Reuse 'len' for the payload size of the vf_vsi[] array only. */
+	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
+
+	/* First call: allocate the cache and latch the VF count. */
+	if (!hw->vf_vsi_map) {
+		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
+		if (!hw->vf_vsi_map) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
+			return -ENOMEM;
+		}
+
+		hw->num_vfs = vsi_map->num_vfs;
+	}
+
+	/* Distinct success codes: 1 = "no change", 0 = "map updated". */
+	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
+		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
+		return 1;
+	}
+
+	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
+	return 0;
+}
+
+/*
+ * Ask the PF to take this VF out of DCF mode via
+ * VIRTCHNL_OP_DCF_DISABLE. Used on init-failure and teardown paths.
+ *
+ * Returns 0 on success, the send error code if the request could not
+ * be issued, or -1 if no valid response was received.
+ */
+static int
+ice_dcf_mode_disable(struct ice_dcf_hw *hw)
+{
+	int err;
+
+	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
+					  NULL, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
+		return err;
+	}
+
+	/* Response payload is ignored; only completion matters here. */
+	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
+					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to get response of OP_DCF_DISABLE %d",
+			    err);
+		return -1;
+	}
+
+	return 0;
+}
+
static int
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
{
return err;
}
+/*
+ * Forward an AdminQ command to the PF over virtchnl on behalf of the
+ * DCF. The descriptor is sent as VIRTCHNL_OP_DCF_CMD_DESC and the
+ * optional indirect buffer as VIRTCHNL_OP_DCF_CMD_BUFF; the responses
+ * are written back in place into @desc and @buf (rsp_msgbuf aliases
+ * them).
+ *
+ * @dcf_hw:   struct ice_dcf_hw * (typed void * - presumably to match
+ *            a callback signature; confirm at the registration site)
+ * @desc:     AdminQ descriptor to send, overwritten with the response
+ * @buf:      optional indirect data buffer, or NULL
+ * @buf_size: length of @buf; must be nonzero iff @buf is non-NULL and
+ *            at most ICE_DCF_AQ_BUF_SZ
+ *
+ * Returns 0 on success, -EINVAL on inconsistent arguments, -1 on send
+ * failure, timeout, or a failing virtchnl status.
+ */
+int
+ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
+		    void *buf, uint16_t buf_size)
+{
+	struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
+	struct ice_dcf_hw *hw = dcf_hw;
+	int err = 0;
+	int i = 0;
+
+	if ((buf && !buf_size) || (!buf && buf_size) ||
+	    buf_size > ICE_DCF_AQ_BUF_SZ)
+		return -EINVAL;
+
+	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
+	desc_cmd.req_msglen = sizeof(*desc);
+	desc_cmd.req_msg = (uint8_t *)desc;
+	desc_cmd.rsp_buflen = sizeof(*desc);
+	desc_cmd.rsp_msgbuf = (uint8_t *)desc;
+
+	/* No indirect buffer: a single descriptor exchange suffices. */
+	if (buf == NULL)
+		return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);
+
+	/* Mark the descriptor as carrying an external data buffer. */
+	desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);
+
+	buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
+	buff_cmd.req_msglen = buf_size;
+	buff_cmd.req_msg = buf;
+	buff_cmd.rsp_buflen = buf_size;
+	buff_cmd.rsp_msgbuf = buf;
+
+	/* Register both commands before sending either, under the send
+	 * lock; their 'pending' flags are presumably cleared by the
+	 * virtchnl response path elsewhere in this file.
+	 */
+	rte_spinlock_lock(&hw->vc_cmd_send_lock);
+	ice_dcf_vc_cmd_set(hw, &desc_cmd);
+	ice_dcf_vc_cmd_set(hw, &buff_cmd);
+
+	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
+	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
+		err = -1;
+		PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
+		goto ret;
+	}
+
+	/* Poll until both responses complete, or bail out early once a
+	 * completed command reports a failure status.
+	 */
+	do {
+		if ((!desc_cmd.pending && !buff_cmd.pending) ||
+		    (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
+		    (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
+			break;
+
+		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
+	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
+
+	/* Covers both timeout (v_ret never set to success) and explicit
+	 * failure returns from the PF.
+	 */
+	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
+		err = -1;
+		PMD_DRV_LOG(ERR,
+			    "No response (%d times) or return failure (desc: %d / buff: %d)",
+			    i, desc_cmd.v_ret, buff_cmd.v_ret);
+	}
+
+ret:
+	ice_dcf_aq_cmd_clear(hw, &desc_cmd);
+	ice_dcf_aq_cmd_clear(hw, &buff_cmd);
+	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
+
+	return err;
+}
+
+/*
+ * Refresh DCF state after the PF signals a VSI update event:
+ * re-read the VF resources and the VF/VSI map.
+ *
+ * The PCI interrupt and adminq IRQ0 are masked and vc_cmd_send_lock
+ * is held for the duration so the *_no_irq request/response helpers
+ * can poll the adminq without racing the interrupt handler.
+ *
+ * Returns 0 on success, -1 if either refresh step fails. Note that
+ * ice_dcf_get_vf_vsi_map() returning 1 ("map unchanged") is success,
+ * hence the '< 0' comparison rather than a truthiness check.
+ */
+int
+ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
+	int err = 0;
+
+	rte_spinlock_lock(&hw->vc_cmd_send_lock);
+
+	rte_intr_disable(&pci_dev->intr_handle);
+	ice_dcf_disable_irq0(hw);
+
+	if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw) < 0)
+		err = -1;
+
+	rte_intr_enable(&pci_dev->intr_handle);
+	ice_dcf_enable_irq0(hw);
+
+	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
+
+	return err;
+}
+
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
goto err_alloc;
}
+ if (ice_dcf_get_vf_vsi_map(hw) < 0) {
+ PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
+ ice_dcf_mode_disable(hw);
+ goto err_alloc;
+ }
+
+ hw->eth_dev = eth_dev;
rte_intr_callback_register(&pci_dev->intr_handle,
ice_dcf_dev_interrupt_handler, hw);
rte_intr_enable(&pci_dev->intr_handle);
rte_intr_callback_unregister(intr_handle,
ice_dcf_dev_interrupt_handler, hw);
+ ice_dcf_mode_disable(hw);
iavf_shutdown_adminq(&hw->avf);
rte_free(hw->arq_buf);
+ rte_free(hw->vf_vsi_map);
rte_free(hw->vf_res);
}