return 0;
}
+static int
+ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
+{
+ struct virtchnl_dcf_vsi_map *vsi_map;
+ uint32_t valid_msg_len;
+ uint16_t len;
+ int err;
+
+ err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
+ NULL, 0);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
+ return err;
+ }
+
+ err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
+ hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
+ &len);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
+ return err;
+ }
+
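+ /*
+ * The response ends with a flexible vf_vsi[] array declared with one
+ * element, so a valid message is sizeof(*vsi_map) plus (num_vfs - 1)
+ * extra entries.
+ */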
+ vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
+ valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
+ sizeof(*vsi_map);
+ if (vsi_map->num_vfs == 0 || len != valid_msg_len) {
+ PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
+ len);
+ return -EINVAL;
+ }
+
+ if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
+ PMD_DRV_LOG(ERR, "The number of VSI map entries (%u) doesn't match the number of VFs (%u)",
+ vsi_map->num_vfs, hw->num_vfs);
+ return -EINVAL;
+ }
+
+ len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
+
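+ /* First response: allocate the local copy and latch the VF count */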
+ if (!hw->vf_vsi_map) {
+ hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
+ if (!hw->vf_vsi_map) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
+ return -ENOMEM;
+ }
+
+ hw->num_vfs = vsi_map->num_vfs;
+ }
+
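+ /* Report 1 so the caller can treat an unchanged map as nothing to do */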
+ if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
+ PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
+ return 1;
+ }
+
+ rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
+ return 0;
+}
+
static int
ice_dcf_mode_disable(struct ice_dcf_hw *hw)
{
return err;
}
+int
+ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
+ int err = 0;
+
+ rte_spinlock_lock(&hw->vc_cmd_send_lock);
+
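+ /*
+ * The refresh uses the no-IRQ virtchnl helpers, so mask the admin
+ * queue interrupt while the queue is polled directly.
+ */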
+ rte_intr_disable(&pci_dev->intr_handle);
+ ice_dcf_disable_irq0(hw);
+
+ if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw))
+ err = -1;
+
+ rte_intr_enable(&pci_dev->intr_handle);
+ ice_dcf_enable_irq0(hw);
+
+ rte_spinlock_unlock(&hw->vc_cmd_send_lock);
+
+ return err;
+}
+
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
goto err_alloc;
}
+ if (ice_dcf_get_vf_vsi_map(hw) < 0) {
+ PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
+ ice_dcf_mode_disable(hw);
+ goto err_alloc;
+ }
+
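+ /* Cache the ethdev: the VSI update path needs its PCI interrupt handle */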
+ hw->eth_dev = eth_dev;
rte_intr_callback_register(&pci_dev->intr_handle,
ice_dcf_dev_interrupt_handler, hw);
rte_intr_enable(&pci_dev->intr_handle);
iavf_shutdown_adminq(&hw->avf);
rte_free(hw->arq_buf);
+ rte_free(hw->vf_vsi_map);
rte_free(hw->vf_res);
}
*/
#include <sys/types.h>
#include <sys/stat.h>
+#include <pthread.h>
#include <unistd.h>
+#include <rte_spinlock.h>
+
#include "ice_dcf_ethdev.h"
+#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */
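+
+/* Serializes the VSI map refresh threads spawned from PF event handling */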
+static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
+
+static __rte_always_inline void
+ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
+ uint16_t vsi_map)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
+ PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
+ return;
+ }
+
+ vsi_ctx = hw->vsi_ctx[vsi_handle];
+
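+ /*
+ * A set VALID bit means the VF still owns a VSI; a cleared bit means
+ * the VF has been disabled and its cached context must be released.
+ */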
+ if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
+ if (!vsi_ctx) {
+ vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+ if (!vsi_ctx) {
+ PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+ vsi_handle);
+ return;
+ }
+ }
+
+ vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+ VIRTCHNL_DCF_VF_VSI_ID_S;
+ hw->vsi_ctx[vsi_handle] = vsi_ctx;
+
+ PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+ vsi_handle, vsi_ctx->vsi_num);
+ } else {
+ hw->vsi_ctx[vsi_handle] = NULL;
+
+ ice_free(hw, vsi_ctx);
+
+ PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);
+ }
+}
+
+static void
+ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
+ uint16_t *vf_vsi_map)
+{
+ uint16_t vf_id;
+
+ for (vf_id = 0; vf_id < num_vfs; vf_id++)
+ ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
+}
+
+static void *
+ice_dcf_vsi_update_service_handler(void *param)
+{
+ struct ice_dcf_hw *hw = param;
+
+ /* Nothing joins this thread, so release its resources on exit */
+ pthread_detach(pthread_self());
+
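+ /* Give the PF time to finish the VSI reconfiguration before querying */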
+ usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
+
+ rte_spinlock_lock(&vsi_update_lock);
+
+ if (!ice_dcf_handle_vsi_update_event(hw)) {
+ struct ice_dcf_adapter *dcf_ad =
+ container_of(hw, struct ice_dcf_adapter, real_hw);
+
+ ice_dcf_update_vf_vsi_map(&dcf_ad->parent.hw,
+ hw->num_vfs, hw->vf_vsi_map);
+ }
+
+ rte_spinlock_unlock(&vsi_update_lock);
+
+ return NULL;
+}
+
void
-ice_dcf_handle_pf_event_msg(__rte_unused struct ice_dcf_hw *dcf_hw,
+ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen)
{
struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
+ pthread_t thread;
if (msglen < sizeof(struct virtchnl_pf_event)) {
PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
return;
}

switch (pf_msg->event) {
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+ if (pthread_create(&thread, NULL,
+ ice_dcf_vsi_update_service_handler, dcf_hw))
+ PMD_DRV_LOG(ERR, "Failed to start the VSI update thread");
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
+ case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
+ pf_msg->event_data.vf_vsi_map.vf_id,
+ pf_msg->event_data.vf_vsi_map.vsi_id);
+ if (pthread_create(&thread, NULL,
+ ice_dcf_vsi_update_service_handler, dcf_hw))
+ PMD_DRV_LOG(ERR, "Failed to start the VSI update thread");
+ break;
default:
PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
break;
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
if (rte_is_valid_assigned_ether_addr(mac))
rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);