net/ice: support QoS config VF bandwidth in DCF
[dpdk.git] / drivers / net / ice / ice_dcf_parent.c
index 37f0e2b..c59cd0b 100644 (file)
 #define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL    100000 /* us */
 static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
 
+/* Context handed to the VSI-update service thread. Heap-allocated by
+ * start_vsi_reset_thread() and freed by the thread handler once the
+ * reset event has been processed.
+ */
+struct ice_dcf_reset_event_param {
+       struct ice_dcf_hw *dcf_hw;
+
+       bool vfr; /* VF reset event */
+       uint16_t vf_id; /* The reset VF ID */
+};
+
 static __rte_always_inline void
 ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
                       uint16_t vsi_map)
 {
        struct ice_vsi_ctx *vsi_ctx;
+       bool first_update = false;
+       uint16_t new_vsi_num;
 
        if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
                PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
@@ -35,11 +44,25 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
                                            vsi_handle);
                                return;
                        }
+                       hw->vsi_ctx[vsi_handle] = vsi_ctx;
+                       first_update = true;
                }
 
-               vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
-                                             VIRTCHNL_DCF_VF_VSI_ID_S;
-               hw->vsi_ctx[vsi_handle] = vsi_ctx;
+               new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+                       VIRTCHNL_DCF_VF_VSI_ID_S;
+
+               /* Redirect rules if vsi mapping table changes. */
+               if (!first_update) {
+                       struct ice_flow_redirect rd;
+
+                       memset(&rd, 0, sizeof(struct ice_flow_redirect));
+                       rd.type = ICE_FLOW_REDIRECT_VSI;
+                       rd.vsi_handle = vsi_handle;
+                       rd.new_vsi_num = new_vsi_num;
+                       ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
+               } else {
+                       vsi_ctx->vsi_num = new_vsi_num;
+               }
 
                PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
                            vsi_handle, vsi_ctx->vsi_num);
@@ -62,34 +85,142 @@ ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
                ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
 }
 
+/* Record the PF's VSI number in the parent hw's VSI context table at
+ * slot @pf_vsi_idx, allocating the context entry on first use. Logs and
+ * returns without side effects on an out-of-range index or allocation
+ * failure.
+ */
+static void
+ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
+                       uint16_t pf_vsi_num)
+{
+       struct ice_vsi_ctx *vsi_ctx;
+
+       if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
+               PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
+               return;
+       }
+
+       /* Reuse an existing context entry if one is already mapped. */
+       vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
+
+       if (!vsi_ctx)
+               vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+
+       if (!vsi_ctx) {
+               PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+                               pf_vsi_idx);
+               return;
+       }
+
+       vsi_ctx->vsi_num = pf_vsi_num;
+       hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
+
+       /* NOTE(review): message says "VF%u" but pf_vsi_idx is the PF's VSI
+        * slot index — consider rewording in a follow-up.
+        */
+       PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+                       pf_vsi_idx, vsi_ctx->vsi_num);
+}
+
 static void*
 ice_dcf_vsi_update_service_handler(void *param)
 {
-       struct ice_dcf_hw *hw = param;
+       /* param is a heap-allocated ice_dcf_reset_event_param; this thread
+        * owns it and frees it before returning.
+        */
+       struct ice_dcf_reset_event_param *reset_param = param;
+       struct ice_dcf_hw *hw = reset_param->dcf_hw;
+       struct ice_dcf_adapter *adapter;
 
-       usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
+       /* Detach so the thread's resources are reclaimed automatically;
+        * nobody joins this service thread.
+        */
+       pthread_detach(pthread_self());
+
+       /* Give the PF time to finish the VSI update before querying it. */
+       rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
 
        rte_spinlock_lock(&vsi_update_lock);
 
-       if (!ice_dcf_handle_vsi_update_event(hw)) {
-               struct ice_dcf_adapter *dcf_ad =
-                       container_of(hw, struct ice_dcf_adapter, real_hw);
+       adapter = container_of(hw, struct ice_dcf_adapter, real_hw);
 
-               ice_dcf_update_vf_vsi_map(&dcf_ad->parent.hw,
+       /* Refresh the VF->VSI map only if the update event was handled. */
+       if (!ice_dcf_handle_vsi_update_event(hw))
+               ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
                                          hw->num_vfs, hw->vf_vsi_map);
+
+       /* On a VF reset, re-apply VLAN config on the VF's representor if
+        * that representor port is currently started.
+        */
+       if (reset_param->vfr && adapter->repr_infos) {
+               struct rte_eth_dev *vf_rep_eth_dev =
+                       adapter->repr_infos[reset_param->vf_id].vf_rep_eth_dev;
+               if (vf_rep_eth_dev && vf_rep_eth_dev->data->dev_started) {
+                       PMD_DRV_LOG(DEBUG, "VF%u representor is resetting",
+                                   reset_param->vf_id);
+                       ice_dcf_vf_repr_init_vlan(vf_rep_eth_dev);
+               }
        }
 
        rte_spinlock_unlock(&vsi_update_lock);
 
+       /* Allocated by start_vsi_reset_thread(); release it here. */
+       free(param);
+
        return NULL;
 }
 
+/* Spawn a detached control thread running
+ * ice_dcf_vsi_update_service_handler() for a PF reset/VSI-map event.
+ * @vfr marks a VF reset and @vf_id identifies the reset VF (only
+ * meaningful when vfr is true). The event parameters are malloc'ed here
+ * and freed by the handler thread; on thread-creation failure they are
+ * freed here and the event is dropped with an error log.
+ */
+static void
+start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
+{
+#define THREAD_NAME_LEN        16
+       struct ice_dcf_reset_event_param *param;
+       char name[THREAD_NAME_LEN];
+       pthread_t thread;
+       int ret;
+
+       param = malloc(sizeof(*param));
+       if (!param) {
+               PMD_DRV_LOG(ERR, "Failed to allocate the memory for reset handling");
+               return;
+       }
+
+       param->dcf_hw = dcf_hw;
+       param->vfr = vfr;
+       param->vf_id = vf_id;
+
+       snprintf(name, sizeof(name), "ice-reset-%u", vf_id);
+       ret = rte_ctrl_thread_create(&thread, name, NULL,
+                                    ice_dcf_vsi_update_service_handler, param);
+       if (ret != 0) {
+               PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
+               free(param);
+       }
+}
+
+/* Map a virtchnl link-speed enum to the link speed in Mbps;
+ * returns 0 for unknown enum values.
+ */
+static uint32_t
+ice_dcf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
+{
+       uint32_t speed;
+
+       switch (virt_link_speed) {
+       case VIRTCHNL_LINK_SPEED_100MB:
+               speed = 100;
+               break;
+       case VIRTCHNL_LINK_SPEED_1GB:
+               speed = 1000;
+               break;
+       case VIRTCHNL_LINK_SPEED_10GB:
+               speed = 10000;
+               break;
+       case VIRTCHNL_LINK_SPEED_40GB:
+               speed = 40000;
+               break;
+       case VIRTCHNL_LINK_SPEED_20GB:
+               speed = 20000;
+               break;
+       case VIRTCHNL_LINK_SPEED_25GB:
+               speed = 25000;
+               break;
+       case VIRTCHNL_LINK_SPEED_2_5GB:
+               speed = 2500;
+               break;
+       case VIRTCHNL_LINK_SPEED_5GB:
+               speed = 5000;
+               break;
+       default:
+               speed = 0;
+               break;
+       }
+
+       return speed;
+}
+
 void
 ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
                            uint8_t *msg, uint16_t msglen)
 {
        struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
-       pthread_t thread;
 
        if (msglen < sizeof(struct virtchnl_pf_event)) {
                PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
@@ -99,11 +230,23 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
        switch (pf_msg->event) {
        case VIRTCHNL_EVENT_RESET_IMPENDING:
                PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
-               pthread_create(&thread, NULL,
-                              ice_dcf_vsi_update_service_handler, dcf_hw);
+               start_vsi_reset_thread(dcf_hw, false, 0);
                break;
        case VIRTCHNL_EVENT_LINK_CHANGE:
                PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
+               dcf_hw->link_up = pf_msg->event_data.link_event.link_status;
+               if (dcf_hw->vf_res->vf_cap_flags &
+                       VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+                       dcf_hw->link_speed =
+                               pf_msg->event_data.link_event_adv.link_speed;
+               } else {
+                       enum virtchnl_link_speed speed;
+                       speed = pf_msg->event_data.link_event.link_speed;
+                       dcf_hw->link_speed = ice_dcf_convert_link_speed(speed);
+               }
+               ice_dcf_link_update(dcf_hw->eth_dev, 0);
+               rte_eth_dev_callback_process(dcf_hw->eth_dev,
+                       RTE_ETH_EVENT_INTR_LSC, NULL);
                break;
        case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
                PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
@@ -112,8 +255,8 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
                PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
                            pf_msg->event_data.vf_vsi_map.vf_id,
                            pf_msg->event_data.vf_vsi_map.vsi_id);
-               pthread_create(&thread, NULL,
-                              ice_dcf_vsi_update_service_handler, dcf_hw);
+               start_vsi_reset_thread(dcf_hw, true,
+                                      pf_msg->event_data.vf_vsi_map.vf_id);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
@@ -121,6 +264,29 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
        }
 }
 
+/* Allocate real_hw->ets_config and fill it with the port's ETS
+ * configuration via the admin queue. On query failure the buffer is
+ * released and ets_config reset to NULL; returns ICE_SUCCESS, an AQ
+ * error code, or ICE_ERR_NO_MEMORY.
+ */
+static int
+ice_dcf_query_port_ets(struct ice_hw *parent_hw, struct ice_dcf_hw *real_hw)
+{
+       int ret;
+
+       real_hw->ets_config = (struct ice_aqc_port_ets_elem *)
+                       ice_malloc(real_hw, sizeof(*real_hw->ets_config));
+       if (!real_hw->ets_config)
+               return ICE_ERR_NO_MEMORY;
+
+       ret = ice_aq_query_port_ets(parent_hw->port_info,
+                       real_hw->ets_config, sizeof(*real_hw->ets_config),
+                       NULL);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
+               /* NOTE(review): buffer came from ice_malloc() but is freed
+                * with rte_free() — presumably ice_malloc wraps rte_malloc
+                * in this PMD; confirm the allocators match.
+                */
+               rte_free(real_hw->ets_config);
+               real_hw->ets_config = NULL;
+               return ret;
+       }
+
+       return ICE_SUCCESS;
+}
+
 static int
 ice_dcf_init_parent_hw(struct ice_hw *hw)
 {
@@ -157,7 +323,7 @@ ice_dcf_init_parent_hw(struct ice_hw *hw)
 
        /* Initialize port_info struct with PHY capabilities */
        status = ice_aq_get_phy_caps(hw->port_info, false,
-                                    ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
+                                    ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
        ice_free(hw, pcaps);
        if (status)
                goto err_unroll_alloc;
@@ -210,20 +376,42 @@ ice_dcf_request_pkg_name(struct ice_hw *hw, char *pkg_name)
 {
        struct ice_dcf_adapter *dcf_adapter =
                        container_of(hw, struct ice_dcf_adapter, parent.hw);
+       struct virtchnl_pkg_info pkg_info;
+       struct dcf_virtchnl_cmd vc_cmd;
+       uint64_t dsn;
+
+       vc_cmd.v_op = VIRTCHNL_OP_DCF_GET_PKG_INFO;
+       vc_cmd.req_msglen = 0;
+       vc_cmd.req_msg = NULL;
+       vc_cmd.rsp_buflen = sizeof(pkg_info);
+       vc_cmd.rsp_msgbuf = (uint8_t *)&pkg_info;
 
-       /* TODO: check with DSN firstly by iAVF */
-       PMD_INIT_LOG(DEBUG,
-                    "DCF VSI_ID = %u",
-                    dcf_adapter->real_hw.vsi_id);
+       if (ice_dcf_execute_virtchnl_cmd(&dcf_adapter->real_hw, &vc_cmd))
+               goto pkg_file_direct;
+
+       rte_memcpy(&dsn, pkg_info.dsn, sizeof(dsn));
+
+       snprintf(pkg_name, ICE_MAX_PKG_FILENAME_SIZE,
+                ICE_PKG_FILE_SEARCH_PATH_UPDATES "ice-%016llx.pkg",
+                (unsigned long long)dsn);
+       if (!ice_access(pkg_name, 0))
+               return 0;
+
+       snprintf(pkg_name, ICE_MAX_PKG_FILENAME_SIZE,
+                ICE_PKG_FILE_SEARCH_PATH_DEFAULT "ice-%016llx.pkg",
+                (unsigned long long)dsn);
+       if (!ice_access(pkg_name, 0))
+               return 0;
 
+pkg_file_direct:
        snprintf(pkg_name,
                 ICE_MAX_PKG_FILENAME_SIZE, "%s", ICE_PKG_FILE_UPDATES);
-       if (!access(pkg_name, 0))
+       if (!ice_access(pkg_name, 0))
                return 0;
 
        snprintf(pkg_name,
                 ICE_MAX_PKG_FILENAME_SIZE, "%s", ICE_PKG_FILE_DEFAULT);
-       if (!access(pkg_name, 0))
+       if (!ice_access(pkg_name, 0))
                return 0;
 
        return -1;
@@ -294,9 +482,16 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
        const struct rte_ether_addr *mac;
        int err;
 
-       parent_adapter->eth_dev = eth_dev;
        parent_adapter->pf.adapter = parent_adapter;
        parent_adapter->pf.dev_data = eth_dev->data;
+       /* create a dummy main_vsi */
+       parent_adapter->pf.main_vsi =
+               rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
+       if (!parent_adapter->pf.main_vsi)
+               return -ENOMEM;
+       parent_adapter->pf.main_vsi->adapter = parent_adapter;
+       parent_adapter->pf.adapter_stopped = 1;
+
        parent_hw->back = parent_adapter;
        parent_hw->mac_type = ICE_MAC_GENERIC;
        parent_hw->vendor_id = ICE_INTEL_VENDOR_ID;
@@ -314,6 +509,15 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
                return err;
        }
 
+       if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
+               err = ice_dcf_query_port_ets(parent_hw, hw);
+               if (err) {
+                       PMD_INIT_LOG(ERR, "failed to query port ets with error %d",
+                                    err);
+                       goto uninit_hw;
+               }
+       }
+
        err = ice_dcf_load_pkg(parent_hw);
        if (err) {
                PMD_INIT_LOG(ERR, "failed to load package with error %d",
@@ -322,14 +526,18 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
        }
        parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
 
+       parent_adapter->pf.main_vsi->idx = hw->num_vfs;
+       ice_dcf_update_pf_vsi_map(parent_hw,
+                       parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
+
+       ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
        err = ice_flow_init(parent_adapter);
        if (err) {
                PMD_INIT_LOG(ERR, "Failed to initialize flow");
                goto uninit_hw;
        }
 
-       ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
-
        mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
        if (rte_is_valid_assigned_ether_addr(mac))
                rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);