.node_delete = ice_dcf_node_delete,
};
+#define ICE_DCF_SCHED_TC_NODE 0xffff
+#define ICE_DCF_VFID 0
+
void
ice_dcf_tm_conf_init(struct rte_eth_dev *dev)
{
return -EINVAL;
}
/* check the vsi node id */
- if (node_id > tc_nb * hw->num_vfs) {
+ if (node_id > (uint32_t)(tc_nb * hw->num_vfs)) {
error->type = RTE_TM_ERROR_TYPE_NODE_ID;
error->message = "too large VSI id";
return -EINVAL;
return 0;
}
+
+/* Sanity-check the cached TM hierarchy before committing it to hardware.
+ *
+ * Requires the PF to have granted the VIRTCHNL QOS capability, every
+ * enabled TC to have a TC node, every VF to be bound to every TC node,
+ * and the VF vsi node ids to start at 0.
+ *
+ * Return ICE_SUCCESS on success, ICE_ERR_NOT_SUPPORTED or ICE_ERR_PARAM
+ * on failure.
+ */
+static int ice_dcf_commit_check(struct ice_dcf_hw *hw)
+{
+	struct ice_dcf_tm_node_list *tc_list = &hw->tm_conf.tc_list;
+	struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list;
+	struct ice_dcf_tm_node *tm_node;
+
+	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)) {
+		PMD_DRV_LOG(ERR, "Configure VF bandwidth is not supported");
+		return ICE_ERR_NOT_SUPPORTED;
+	}
+
+	/* check if all TC nodes are set; only the bit just above the last
+	 * configured TC is tested, so NOTE(review): this assumes
+	 * tc_valid_bits is a contiguous low-order mask -- confirm
+	 */
+	if (BIT(hw->tm_conf.nb_tc_node) & hw->ets_config->tc_valid_bits) {
+		PMD_DRV_LOG(ERR, "Not all enabled TC nodes are set");
+		return ICE_ERR_PARAM;
+	}
+
+	/* check if all VF vsi nodes are bound to all TCs */
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->reference_count != hw->num_vfs) {
+			PMD_DRV_LOG(ERR, "Not all VFs are binded to TC%u",
+				    tm_node->tc);
+			return ICE_ERR_PARAM;
+		}
+	}
+
+	/* check if the VF vsi node ids start with 0
+	 * NOTE(review): TAILQ_FIRST() is NULL on an empty vsi_list
+	 * (e.g. num_vfs == 0 passes the loop above); the dereference
+	 * below would then crash -- verify callers guarantee at least
+	 * one vsi node
+	 */
+	tm_node = TAILQ_FIRST(vsi_list);
+	if (tm_node->id != 0) {
+		PMD_DRV_LOG(ERR, "VF vsi node id must start with 0");
+		return ICE_ERR_PARAM;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/* Re-apply the stored bandwidth configuration of one VF.
+ *
+ * The port ETS configuration is re-queried first; if it differs from the
+ * cached copy the stored BW settings are considered stale and the replay
+ * is skipped (the freshly queried ETS config is kept in hw->ets_config).
+ *
+ * NOTE(review): assumes hw->qos_bw_cfg[vf_id] was populated by a prior
+ * hierarchy commit -- a NULL entry would be passed straight to
+ * ice_dcf_set_vf_bw(); confirm callers only replay committed VFs.
+ *
+ * Return ICE_SUCCESS on success (or a skipped replay), the
+ * ice_aq_query_port_ets() error code or ICE_ERR_CFG on failure.
+ */
+int
+ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
+{
+	struct ice_aqc_port_ets_elem old_ets_config;
+	struct ice_dcf_adapter *adapter;
+	struct ice_hw *parent_hw;
+	int ret, size;
+
+	adapter = hw->eth_dev->data->dev_private;
+	parent_hw = &adapter->parent.hw;
+
+	/* store the old ets config */
+	old_ets_config = *hw->ets_config;
+
+	ice_memset(hw->ets_config, 0, sizeof(*hw->ets_config), ICE_NONDMA_MEM);
+	ret = ice_aq_query_port_ets(parent_hw->port_info,
+				    hw->ets_config, sizeof(*hw->ets_config),
+				    NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
+		return ret;
+	}
+
+	/* ETS config changed: the stored BW settings no longer apply */
+	if (memcmp(&old_ets_config, hw->ets_config, sizeof(old_ets_config))) {
+		PMD_DRV_LOG(DEBUG, "ETS config changes, do not replay BW");
+		return ICE_SUCCESS;
+	}
+
+	/* variable-size message: one cfg element per configured TC */
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+		sizeof(struct virtchnl_dcf_bw_cfg) *
+		(hw->tm_conf.nb_tc_node - 1);
+
+	ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
+	if (ret) {
+		PMD_DRV_LOG(DEBUG, "VF %u BW replay failed", vf_id);
+		return ICE_ERR_CFG;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/* Zero the peak/committed shaper rates of every stored BW config and
+ * push the cleared configuration to hardware, one message per slot.
+ *
+ * NOTE(review): iterates from vf_id 0, so the hw->qos_bw_cfg[0] slot
+ * (which stores the TC-node config, not a VF config) is cleared and
+ * sent as well -- presumably intentional to clear TC-level BW; also
+ * assumes every qos_bw_cfg[] entry is non-NULL, i.e. this is only
+ * called after a successful hierarchy commit -- confirm.
+ *
+ * Return ICE_SUCCESS on success, ICE_ERR_CFG on failure.
+ */
+int
+ice_dcf_clear_bw(struct ice_dcf_hw *hw)
+{
+	uint16_t vf_id;
+	uint32_t tc;
+	int ret, size;
+
+	/* variable-size message: one cfg element per configured TC */
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+		sizeof(struct virtchnl_dcf_bw_cfg) *
+		(hw->tm_conf.nb_tc_node - 1);
+
+	for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) {
+		for (tc = 0; tc < hw->tm_conf.nb_tc_node; tc++) {
+			hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.peak = 0;
+			hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.committed = 0;
+		}
+		ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
+		if (ret) {
+			PMD_DRV_LOG(DEBUG, "VF %u BW clear failed", vf_id);
+			return ICE_ERR_CFG;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
int clear_on_fail,
__rte_unused struct rte_tm_error *error)
uint32_t port_bw, cir_total;
uint16_t size, vf_id;
uint8_t num_elem = 0;
- int i, ret_val = ICE_SUCCESS;
+ int i, ret_val;
- if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)) {
- PMD_DRV_LOG(ERR, "Configure VF bandwidth is not supported");
- ret_val = ICE_ERR_NOT_SUPPORTED;
- goto fail_clear;
+ /* check if port is stopped */
+ if (!adapter->parent.pf.adapter_stopped) {
+ PMD_DRV_LOG(ERR, "Please stop port first");
+ ret_val = ICE_ERR_NOT_READY;
+ goto err;
}
- /* check if all TC nodes are set */
- if (BIT(hw->tm_conf.nb_tc_node) & hw->ets_config->tc_valid_bits) {
- PMD_DRV_LOG(ERR, "Not all enabled TC nodes are set");
- ret_val = ICE_ERR_PARAM;
+ ret_val = ice_dcf_commit_check(hw);
+ if (ret_val)
goto fail_clear;
- }
size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
sizeof(struct virtchnl_dcf_bw_cfg) *
cir_total = 0;
/* init tc bw configuration */
-#define ICE_DCF_SCHED_TC_NODE 0xffff
tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE;
tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW;
tc_bw->num_elem = hw->tm_conf.nb_tc_node;
VIRTCHNL_DCF_BW_PIR | VIRTCHNL_DCF_BW_CIR;
}
- for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) {
+ /* start with VF1, skip VF0 since DCF does not need to configure
+ * bandwidth for itself
+ */
+ for (vf_id = 1; vf_id < hw->num_vfs; vf_id++) {
num_elem = 0;
vf_bw->vf_id = vf_id;
vf_bw->node_type = VIRTCHNL_DCF_TARGET_VF_BW;
num_elem++;
}
- /* check if all TC nodes are set with VF vsi nodes */
- if (num_elem != hw->tm_conf.nb_tc_node) {
- PMD_DRV_LOG(ERR, "VF%u vsi nodes are not set to all TC nodes, node id should be continuous",
- vf_id);
- ret_val = ICE_ERR_PARAM;
- goto fail_clear;
- }
-
vf_bw->num_elem = num_elem;
ret_val = ice_dcf_set_vf_bw(hw, vf_bw, size);
if (ret_val)
goto fail_clear;
- memset(vf_bw, 0, size);
+
+ hw->qos_bw_cfg[vf_id] = rte_zmalloc("vf_bw_cfg", size, 0);
+ if (!hw->qos_bw_cfg[vf_id]) {
+ ret_val = ICE_ERR_NO_MEMORY;
+ goto fail_clear;
+ }
+ /* store the bandwidth information for replay */
+ ice_memcpy(hw->qos_bw_cfg[vf_id], vf_bw, size,
+ ICE_NONDMA_TO_NONDMA);
+ ice_memset(vf_bw, 0, size, ICE_NONDMA_MEM);
}
/* check if total CIR is larger than port bandwidth */
if (ret_val)
goto fail_clear;
+ /* store TC node bw configuration */
+ hw->qos_bw_cfg[ICE_DCF_VFID] = rte_zmalloc("tc_bw_cfg", size, 0);
+ if (!hw->qos_bw_cfg[ICE_DCF_VFID]) {
+ ret_val = ICE_ERR_NO_MEMORY;
+ goto fail_clear;
+ }
+ ice_memcpy(hw->qos_bw_cfg[ICE_DCF_VFID], tc_bw, size,
+ ICE_NONDMA_TO_NONDMA);
+
hw->tm_conf.committed = true;
return ret_val;
ice_dcf_tm_conf_uninit(dev);
ice_dcf_tm_conf_init(dev);
}
+err:
return ret_val;
}