parent = ice_sched_find_node_by_teid(pi->root,
LE32_TO_CPU(info->parent_teid));
if (!parent) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Parent Node not found for parent_teid=0x%x\n",
+ ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
LE32_TO_CPU(info->parent_teid));
return ICE_ERR_PARAM;
}
enum ice_status status;
u16 buf_size;
- buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+ buf_size = ice_struct_size(buf, teid, num_nodes);
buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
u16 ln;
+ struct ice_hw *hw = pi->hw;
- for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
struct ice_aqc_rl_profile_info *rl_prof_elem;
struct ice_aqc_rl_profile_info *rl_prof_tmp;
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
- &pi->rl_prof_list[ln],
+ &hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
- struct ice_hw *hw = pi->hw;
enum ice_status status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Remove rl profile failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
/* On error, free mem required */
LIST_DEL(&rl_prof_elem->list_entry);
ice_free(hw, rl_prof_elem);
u16 buf_size;
u32 teid;
- buf_size = ice_struct_size(buf, generic, num_nodes - 1);
+ buf_size = ice_struct_size(buf, generic, num_nodes);
buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
for (i = 0; i < num_nodes; i++) {
status = ice_sched_add_node(pi, layer, &buf->generic[i]);
if (status != ICE_SUCCESS) {
- ice_debug(hw, ICE_DBG_SCHED,
- "add nodes in SW DB failed status =%d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
break;
}
teid = LE32_TO_CPU(buf->generic[i].node_teid);
new_node = ice_sched_find_node_by_teid(parent, teid);
if (!new_node) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Node is missing for teid =%d\n", teid);
+ ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
break;
}
pi->port_state = ICE_SCHED_PORT_STATE_READY;
ice_init_lock(&pi->sched_lock);
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- INIT_LIST_HEAD(&pi->rl_prof_list[i]);
+ INIT_LIST_HEAD(&hw->rl_prof_list[i]);
err_init_port:
if (status && pi->root) {
ice_release_lock(&pi->sched_lock);
if (!node)
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Node not found for teid=0x%x\n", teid);
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);
return node;
}
}
/**
- * ice_sched_get_free_qgrp - Scan all Q group siblings and find a free node
+ * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
* @pi: port information structure
* @vsi_node: software VSI handle
- * @qgrp_node: first Q group node identified for scanning
+ * @qgrp_node: first queue group node identified for scanning
* @owner: LAN or RDMA
*
- * This function retrieves a free LAN or RDMA Q group node by scanning
- * qgrp_node and its siblings for the Q group with the fewest number
+ * This function retrieves a free LAN or RDMA queue group node by scanning
+ * qgrp_node and its siblings for the queue group with the fewest number
* of queues currently assigned.
*/
static struct ice_sched_node *
if (!min_children)
return qgrp_node;
min_qgrp = qgrp_node;
- /* scan all Q groups until find a node which has less than the
- * minimum number of children. This way all Q group nodes get
+	/* scan all queue groups until we find a node which has less than the
+ * minimum number of children. This way all queue group nodes get
* equal number of shares and active. The bandwidth will be equally
- * distributed across all Qs.
+ * distributed across all queues.
*/
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < min_children &&
qgrp_node->owner == owner) {
- /* replace the new min Q group node */
+ /* replace the new min queue group node */
min_qgrp = qgrp_node;
min_children = min_qgrp->num_children;
/* break if it has no children, */
continue;
if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "VSI has leaf nodes in TC %d\n", i);
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
status = ICE_ERR_IN_USE;
goto exit_sched_rm_vsi_cfg;
}
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ u16 buf_len;
hw = pi->hw;
hw->max_children[parent->tx_sched_layer])
return ICE_ERR_AQ_FULL;
- buf = (struct ice_aqc_move_elem *)ice_malloc(hw, sizeof(*buf));
+ buf_len = ice_struct_size(buf, teid, 1);
+ buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = CPU_TO_LE16(1);
- status = ice_aq_move_sched_elems(hw, 1, buf, sizeof(*buf),
+ status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
status = ICE_ERR_CFG;
if (status)
break;
- if (agg_id != ICE_DFLT_AGG_ID)
- ice_set_bit(tc, agg_vsi_info->tc_bitmap);
- else
- ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
- }
- /* If VSI moved back to default aggregator, delete agg_vsi_info. */
- if (!ice_is_any_bit_set(agg_vsi_info->tc_bitmap,
- ICE_MAX_TRAFFIC_CLASS)) {
- LIST_DEL(&agg_vsi_info->list_entry);
- ice_free(hw, agg_vsi_info);
+ ice_set_bit(tc, agg_vsi_info->tc_bitmap);
}
return status;
}
/**
* ice_sched_rm_unused_rl_prof - remove unused RL profile
- * @pi: port information structure
+ * @hw: pointer to the hardware structure
*
* This function removes unused rate limit profiles from the HW and
* SW DB. The caller needs to hold scheduler lock.
*/
-static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
+static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
{
u16 ln;
- for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
struct ice_aqc_rl_profile_info *rl_prof_elem;
struct ice_aqc_rl_profile_info *rl_prof_tmp;
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
- &pi->rl_prof_list[ln],
+ &hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
- if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Removed rl profile\n");
+ if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
+ ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
}
}
}
ice_free(pi->hw, agg_info);
/* Remove unused RL profile IDs from HW and SW DB */
- ice_sched_rm_unused_rl_prof(pi);
+ ice_sched_rm_unused_rl_prof(pi->hw);
exit_ice_rm_agg_cfg:
ice_release_lock(&pi->sched_lock);
/**
* ice_sched_add_rl_profile - add RL profile
- * @pi: port information structure
+ * @hw: pointer to the hardware structure
* @rl_type: type of rate limit BW - min, max, or shared
* @bw: bandwidth in Kbps - Kilo bits per sec
* @layer_num: specifies in which layer to create profile
* The caller needs to hold the scheduler lock.
*/
static struct ice_aqc_rl_profile_info *
-ice_sched_add_rl_profile(struct ice_port_info *pi,
- enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
+ u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
enum ice_status status;
- struct ice_hw *hw;
u8 profile_type;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return NULL;
}
- if (!pi)
+ if (!hw)
return NULL;
- hw = pi->hw;
- LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type && rl_prof_elem->bw == bw)
/* Good entry - add in the list */
rl_prof_elem->prof_id_ref = 0;
- LIST_ADD(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
+ LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
return rl_prof_elem;
exit_add_rl_prof:
/**
* ice_sched_rm_rl_profile - remove RL profile ID
- * @pi: port information structure
+ * @hw: pointer to the hardware structure
* @layer_num: layer number where profiles are saved
* @profile_type: profile type like EIR, CIR, or SRL
* @profile_id: profile ID to remove
* scheduler lock.
*/
static enum ice_status
-ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
+ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return ICE_ERR_PARAM;
/* Check the existing list for RL profile */
- LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type &&
rl_prof_elem->prof_id_ref--;
/* Remove old profile ID from database */
- status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
+ status = ice_sched_del_rl_profile(hw, rl_prof_elem);
if (status && status != ICE_ERR_IN_USE)
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Remove rl profile failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
if (status == ICE_ERR_IN_USE)
old_id == ICE_SCHED_INVAL_PROF_ID)
return ICE_SUCCESS;
- return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
+ return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
}
/**
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
- rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
+ rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
if (!rl_prof_info)
return status;
old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
return ICE_SUCCESS;
- return ice_sched_rm_rl_profile(pi, layer_num,
+ return ice_sched_rm_rl_profile(hw, layer_num,
rl_prof_info->profile.flags &
ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
return ICE_ERR_PARAM;
hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */
- ice_sched_rm_unused_rl_prof(pi);
+ ice_sched_rm_unused_rl_prof(hw);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
ice_release_lock(&pi->sched_lock);
}
+/**
+ * ice_sched_replay_root_node_bw - replay root node BW
+ * @pi: port information structure
+ *
+ * Replay root node BW settings. Returns ICE_ERR_PARAM if the port has no
+ * associated HW structure, otherwise the status of replaying the saved
+ * root node BW information.
+ */
+enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
+{
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!pi->hw)
+ return ICE_ERR_PARAM;
+ ice_acquire_lock(&pi->sched_lock);
+
+ status = ice_sched_replay_node_bw(pi->hw, pi->root,
+ &pi->root_node_bw_t_info);
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
/**
* ice_sched_replay_tc_node_bw - replay TC node(s) BW
* @pi: port information structure