/**
* ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
- * @hw: pointer to the HW struct
+ * @pi: pointer to the port information structure
* @tc_node: pointer to the TC node
* @agg_id: aggregator ID
*
 * This function retrieves an aggregator node for a given aggregator ID from
 * a given TC branch
*/
static struct ice_sched_node *
-ice_sched_get_agg_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
+ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u32 agg_id)
{
struct ice_sched_node *node;
+ struct ice_hw *hw = pi->hw;
u8 agg_layer;
+ if (!hw)
+ return NULL;
agg_layer = ice_sched_get_agg_layer(hw);
- node = ice_sched_get_first_node(hw->port_info, tc_node, agg_layer);
+ node = ice_sched_get_first_node(pi, tc_node, agg_layer);
/* Check whether it already exists */
while (node) {
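For context, a minimal caller sketch under the new signature (the example_* helper below is hypothetical and assumes the usual ice driver headers): the aggregator lookup is now scoped to the supplied port_info rather than hw->port_info.

/* Hypothetical helper: look up an aggregator node on a specific port. */
static struct ice_sched_node *
example_get_agg_node(struct ice_port_info *pi, u8 tc, u32 agg_id)
{
	struct ice_sched_node *tc_node;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return NULL;
	return ice_sched_get_agg_node(pi, tc_node, agg_id);
}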
if (!tc_node)
return ICE_ERR_CFG;
- agg_node = ice_sched_get_agg_node(pi->hw, tc_node, agg_id);
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
return ICE_ERR_DOES_NOT_EXIST;
if (!tc_node)
return ICE_ERR_CFG;
- agg_node = ice_sched_get_agg_node(hw, tc_node, agg_id);
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
return ICE_ERR_DOES_NOT_EXIST;
if (!tc_node)
return ICE_ERR_CFG;
- agg_node = ice_sched_get_agg_node(hw, tc_node, agg_id);
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
/* Does Agg node already exist ? */
if (agg_node)
return status;
u8 *q_prio)
{
enum ice_status status = ICE_ERR_PARAM;
- struct ice_hw *hw = pi->hw;
u16 i;
ice_acquire_lock(&pi->sched_lock);
break;
}
/* Configure Priority */
- status = ice_sched_cfg_sibl_node_prio(hw, node, q_prio[i]);
+ status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
if (status)
break;
}
if (!tc_node)
goto exit_agg_priority_per_tc;
- agg_node = ice_sched_get_agg_node(hw, tc_node, agg_id);
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
goto exit_agg_priority_per_tc;
if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
/* Configure Priority */
- status = ice_sched_cfg_sibl_node_prio(hw, vsi_node,
+ status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
node_prio[i]);
if (status)
break;
if (!tc_node)
continue;
- agg_node = ice_sched_get_agg_node(hw, tc_node, agg_id);
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
continue;
ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- struct ice_hw *hw = pi->hw;
-
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return ICE_ERR_PARAM;
switch (rl_type) {
case ICE_MIN_BW:
- ice_set_clear_cir_bw(&hw->tc_node_bw_t_info[tc], bw);
+ ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
break;
case ICE_MAX_BW:
- ice_set_clear_eir_bw(&hw->tc_node_bw_t_info[tc], bw);
+ ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
break;
case ICE_SHARED_BW:
- ice_set_clear_shared_bw(&hw->tc_node_bw_t_info[tc], bw);
+ ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
break;
default:
return ICE_ERR_PARAM;
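With tc_node_bw_t_info moved from ice_hw into ice_port_info, saved TC bandwidth is tracked per port. A sketch of what a caller sees (example_* is a hypothetical helper; assumes the scheduler lock is held and the function is visible in the caller's scope):

/* Hypothetical helper: the saved value lands in pi->tc_node_bw_t_info[tc],
 * so two ports keep independent saved TC bandwidth.
 */
static enum ice_status
example_save_tc_min_bw(struct ice_port_info *pi, u8 tc, u32 bw)
{
	return ice_sched_save_tc_node_bw(pi, tc, ICE_MIN_BW, bw);
}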
ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u16 bw_alloc)
{
- struct ice_hw *hw = pi->hw;
-
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return ICE_ERR_PARAM;
switch (rl_type) {
case ICE_MIN_BW:
- ice_set_clear_cir_bw_alloc(&hw->tc_node_bw_t_info[tc],
+ ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
bw_alloc);
break;
case ICE_MAX_BW:
- ice_set_clear_eir_bw_alloc(&hw->tc_node_bw_t_info[tc],
+ ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
bw_alloc);
break;
default:
tc_node = ice_sched_get_tc_node(pi, tc);
if (tc_node)
- node = ice_sched_get_agg_node(pi->hw, tc_node, id);
+ node = ice_sched_get_agg_node(pi, tc_node, id);
break;
}
if (!tc_node)
continue;
- agg_node = ice_sched_get_agg_node(pi->hw, tc_node, agg_id);
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
continue;
/* SRL bandwidth layer selection */
if (!tc_node)
continue;
- agg_node = ice_sched_get_agg_node(pi->hw, tc_node, agg_id);
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
continue;
/**
* ice_sched_cfg_sibl_node_prio - configure node sibling priority
- * @hw: pointer to the HW struct
+ * @pi: port information structure
* @node: sched node to configure
* @priority: sibling priority
*
 * This function configures node element's sibling priority only. This
 * function needs to be called with scheduler lock held.
*/
enum ice_status
-ice_sched_cfg_sibl_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
- u8 priority)
+ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
+ struct ice_sched_node *node, u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
+ struct ice_hw *hw = pi->hw;
enum ice_status status;
+ if (!hw)
+ return ICE_ERR_PARAM;
buf = node->info;
data = &buf.data;
data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
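A sketch of how the reworked ice_sched_cfg_sibl_node_prio might be called (example_* is hypothetical; assumes the usual ice_acquire_lock/ice_release_lock pairing on pi->sched_lock, per the kdoc requirement that the scheduler lock be held):

/* Hypothetical caller: configure a node's sibling priority through pi. */
static enum ice_status
example_set_node_prio(struct ice_port_info *pi, struct ice_sched_node *node,
		      u8 prio)
{
	enum ice_status status;

	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_cfg_sibl_node_prio(pi, node, prio);
	ice_release_lock(&pi->sched_lock);
	return status;
}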
status = ICE_ERR_PARAM;
break;
}
- agg_node = ice_sched_get_agg_node(hw, tc_node,
+ agg_node = ice_sched_get_agg_node(hw->port_info, tc_node,
agg_info->agg_id);
if (!agg_node) {
status = ICE_ERR_PARAM;
/**
* ice_sched_replay_tc_node_bw - replay TC node(s) BW
- * @hw: pointer to the HW struct
+ * @pi: port information structure
*
- * This function replay TC nodes. The caller needs to hold the scheduler lock.
+ * This function replays TC nodes.
*/
enum ice_status
-ice_sched_replay_tc_node_bw(struct ice_hw *hw)
+ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
{
- struct ice_port_info *pi = hw->port_info;
enum ice_status status = ICE_SUCCESS;
u8 tc;
+ if (!pi->hw)
+ return ICE_ERR_PARAM;
ice_acquire_lock(&pi->sched_lock);
ice_for_each_traffic_class(tc) {
struct ice_sched_node *tc_node;
- tc_node = ice_sched_get_tc_node(hw->port_info, tc);
+ tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
continue; /* TC not present */
- status = ice_sched_replay_node_bw(hw, tc_node,
- &hw->tc_node_bw_t_info[tc]);
+ status = ice_sched_replay_node_bw(pi->hw, tc_node,
+ &pi->tc_node_bw_t_info[tc]);
if (status)
break;
}
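Finally, a sketch of the per-port replay this enables (example_* is hypothetical): after a reset, each port can replay its own saved TC bandwidth from its port_info instead of going through hw->port_info.

/* Hypothetical reset-recovery helper: replay one port's saved TC BW. */
static enum ice_status example_replay_port_tc_bw(struct ice_port_info *pi)
{
	if (!pi || !pi->hw)
		return ICE_ERR_PARAM;
	return ice_sched_replay_tc_node_bw(pi);
}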