	return status;
}
+/**
+ * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+	struct ice_vsi_ctx *vsi_ctx;
+	struct ice_q_ctx *q_ctx;
+
+	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+	if (!vsi_ctx)
+		return ICE_ERR_PARAM;
+	/* allocate LAN queue contexts */
+	if (!vsi_ctx->lan_q_ctx[tc]) {
+		vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
+			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
+		if (!vsi_ctx->lan_q_ctx[tc])
+			return ICE_ERR_NO_MEMORY;
+		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+		return ICE_SUCCESS;
+	}
+	/* the number of queues increased; grow the queue context array */
+	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
+		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
+
+		q_ctx = (struct ice_q_ctx *)
+			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
+		if (!q_ctx)
+			return ICE_ERR_NO_MEMORY;
+		ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
+			   prev_num * sizeof(*q_ctx), ICE_NONDMA_TO_NONDMA);
+		ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
+		vsi_ctx->lan_q_ctx[tc] = q_ctx;
+		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+	}
+	return ICE_SUCCESS;
+}
+
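For orientation, the contexts allocated above are typically looked up again by queue handle before a Tx queue is programmed or torn down. A minimal lookup sketch under that assumption; the helper name ice_get_lan_q_ctx and its placement are illustrative and not part of this change, only ice_get_vsi_ctx() and the fields added to struct ice_vsi_ctx below are taken from the patch:

static struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	/* reject handles beyond what ice_alloc_lan_q_ctx() provisioned */
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	return &vsi->lan_q_ctx[tc][q_handle];
}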
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
-	if (owner == ICE_SCHED_NODE_OWNER_LAN)
-		prev_numqs = vsi_ctx->sched.max_lanq[tc];
-	else
-		return ICE_ERR_PARAM;
-
+	prev_numqs = vsi_ctx->sched.max_lanq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
+	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+	if (status)
+		return status;
+
	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
*/
#include "ice_switch.h"
+#include "ice_flex_type.h"
+#include "ice_flow.h"
#define ICE_ETH_DA_OFFSET 0
	hw->vsi_ctx[vsi_handle] = vsi;
}
+/**
+ * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ */
+static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+	struct ice_vsi_ctx *vsi;
+	u8 i;
+
+	vsi = ice_get_vsi_ctx(hw, vsi_handle);
+	if (!vsi)
+		return;
+	ice_for_each_traffic_class(i) {
+		if (vsi->lan_q_ctx[i]) {
+			ice_free(hw, vsi->lan_q_ctx[i]);
+			vsi->lan_q_ctx[i] = NULL;
+		}
+	}
+}
+
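ice_clear_vsi_q_ctx() is wired into VSI teardown just below. The complementary write side, recording or invalidating a handle as a Tx queue is enabled or disabled, is not part of this hunk; a hedged sketch of that step, reusing the illustrative ice_get_lan_q_ctx() lookup from earlier (the function name and the ena flag are assumptions, not part of this change):

static enum ice_status
ice_set_lan_q_handle(struct ice_hw *hw, u16 vsi_handle, u8 tc,
		     u16 q_handle, bool ena)
{
	struct ice_q_ctx *q_ctx;

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		return ICE_ERR_PARAM;
	/* remember the handle on enable; mark the slot unused on disable */
	q_ctx->q_handle = ena ? q_handle : ICE_INVAL_Q_HANDLE;
	return ICE_SUCCESS;
}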
/**
* ice_clear_vsi_ctx - clear the VSI context entry
* @hw: pointer to the HW struct
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
+		if (!LIST_EMPTY(&vsi->rss_list_head))
+			ice_rem_all_rss_vsi_ctx(hw, vsi_handle);
+		ice_clear_vsi_q_ctx(hw, vsi_handle);
		ice_destroy_lock(&vsi->rss_locks);
		ice_free(hw, vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
#define ICE_VSI_INVAL_ID 0xFFFF
+#define ICE_INVAL_Q_HANDLE 0xFFFF
+
+/* VSI queue context structure */
+struct ice_q_ctx {
+	u16 q_handle;
+};
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
	struct ice_aqc_vsi_props info;
	struct ice_sched_vsi_info sched;
	u8 alloc_from_pool;
+	u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
+	struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
	struct ice_lock rss_locks; /* protect rss config in VSI ctx */
	struct LIST_HEAD_TYPE rss_list_head;
};
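As a usage note for the new per-TC arrays, a short sketch of walking them, for example to count entries still holding a valid handle. It assumes unused slots are set to ICE_INVAL_Q_HANDLE, as in the enable/disable sketch earlier, and the helper name is illustrative rather than part of this change:

static u16 ice_count_lan_q_entries(struct ice_hw *hw, u16 vsi_handle, u8 tc)
{
	struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
	u16 count = 0;
	u16 i;

	if (!vsi || !vsi->lan_q_ctx[tc])
		return 0;
	/* entries up to num_lan_q_entries[tc] were provisioned by
	 * ice_alloc_lan_q_ctx(); only count the ones still in use
	 */
	for (i = 0; i < vsi->num_lan_q_entries[tc]; i++)
		if (vsi->lan_q_ctx[tc][i].q_handle != ICE_INVAL_Q_HANDLE)
			count++;
	return count;
}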