0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
+/* Header offsets (bytes) into dummy_qinq_ipv4_pkt: outer VLAN tag
+ * (ICE_VLAN_EX) at 14 and inner tag (ICE_VLAN_OFOS) at 18 precede the
+ * IPv4 header at 22. ICE_PROTOCOL_LAST terminates the table.
+ */
+static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 14 },
+ { ICE_VLAN_OFOS, 18 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy QinQ (outer ethertype 0x9100, inner tag 0x8100) IPv4 + UDP packet.
+ * IPv4 total length 0x001c = 28 (20-byte IP header + 8-byte UDP header);
+ * UDP length field is 0x0008 (header only, no payload).
+ */
+static const u8 dummy_qinq_ipv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x91, 0x00, /* outer ethertype 0x9100 */
+
+ 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
+ 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18, inner ethertype 0x0800 (IPv4) */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Header offsets (bytes) into dummy_qinq_ipv6_pkt; layout mirrors the IPv4
+ * QinQ table with the IPv6 header starting at offset 22.
+ */
+static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 14 },
+ { ICE_VLAN_OFOS, 18 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy QinQ IPv6 + UDP packet. IPv6 payload length 0x0010 = 16 bytes:
+ * 8-byte UDP header plus the 8 trailing bytes kept for ESP packets.
+ */
+static const u8 dummy_qinq_ipv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x91, 0x00, /* outer ethertype 0x9100 */
+
+ 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
+ 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18, inner ethertype 0x86dd (IPv6) */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
+ 0x00, 0x10, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Header offsets for QinQ PPPoE rules that do not match on the inner IP
+ * header; the table ends at ICE_PPPOE (offset 22).
+ */
+static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 14 },
+ { ICE_VLAN_OFOS, 18 },
+ { ICE_PPPOE, 22 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Header offsets into dummy_qinq_pppoe_ipv4_pkt: PPPoE header at 22, inner
+ * IPv4 header at 30 (after the 2-byte PPP protocol ID at 28).
+ */
+static const
+struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 14 },
+ { ICE_VLAN_OFOS, 18 },
+ { ICE_PPPOE, 22 },
+ { ICE_IPV4_OFOS, 30 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy QinQ PPPoE (inner ethertype 0x8864, PPPoE session) IPv4 packet.
+ * PPPoE payload length 0x0016 = 22 (2-byte PPP protocol ID + 20-byte IPv4
+ * header); PPP protocol 0x0021 = IPv4.
+ */
+static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x91, 0x00, /* outer ethertype 0x9100 */
+
+ 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
+ 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 28 */
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Header offsets into dummy_qinq_pppoe_ipv6_packet.
+ * NOTE(review): unlike the other QinQ offset tables above, this one exposes
+ * ICE_ETYPE_OL at offset 12 as a separate entry — confirm intentional.
+ */
+static const
+struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_VLAN_EX, 14},
+ { ICE_VLAN_OFOS, 18 },
+ { ICE_PPPOE, 22 },
+ { ICE_IPV6_OFOS, 30 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy QinQ PPPoE IPv6 packet. PPPoE payload length 0x002a = 42 (2-byte
+ * PPP protocol ID + 40-byte IPv6 header); PPP protocol 0x0057 = IPv6; IPv6
+ * next header 0x3b (No Next Header).
+ */
+static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x91, 0x00, /* ICE_ETYPE_OL 12, outer ethertype 0x9100 */
+
+ 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
+ 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 28 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
+ 0x00, 0x00, 0x3b, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
/* this is a recipe to profile association bitmap */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
ICE_MAX_NUM_PROFILES);
 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
 * @rid: recipe ID that we are populating
+ * @vlan: flag of vlan protocol
 */
-static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
+static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
{
u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+ enum ice_sw_tunnel_type tun_type;
u16 i, j, profile_num = 0;
bool non_tun_valid = false;
bool pppoe_valid = false;
}
}
+ if (vlan && tun_type == ICE_SW_TUN_PPPOE)
+ tun_type = ICE_SW_TUN_PPPOE_QINQ;
+ else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
+ tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
+ else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
+ tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
+ else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
+ tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
+ else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
+ tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+ else if (vlan && tun_type == ICE_NON_TUN)
+ tun_type = ICE_NON_TUN_QINQ;
+
return tun_type;
}
struct ice_prot_lkup_ext *lkup_exts;
enum ice_status status;
u8 fv_word_idx = 0;
+ bool vlan = false;
u16 sub_recps;
ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
lkup_exts->fv_words[fv_word_idx].off = off;
lkup_exts->field_mask[fv_word_idx] =
rg_entry->fv_mask[i];
+ if (prot == ICE_META_DATA_ID_HW &&
+ off == ICE_TUN_FLAG_MDID_OFF)
+ vlan = true;
fv_word_idx++;
}
/* populate rg_list with the data from the child entry of this
lkup_exts->n_val_words = fv_word_idx;
recps[rid].big_recp = (num_recps > 1);
recps[rid].n_grp_count = (u8)num_recps;
- recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
+ recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
ice_memdup(hw, tmp, recps[rid].n_grp_count *
sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
u16 i;
- for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
+ for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
u16 j;
ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
return status;
}
+/**
+ * ice_alloc_rss_global_lut - allocate a RSS global LUT
+ * @hw: pointer to the HW struct
+ * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
+ * @global_lut_id: output parameter for the RSS global LUT's ID
+ *
+ * On success *global_lut_id holds the allocated LUT ID; on failure it is left
+ * unmodified. The ID is released with ice_free_rss_global_lut().
+ */
+enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
+{
+	struct ice_aqc_alloc_free_res_elem *sw_buf;
+	enum ice_status status;
+	u16 buf_len;
+
+	buf_len = ice_struct_size(sw_buf, elem, 1);
+	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+	if (!sw_buf)
+		return ICE_ERR_NO_MEMORY;
+
+	/* request one global RSS hash LUT, shared or dedicated per caller */
+	sw_buf->num_elems = CPU_TO_LE16(1);
+	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
+				       (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
+				       ICE_AQC_RES_TYPE_FLAG_DEDICATED));
+
+	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
+	if (status) {
+		ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
+			  shared_res ? "shared" : "dedicated", status);
+		goto ice_alloc_global_lut_exit;
+	}
+
+	/* firmware returns the allocated resource ID in the response element */
+	*global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
+
+ice_alloc_global_lut_exit:
+	ice_free(hw, sw_buf);
+	return status;
+}
+
+/**
+ * ice_free_rss_global_lut - free a RSS global LUT
+ * @hw: pointer to the HW struct
+ * @global_lut_id: ID of the RSS global LUT to free
+ *
+ * Releases an ID previously obtained from ice_alloc_rss_global_lut().
+ */
+enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
+{
+	struct ice_aqc_alloc_free_res_elem *sw_buf;
+	u16 buf_len, num_elems = 1;
+	enum ice_status status;
+
+	buf_len = ice_struct_size(sw_buf, elem, num_elems);
+	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+	if (!sw_buf)
+		return ICE_ERR_NO_MEMORY;
+
+	sw_buf->num_elems = CPU_TO_LE16(num_elems);
+	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
+	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
+
+	status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
+	if (status)
+		ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
+			  global_lut_id, status);
+
+	ice_free(hw, sw_buf);
+	return status;
+}
+
/**
* ice_alloc_sw - allocate resources specific to switch
* @hw: pointer to the HW struct
status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "VEB counter resource could not be freed\n");
+ ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
ret_status = status;
}
return ICE_ERR_PARAM;
break;
default:
- ice_debug(hw, ICE_DBG_SW,
- "Error due to unsupported rule_type %u\n", rule_type);
+ ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
return ICE_ERR_OUT_OF_RANGE;
}
* than ICE_MAX_VSI, if not return with error.
*/
if (id >= ICE_MAX_VSI) {
- ice_debug(hw, ICE_DBG_SW,
- "Error VSI index (%u) out-of-range\n",
+ ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
id);
ice_free(hw, mr_list);
return ICE_ERR_OUT_OF_RANGE;
* @hw: pointer to the HW struct
* @bcast_thresh: represents the upper threshold for broadcast storm control
* @mcast_thresh: represents the upper threshold for multicast storm control
- * @ctl_bitmask: storm control control knobs
+ * @ctl_bitmask: storm control knobs
*
* Sets the storm control configuration (0x0280)
*/
* @hw: pointer to the HW struct
* @bcast_thresh: represents the upper threshold for broadcast storm control
* @mcast_thresh: represents the upper threshold for multicast storm control
- * @ctl_bitmask: storm control control knobs
+ * @ctl_bitmask: storm control knobs
*
* Gets the storm control configuration (0x0281)
*/
pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
- ice_debug(pi->hw, ICE_DBG_SW,
- "incorrect VSI/port type received\n");
+ ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
break;
}
}
case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
if (j == num_total_ports) {
- ice_debug(hw, ICE_DBG_SW,
- "more ports than expected\n");
+ ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
status = ICE_ERR_CFG;
goto out;
}
} while (req_desc && !status);
out:
- ice_free(hw, (void *)rbuf);
+ ice_free(hw, rbuf);
return status;
}
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
- ICE_LG_ACT_VSI_LIST_ID_M;
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
- lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
- rules_size);
+ lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
if (!lg_act)
return ICE_ERR_NO_MEMORY;
- rx_tx = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)lg_act + lg_act_size);
+ rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
/* Fill in the first switch rule i.e. large action */
lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
tmp_fltr_info.vsi_handle = rem_vsi_handle;
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
tmp_fltr_info.fwd_id.hw_vsi_id, status);
return status;
}
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to remove VSI list %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
vsi_list_id, status);
return status;
}
*/
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
- ice_debug(hw, ICE_DBG_SW,
- "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+ ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
status = ICE_ERR_CFG;
goto exit;
}
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+
s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
ice_remove_eth_mac(hw, &remove_list_head);
break;
case ICE_SW_LKUP_DFLT:
- ice_debug(hw, ICE_DBG_SW,
- "Remove filters for this lookup type hasn't been implemented yet\n");
+ ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
break;
case ICE_SW_LKUP_LAST:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status)
- ice_debug(hw, ICE_DBG_SW,
- "counter resource could not be freed\n");
+ ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
ice_free(hw, buf);
return status;
{ ICE_AH, { 0, 2, 4, 6, 8, 10 } },
{ ICE_NAT_T, { 8, 10, 12, 14 } },
{ ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
+ { ICE_VLAN_EX, { 0, 2 } },
};
/* The following table describes preferred grouping of recipes.
{ ICE_AH, ICE_AH_HW },
{ ICE_NAT_T, ICE_UDP_ILOS_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_VLAN_EX, ICE_VLAN_OF_HW },
};
/**
* that can be used.
*/
if (chain_idx >= ICE_MAX_FV_WORDS) {
- ice_debug(hw, ICE_DBG_SW,
- "No chain index available\n");
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
status = ICE_ERR_MAX_LIMIT;
goto err_unroll;
}
case ICE_SW_TUN_NVGRE:
case ICE_SW_TUN_UDP:
case ICE_ALL_TUNNELS:
+ case ICE_SW_TUN_AND_NON_TUN_QINQ:
+ case ICE_NON_TUN_QINQ:
+ case ICE_SW_TUN_PPPOE_QINQ:
+ case ICE_SW_TUN_PPPOE_PAY_QINQ:
+ case ICE_SW_TUN_PPPOE_IPV4_QINQ:
+ case ICE_SW_TUN_PPPOE_IPV6_QINQ:
*mask = ICE_TUN_FLAG_MASK;
return true;
switch (rinfo->tun_type) {
case ICE_NON_TUN:
+ case ICE_NON_TUN_QINQ:
prof_type = ICE_PROF_NON_TUN;
break;
case ICE_ALL_TUNNELS:
prof_type = ICE_PROF_TUN_GRE;
break;
case ICE_SW_TUN_PPPOE:
+ case ICE_SW_TUN_PPPOE_QINQ:
prof_type = ICE_PROF_TUN_PPPOE;
break;
case ICE_SW_TUN_PPPOE_PAY:
+ case ICE_SW_TUN_PPPOE_PAY_QINQ:
ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
return;
case ICE_SW_TUN_PPPOE_IPV4:
+ case ICE_SW_TUN_PPPOE_IPV4_QINQ:
ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
return;
case ICE_SW_TUN_PPPOE_IPV6:
+ case ICE_SW_TUN_PPPOE_IPV6_QINQ:
ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
return;
case ICE_SW_TUN_AND_NON_TUN:
+ case ICE_SW_TUN_AND_NON_TUN_QINQ:
default:
prof_type = ICE_PROF_ALL;
break;
tcp = true;
}
+ if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+ tun_type == ICE_NON_TUN_QINQ) && ipv6) {
+ *pkt = dummy_qinq_ipv6_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
+ *offsets = dummy_qinq_ipv6_packet_offsets;
+ return;
+ } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+ tun_type == ICE_NON_TUN_QINQ) {
+ *pkt = dummy_qinq_ipv4_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
+ *offsets = dummy_qinq_ipv4_packet_offsets;
+ return;
+ }
+
+ if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
+ *pkt = dummy_qinq_pppoe_ipv6_packet;
+ *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
+ *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
+ return;
+ } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
+ *pkt = dummy_qinq_pppoe_ipv4_pkt;
+ *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
+ *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
+ return;
+ } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
+ tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
+ *pkt = dummy_qinq_pppoe_ipv4_pkt;
+ *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
+ *offsets = dummy_qinq_pppoe_packet_offsets;
+ return;
+ }
+
if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
*pkt = dummy_ipv4_gtpu_ipv4_packet;
*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
+ case ICE_VLAN_EX:
len = sizeof(struct ice_vlan_hdr);
break;
case ICE_IPV4_OFOS:
*/
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
tmp_fltr.fwd_id.hw_vsi_id, status);
return status;
}
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to remove VSI list %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
vsi_list_id, status);
return status;
}
u16 rule_buf_sz;
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
- s_rule =
- (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
- rule_buf_sz);
+ s_rule = (struct ice_aqc_sw_rules_elem *)
+ ice_malloc(hw, rule_buf_sz);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
s_rule->pdata.lkup_tx_rx.act = 0;
*/
enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
{
- struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
struct ice_vsi_list_map_info *map_info;
struct LIST_HEAD_TYPE *list_head;
struct ice_adv_rule_info rinfo;
struct ice_switch_info *sw;
enum ice_status status;
- u16 vsi_list_id = 0;
u8 rid;
sw = hw->switch_info;
continue;
if (!sw->recp_list[rid].adv_rule)
continue;
+
list_head = &sw->recp_list[rid].filt_rules;
- map_info = NULL;
- LIST_FOR_EACH_ENTRY(list_itr, list_head,
- ice_adv_fltr_mgmt_list_entry, list_entry) {
- map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
- vsi_handle,
- &vsi_list_id);
- if (!map_info)
- continue;
+ LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
+ ice_adv_fltr_mgmt_list_entry,
+ list_entry) {
rinfo = list_itr->rule_info;
+
+ if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+ map_info = list_itr->vsi_list_info;
+ if (!map_info)
+ continue;
+
+ if (!ice_is_bit_set(map_info->vsi_map,
+ vsi_handle))
+ continue;
+ } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
+ continue;
+ }
+
rinfo.sw_act.vsi_handle = vsi_handle;
status = ice_rem_adv_rule(hw, list_itr->lkups,
list_itr->lkups_cnt, &rinfo);
+
if (status)
return status;
- map_info = NULL;
}
}
return ICE_SUCCESS;