1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header defined below */
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN ID is a 12-bit field, so 0xFFF is the largest programmable value */
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Wire size of a lookup (RX/TX) switch-rule AQ element carrying the 16-byte
 * dummy Ethernet header as payload. The trailing "- 1" presumably compensates
 * for a 1-byte placeholder payload member inside ice_sw_rule_lkup_rx_tx —
 * TODO confirm against that struct's definition.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Wire size of a lookup rule element with no packet payload */
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Wire size of a large-action rule element holding n action entries:
 * base element minus the single declared act entry, plus n act entries.
 */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Wire size of a VSI-list rule element holding n VSI numbers; same
 * declared-member substitution pattern as ICE_SW_RULE_LG_ACT_SIZE.
 */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Dummy packet templates used as rule payload when programming tunnel
 * switch filters: outer Ether/IP/GRE, outer Ether/IP/UDP/VXLAN, and a
 * TCP-tunnel variant.
 * NOTE(review): several byte rows appear to be missing from these
 * initializers relative to the inline "starts/ends" markers (e.g. the
 * MAC address rows and parts of the IP headers) — verify the byte
 * layout against the upstream shared code before relying on offsets.
 */
57 u8 dummy_gre_packet[] = { 0, 0, 0, 0, /* Ether starts */
60 0x08, 0, /* Ether ends */
61 0x45, 0, 0, 0x3E, /* IP starts */
65 0, 0, 0, 0, /* IP ends */
66 0x80, 0, 0x65, 0x58, /* GRE starts */
67 0, 0, 0, 0, /* GRE ends */
68 0, 0, 0, 0, /* Ether starts */
71 0x08, 0, /* Ether ends */
72 0x45, 0, 0, 0x14, /* IP starts */
76 0, 0, 0, 0 /* IP ends */
/* UDP tunnel (VXLAN) template: UDP dst port bytes 0x12,0xB5 = 4789 */
80 dummy_udp_tun_packet[] = {0, 0, 0, 0, /* Ether starts */
83 0x08, 0, /* Ether ends */
84 0x45, 0, 0, 0x32, /* IP starts */
88 0, 0, 0, 0, /* IP ends */
89 0, 0, 0x12, 0xB5, /* UDP start*/
90 0, 0x1E, 0, 0, /* UDP end*/
91 0, 0, 0, 0, /* VXLAN start */
92 0, 0, 0, 0, /* VXLAN end*/
93 0, 0, 0, 0, /* Ether starts */
/* TCP tunnel template (protocol byte 0x06 visible in the IP header) */
100 dummy_tcp_tun_packet[] = {0, 0, 0, 0, /* Ether starts */
103 0x08, 0, /* Ether ends */
104 0x45, 0, 0, 0x28, /* IP starts */
106 0x40, 0x06, 0xF5, 0x69,
108 0, 0, 0, 0, /* IP ends */
114 0, 0 /* 2 bytes padding for 4 byte alignment*/
117 /* this is a recipe to profile bitmap association */
118 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
119 ICE_MAX_NUM_PROFILES);
/* Result indexes still free for chained recipes; bits are cleared in
 * ice_get_recp_frm_fw as firmware recipes are found to already use them.
 */
120 static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
/**
123 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
124 * @hw: pointer to hardware structure
125 * @recps: struct that we need to populate
126 * @rid: recipe ID that we are populating
128 * This function is used to populate all the necessary entries into our
129 * bookkeeping so that we have a current list of all the recipes that are
130 * programmed in the firmware.
 */
132 static enum ice_status
133 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid)
135 u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
136 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
137 u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
138 struct ice_aqc_recipe_data_elem *tmp;
139 u16 num_recps = ICE_MAX_NUM_RECIPES;
140 struct ice_prot_lkup_ext *lkup_exts;
141 enum ice_status status;
143 /* we need a buffer big enough to accommodate all the recipes */
144 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
145 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
147 return ICE_ERR_NO_MEMORY;
/* Seed the query with the root recipe ID; firmware fills in the chain */
149 tmp[0].recipe_indx = rid;
150 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
151 /* non-zero status meaning recipe doesn't exist */
154 lkup_exts = &recps[rid].lkup_exts;
155 /* start populating all the entries for recps[rid] based on lkups from
 */
158 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
159 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
160 struct ice_recp_grp_entry *rg_entry;
161 u8 prof_id, prot = 0;
164 rg_entry = (struct ice_recp_grp_entry *)
165 ice_malloc(hw, sizeof(*rg_entry));
167 status = ICE_ERR_NO_MEMORY;
170 /* Avoid 8th bit since its result enable bit */
171 result_idxs[result_idx] = root_bufs.content.result_indx &
172 ~ICE_AQ_RECIPE_RESULT_EN;
173 /* Check if result enable bit is set */
174 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
175 ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
176 result_idxs[result_idx++],
177 available_result_ids);
179 recipe_to_profile[tmp[sub_recps].recipe_indx],
180 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
181 /* get the first profile that is associated with rid */
182 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is presumably reserved, hence the i + 1 — TODO confirm */
183 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
184 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
186 rg_entry->fv_idx[i] = lkup_indx;
/*
187 * If the recipe is a chained recipe then all its
188 * child recipe's result will have a result index.
189 * To fill fv_words we should not use those result
190 * index, we only need the protocol ids and offsets.
191 * We will skip all the fv_idx which stores result
192 * index in them. We also need to skip any fv_idx which
193 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
194 * valid offset value.
 */
196 if (result_idxs[0] == rg_entry->fv_idx[i] ||
197 result_idxs[1] == rg_entry->fv_idx[i] ||
198 result_idxs[2] == rg_entry->fv_idx[i] ||
199 result_idxs[3] == rg_entry->fv_idx[i] ||
200 result_idxs[4] == rg_entry->fv_idx[i] ||
201 rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
202 rg_entry->fv_idx[i] == 0)
/* Translate field-vector index -> (protocol id, offset) pair */
205 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
206 rg_entry->fv_idx[i], &prot, &off);
207 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
208 lkup_exts->fv_words[fv_word_idx].off = off;
211 /* populate rg_list with the data from the child entry of this
 */
214 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
216 lkup_exts->n_val_words = fv_word_idx;
217 recps[rid].n_grp_count = num_recps;
/* Keep a private copy of the raw firmware recipe data for later replay */
218 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
219 ice_calloc(hw, recps[rid].n_grp_count,
220 sizeof(struct ice_aqc_recipe_data_elem));
221 if (!recps[rid].root_buf)
224 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
225 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
226 recps[rid].recp_created = true;
227 if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
228 recps[rid].root_rid = rid;
235 * ice_get_recp_to_prof_map - updates recipe to profile mapping
236 * @hw: pointer to hardware structure
238 * This function is used to populate recipe_to_profile matrix where index to
239 * this array is the recipe ID and the element is the mapping of which profiles
240 * is this recipe mapped to.
243 ice_get_recp_to_prof_map(struct ice_hw *hw)
245 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
248 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
251 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
252 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
255 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
256 if (ice_is_bit_set(r_bitmap, j))
257 ice_set_bit(i, recipe_to_profile[j]);
262 * ice_init_def_sw_recp - initialize the recipe book keeping tables
263 * @hw: pointer to the HW struct
265 * Allocate memory for the entire recipe table and initialize the structures/
266 * entries corresponding to basic recipes.
268 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
270 struct ice_sw_recipe *recps;
273 recps = (struct ice_sw_recipe *)
274 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
276 return ICE_ERR_NO_MEMORY;
278 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
279 recps[i].root_rid = i;
280 INIT_LIST_HEAD(&recps[i].filt_rules);
281 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
282 INIT_LIST_HEAD(&recps[i].rg_list);
283 ice_init_lock(&recps[i].filt_rule_lock);
286 hw->switch_info->recp_list = recps;
292 * ice_aq_get_sw_cfg - get switch configuration
293 * @hw: pointer to the hardware structure
294 * @buf: pointer to the result buffer
295 * @buf_size: length of the buffer available for response
296 * @req_desc: pointer to requested descriptor
297 * @num_elems: pointer to number of elements
298 * @cd: pointer to command details structure or NULL
300 * Get switch configuration (0x0200) to be placed in 'buff'.
301 * This admin command returns information such as initial VSI/port number
302 * and switch ID it belongs to.
304 * NOTE: *req_desc is both an input/output parameter.
305 * The caller of this function first calls this function with *request_desc set
306 * to 0. If the response from f/w has *req_desc set to 0, all the switch
307 * configuration information has been returned; if non-zero (meaning not all
308 * the information was returned), the caller should call this function again
309 * with *req_desc set to the previous value returned by f/w to get the
310 * next block of switch configuration information.
312 * *num_elems is output only parameter. This reflects the number of elements
313 * in response buffer. The caller of this function to use *num_elems while
314 * parsing the response buffer.
316 static enum ice_status
317 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
318 u16 buf_size, u16 *req_desc, u16 *num_elems,
319 struct ice_sq_cd *cd)
321 struct ice_aqc_get_sw_cfg *cmd;
322 enum ice_status status;
323 struct ice_aq_desc desc;
325 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
326 cmd = &desc.params.get_sw_conf;
327 cmd->element = CPU_TO_LE16(*req_desc);
329 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
331 *req_desc = LE16_TO_CPU(cmd->element);
332 *num_elems = LE16_TO_CPU(cmd->num_elems);
/**
340 * ice_alloc_sw - allocate resources specific to switch
341 * @hw: pointer to the HW struct
342 * @ena_stats: true to turn on VEB stats
343 * @shared_res: true for shared resource, false for dedicated resource
344 * @sw_id: switch ID returned
345 * @counter_id: VEB counter ID returned
347 * allocates switch resources (SWID and VEB counter) (0x0208)
 */
350 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
353 struct ice_aqc_alloc_free_res_elem *sw_buf;
354 struct ice_aqc_res_elem *sw_ele;
355 enum ice_status status;
358 buf_len = sizeof(*sw_buf);
359 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
360 ice_malloc(hw, buf_len);
362 return ICE_ERR_NO_MEMORY;
364 /* Prepare buffer for switch ID.
365 * The number of resource entries in buffer is passed as 1 since only a
366 * single switch/VEB instance is allocated, and hence a single sw_id
 */
369 sw_buf->num_elems = CPU_TO_LE16(1);
371 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
372 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
373 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
375 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
376 ice_aqc_opc_alloc_res, NULL);
378 goto ice_alloc_sw_exit;
/* FW hands back the allocated switch ID in the first element */
381 sw_ele = &sw_buf->elem[0];
382 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* When VEB stats are enabled, a second allocation follows for the
 * VEB counter; its buffer is freed on every path below.
 */
385 /* Prepare buffer for VEB Counter */
386 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
387 struct ice_aqc_alloc_free_res_elem *counter_buf;
388 struct ice_aqc_res_elem *counter_ele;
390 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
391 ice_malloc(hw, buf_len);
393 status = ICE_ERR_NO_MEMORY;
394 goto ice_alloc_sw_exit;
397 /* The number of resource entries in buffer is passed as 1 since
398 * only a single switch/VEB instance is allocated, and hence a
399 * single VEB counter is requested.
 */
401 counter_buf->num_elems = CPU_TO_LE16(1);
402 counter_buf->res_type =
403 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
404 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
405 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
409 ice_free(hw, counter_buf);
410 goto ice_alloc_sw_exit;
412 counter_ele = &counter_buf->elem[0];
413 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
414 ice_free(hw, counter_buf);
418 ice_free(hw, sw_buf);
/**
423 * ice_free_sw - free resources specific to switch
424 * @hw: pointer to the HW struct
425 * @sw_id: switch ID returned
426 * @counter_id: VEB counter ID returned
428 * free switch resources (SWID and VEB counter) (0x0209)
430 * NOTE: This function frees multiple resources. It continues
431 * releasing other resources even after it encounters error.
432 * The error code returned is the last error it encountered.
 */
434 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
436 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
437 enum ice_status status, ret_status;
440 buf_len = sizeof(*sw_buf);
441 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
442 ice_malloc(hw, buf_len);
444 return ICE_ERR_NO_MEMORY;
446 /* Prepare buffer to free for switch ID res.
447 * The number of resource entries in buffer is passed as 1 since only a
448 * single switch/VEB instance is freed, and hence a single sw_id
 */
451 sw_buf->num_elems = CPU_TO_LE16(1);
452 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
453 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* ret_status holds the SWID free result; the counter free below is
 * attempted regardless, per the NOTE above.
 */
455 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
456 ice_aqc_opc_free_res, NULL);
459 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
461 /* Prepare buffer to free for VEB Counter resource */
462 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
463 ice_malloc(hw, buf_len);
465 ice_free(hw, sw_buf);
466 return ICE_ERR_NO_MEMORY;
469 /* The number of resource entries in buffer is passed as 1 since only a
470 * single switch/VEB instance is freed, and hence a single VEB counter
 */
473 counter_buf->num_elems = CPU_TO_LE16(1);
474 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
475 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
477 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
478 ice_aqc_opc_free_res, NULL);
480 ice_debug(hw, ICE_DBG_SW,
481 "VEB counter resource could not be freed\n");
485 ice_free(hw, counter_buf);
486 ice_free(hw, sw_buf);
492 * @hw: pointer to the HW struct
493 * @vsi_ctx: pointer to a VSI context struct
494 * @cd: pointer to command details structure or NULL
496 * Add a VSI context to the hardware (0x0210)
499 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
500 struct ice_sq_cd *cd)
502 struct ice_aqc_add_update_free_vsi_resp *res;
503 struct ice_aqc_add_get_update_free_vsi *cmd;
504 struct ice_aq_desc desc;
505 enum ice_status status;
507 cmd = &desc.params.vsi_cmd;
508 res = &desc.params.add_update_free_vsi_res;
510 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
512 if (!vsi_ctx->alloc_from_pool)
513 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
514 ICE_AQ_VSI_IS_VALID);
516 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
518 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
520 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
521 sizeof(vsi_ctx->info), cd);
524 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
525 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
526 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
534 * @hw: pointer to the HW struct
535 * @vsi_ctx: pointer to a VSI context struct
536 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
537 * @cd: pointer to command details structure or NULL
539 * Free VSI context info from hardware (0x0213)
542 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
543 bool keep_vsi_alloc, struct ice_sq_cd *cd)
545 struct ice_aqc_add_update_free_vsi_resp *resp;
546 struct ice_aqc_add_get_update_free_vsi *cmd;
547 struct ice_aq_desc desc;
548 enum ice_status status;
550 cmd = &desc.params.vsi_cmd;
551 resp = &desc.params.add_update_free_vsi_res;
553 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
555 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
557 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
559 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
561 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
562 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
570 * @hw: pointer to the HW struct
571 * @vsi_ctx: pointer to a VSI context struct
572 * @cd: pointer to command details structure or NULL
574 * Update VSI context in the hardware (0x0211)
577 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
578 struct ice_sq_cd *cd)
580 struct ice_aqc_add_update_free_vsi_resp *resp;
581 struct ice_aqc_add_get_update_free_vsi *cmd;
582 struct ice_aq_desc desc;
583 enum ice_status status;
585 cmd = &desc.params.vsi_cmd;
586 resp = &desc.params.add_update_free_vsi_res;
588 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
590 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
592 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
594 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
595 sizeof(vsi_ctx->info), cd);
598 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
599 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
606 * ice_is_vsi_valid - check whether the VSI is valid or not
607 * @hw: pointer to the HW struct
608 * @vsi_handle: VSI handle
610 * check whether the VSI is valid or not
612 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
614 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
618 * ice_get_hw_vsi_num - return the HW VSI number
619 * @hw: pointer to the HW struct
620 * @vsi_handle: VSI handle
622 * return the HW VSI number
623 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
625 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
627 return hw->vsi_ctx[vsi_handle]->vsi_num;
631 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
632 * @hw: pointer to the HW struct
633 * @vsi_handle: VSI handle
635 * return the VSI context entry for a given VSI handle
637 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
639 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
643 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
644 * @hw: pointer to the HW struct
645 * @vsi_handle: VSI handle
646 * @vsi: VSI context pointer
648 * save the VSI context entry for a given VSI handle
651 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
653 hw->vsi_ctx[vsi_handle] = vsi;
657 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
658 * @hw: pointer to the HW struct
659 * @vsi_handle: VSI handle
661 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
663 struct ice_vsi_ctx *vsi;
666 vsi = ice_get_vsi_ctx(hw, vsi_handle);
669 ice_for_each_traffic_class(i) {
670 if (vsi->lan_q_ctx[i]) {
671 ice_free(hw, vsi->lan_q_ctx[i]);
672 vsi->lan_q_ctx[i] = NULL;
678 * ice_clear_vsi_ctx - clear the VSI context entry
679 * @hw: pointer to the HW struct
680 * @vsi_handle: VSI handle
682 * clear the VSI context entry
684 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
686 struct ice_vsi_ctx *vsi;
688 vsi = ice_get_vsi_ctx(hw, vsi_handle);
690 ice_clear_vsi_q_ctx(hw, vsi_handle);
692 hw->vsi_ctx[vsi_handle] = NULL;
697 * ice_clear_all_vsi_ctx - clear all the VSI context entries
698 * @hw: pointer to the HW struct
700 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
704 for (i = 0; i < ICE_MAX_VSI; i++)
705 ice_clear_vsi_ctx(hw, i);
709 * ice_add_vsi - add VSI context to the hardware and VSI handle list
710 * @hw: pointer to the HW struct
711 * @vsi_handle: unique VSI handle provided by drivers
712 * @vsi_ctx: pointer to a VSI context struct
713 * @cd: pointer to command details structure or NULL
715 * Add a VSI context to the hardware also add it into the VSI handle list.
716 * If this function gets called after reset for existing VSIs then update
717 * with the new HW VSI number in the corresponding VSI handle list entry.
720 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
721 struct ice_sq_cd *cd)
723 struct ice_vsi_ctx *tmp_vsi_ctx;
724 enum ice_status status;
726 if (vsi_handle >= ICE_MAX_VSI)
727 return ICE_ERR_PARAM;
728 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
731 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
733 /* Create a new VSI context */
734 tmp_vsi_ctx = (struct ice_vsi_ctx *)
735 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
737 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
738 return ICE_ERR_NO_MEMORY;
740 *tmp_vsi_ctx = *vsi_ctx;
742 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
744 /* update with new HW VSI num */
745 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
746 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
753 * ice_free_vsi- free VSI context from hardware and VSI handle list
754 * @hw: pointer to the HW struct
755 * @vsi_handle: unique VSI handle
756 * @vsi_ctx: pointer to a VSI context struct
757 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
758 * @cd: pointer to command details structure or NULL
760 * Free VSI context info from hardware as well as from VSI handle list
763 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
764 bool keep_vsi_alloc, struct ice_sq_cd *cd)
766 enum ice_status status;
768 if (!ice_is_vsi_valid(hw, vsi_handle))
769 return ICE_ERR_PARAM;
770 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
771 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
773 ice_clear_vsi_ctx(hw, vsi_handle);
779 * @hw: pointer to the HW struct
780 * @vsi_handle: unique VSI handle
781 * @vsi_ctx: pointer to a VSI context struct
782 * @cd: pointer to command details structure or NULL
784 * Update VSI context in the hardware
787 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
788 struct ice_sq_cd *cd)
790 if (!ice_is_vsi_valid(hw, vsi_handle))
791 return ICE_ERR_PARAM;
792 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
793 return ice_aq_update_vsi(hw, vsi_ctx, cd);
797 * ice_aq_get_vsi_params
798 * @hw: pointer to the HW struct
799 * @vsi_ctx: pointer to a VSI context struct
800 * @cd: pointer to command details structure or NULL
802 * Get VSI context info from hardware (0x0212)
805 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
806 struct ice_sq_cd *cd)
808 struct ice_aqc_add_get_update_free_vsi *cmd;
809 struct ice_aqc_get_vsi_resp *resp;
810 struct ice_aq_desc desc;
811 enum ice_status status;
813 cmd = &desc.params.vsi_cmd;
814 resp = &desc.params.get_vsi_resp;
816 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
818 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
820 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
821 sizeof(vsi_ctx->info), cd);
823 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
825 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
826 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
833 * ice_aq_add_update_mir_rule - add/update a mirror rule
834 * @hw: pointer to the HW struct
835 * @rule_type: Rule Type
836 * @dest_vsi: VSI number to which packets will be mirrored
837 * @count: length of the list
838 * @mr_buf: buffer for list of mirrored VSI numbers
839 * @cd: pointer to command details structure or NULL
 * @rule_id: in: existing rule ID to update (or ICE_INVAL_MIRROR_RULE_ID
 *           to add); out: the rule ID returned by firmware
842 * Add/Update Mirror Rule (0x260).
 */
845 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
846 u16 count, struct ice_mir_rule_buf *mr_buf,
847 struct ice_sq_cd *cd, u16 *rule_id)
849 struct ice_aqc_add_update_mir_rule *cmd;
850 struct ice_aq_desc desc;
851 enum ice_status status;
852 __le16 *mr_list = NULL;
/* VPORT rules need a per-VSI list buffer; PPORT rules must not have one */
856 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
857 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
858 /* Make sure count and mr_buf are set for these rule_types */
859 if (!(count && mr_buf))
860 return ICE_ERR_PARAM;
862 buf_size = count * sizeof(__le16);
863 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
865 return ICE_ERR_NO_MEMORY;
867 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
868 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
869 /* Make sure count and mr_buf are not set for these
 */
873 return ICE_ERR_PARAM;
876 ice_debug(hw, ICE_DBG_SW,
877 "Error due to unsupported rule_type %u\n", rule_type);
878 return ICE_ERR_OUT_OF_RANGE;
881 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
883 /* Pre-process 'mr_buf' items for add/update of virtual port
884 * ingress/egress mirroring (but not physical port ingress/egress
 */
890 for (i = 0; i < count; i++) {
893 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
895 /* Validate specified VSI number, make sure it is less
896 * than ICE_MAX_VSI, if not return with error.
 */
898 if (id >= ICE_MAX_VSI) {
899 ice_debug(hw, ICE_DBG_SW,
900 "Error VSI index (%u) out-of-range\n",
902 ice_free(hw, mr_list);
903 return ICE_ERR_OUT_OF_RANGE;
906 /* add VSI to mirror rule */
909 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
910 else /* remove VSI from mirror rule */
911 mr_list[i] = CPU_TO_LE16(id);
915 cmd = &desc.params.add_update_rule;
/* A valid incoming rule ID selects "update"; otherwise FW adds a rule */
916 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
917 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
918 ICE_AQC_RULE_ID_VALID_M);
919 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
920 cmd->num_entries = CPU_TO_LE16(count);
921 cmd->dest = CPU_TO_LE16(dest_vsi);
923 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
925 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
927 ice_free(hw, mr_list);
933 * ice_aq_delete_mir_rule - delete a mirror rule
934 * @hw: pointer to the HW struct
935 * @rule_id: Mirror rule ID (to be deleted)
936 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
937 * otherwise it is returned to the shared pool
938 * @cd: pointer to command details structure or NULL
940 * Delete Mirror Rule (0x261).
943 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
944 struct ice_sq_cd *cd)
946 struct ice_aqc_delete_mir_rule *cmd;
947 struct ice_aq_desc desc;
949 /* rule_id should be in the range 0...63 */
950 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
951 return ICE_ERR_OUT_OF_RANGE;
953 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
955 cmd = &desc.params.del_rule;
956 rule_id |= ICE_AQC_RULE_ID_VALID_M;
957 cmd->rule_id = CPU_TO_LE16(rule_id);
960 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
962 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
966 * ice_aq_alloc_free_vsi_list
967 * @hw: pointer to the HW struct
968 * @vsi_list_id: VSI list ID returned or used for lookup
969 * @lkup_type: switch rule filter lookup type
970 * @opc: switch rules population command type - pass in the command opcode
972 * allocates or free a VSI list resource
974 static enum ice_status
975 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
976 enum ice_sw_lkup_type lkup_type,
977 enum ice_adminq_opc opc)
979 struct ice_aqc_alloc_free_res_elem *sw_buf;
980 struct ice_aqc_res_elem *vsi_ele;
981 enum ice_status status;
984 buf_len = sizeof(*sw_buf);
985 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
986 ice_malloc(hw, buf_len);
988 return ICE_ERR_NO_MEMORY;
989 sw_buf->num_elems = CPU_TO_LE16(1);
991 if (lkup_type == ICE_SW_LKUP_MAC ||
992 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
993 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
994 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
995 lkup_type == ICE_SW_LKUP_PROMISC ||
996 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
997 lkup_type == ICE_SW_LKUP_LAST) {
998 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
999 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1001 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1003 status = ICE_ERR_PARAM;
1004 goto ice_aq_alloc_free_vsi_list_exit;
1007 if (opc == ice_aqc_opc_free_res)
1008 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1010 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1012 goto ice_aq_alloc_free_vsi_list_exit;
1014 if (opc == ice_aqc_opc_alloc_res) {
1015 vsi_ele = &sw_buf->elem[0];
1016 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1019 ice_aq_alloc_free_vsi_list_exit:
1020 ice_free(hw, sw_buf);
1025 * ice_aq_set_storm_ctrl - Sets storm control configuration
1026 * @hw: pointer to the HW struct
1027 * @bcast_thresh: represents the upper threshold for broadcast storm control
1028 * @mcast_thresh: represents the upper threshold for multicast storm control
1029 * @ctl_bitmask: storm control control knobs
1031 * Sets the storm control configuration (0x0280)
1034 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1037 struct ice_aqc_storm_cfg *cmd;
1038 struct ice_aq_desc desc;
1040 cmd = &desc.params.storm_conf;
1042 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1044 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1045 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1046 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1048 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1052 * ice_aq_get_storm_ctrl - gets storm control configuration
1053 * @hw: pointer to the HW struct
1054 * @bcast_thresh: represents the upper threshold for broadcast storm control
1055 * @mcast_thresh: represents the upper threshold for multicast storm control
1056 * @ctl_bitmask: storm control control knobs
1058 * Gets the storm control configuration (0x0281)
1061 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1064 enum ice_status status;
1065 struct ice_aq_desc desc;
1067 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1069 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1071 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1074 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1077 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1080 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1087 * ice_aq_sw_rules - add/update/remove switch rules
1088 * @hw: pointer to the HW struct
1089 * @rule_list: pointer to switch rule population list
1090 * @rule_list_sz: total size of the rule list in bytes
1091 * @num_rules: number of switch rules in the rule_list
1092 * @opc: switch rules population command type - pass in the command opcode
1093 * @cd: pointer to command details structure or NULL
1095 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1097 static enum ice_status
1098 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1099 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1101 struct ice_aq_desc desc;
1103 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
1105 if (opc != ice_aqc_opc_add_sw_rules &&
1106 opc != ice_aqc_opc_update_sw_rules &&
1107 opc != ice_aqc_opc_remove_sw_rules)
1108 return ICE_ERR_PARAM;
1110 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1112 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1113 desc.params.sw_rules.num_rules_fltr_entry_index =
1114 CPU_TO_LE16(num_rules);
1115 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1119 * ice_aq_add_recipe - add switch recipe
1120 * @hw: pointer to the HW struct
1121 * @s_recipe_list: pointer to switch rule population list
1122 * @num_recipes: number of switch recipes in the list
1123 * @cd: pointer to command details structure or NULL
1128 ice_aq_add_recipe(struct ice_hw *hw,
1129 struct ice_aqc_recipe_data_elem *s_recipe_list,
1130 u16 num_recipes, struct ice_sq_cd *cd)
1132 struct ice_aqc_add_get_recipe *cmd;
1133 struct ice_aq_desc desc;
1136 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1137 cmd = &desc.params.add_get_recipe;
1138 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1140 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* Buffer holds the recipe array and is read by firmware */
1141 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1143 buf_size = num_recipes * sizeof(*s_recipe_list);
1145 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1149 * ice_aq_get_recipe - get switch recipe
1150 * @hw: pointer to the HW struct
1151 * @s_recipe_list: pointer to switch rule population list
1152 * @num_recipes: pointer to the number of recipes (input and output)
1153 * @recipe_root: root recipe number of recipe(s) to retrieve
1154 * @cd: pointer to command details structure or NULL
1158 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1159 * On output, *num_recipes will equal the number of entries returned in
1162 * The caller must supply enough space in s_recipe_list to hold all possible
1163 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1166 ice_aq_get_recipe(struct ice_hw *hw,
1167 struct ice_aqc_recipe_data_elem *s_recipe_list,
1168 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1170 struct ice_aqc_add_get_recipe *cmd;
1171 struct ice_aq_desc desc;
1172 enum ice_status status;
/* Enforce the documented contract: caller must size for the maximum */
1175 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1176 return ICE_ERR_PARAM;
1178 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1179 cmd = &desc.params.add_get_recipe;
1180 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1182 cmd->return_index = CPU_TO_LE16(recipe_root);
1183 cmd->num_sub_recipes = 0;
1185 buf_size = *num_recipes * sizeof(*s_recipe_list);
1187 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* num_sub_recipes is written back by firmware in the response descriptor */
1188 /* cppcheck-suppress constArgument */
1189 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1195 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1196 * @hw: pointer to the HW struct
1197 * @profile_id: package profile ID to associate the recipe with
1198 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1199 * @cd: pointer to command details structure or NULL
1200 * Recipe to profile association (0x0291)
1203 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1204 struct ice_sq_cd *cd)
1206 struct ice_aqc_recipe_to_profile *cmd;
1207 struct ice_aq_desc desc;
1209 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1210 cmd = &desc.params.recipe_to_profile;
1211 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
/* NOTE: profile_id arrives as u32 but the AQ field is 16-bit wide */
1212 cmd->profile_id = CPU_TO_LE16(profile_id);
1213 /* Set the recipe ID bit in the bitmask to let the device know which
1214 * profile we are associating the recipe to
1216 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1217 ICE_NONDMA_TO_NONDMA);
/* Direct command: association data travels in the descriptor, no buffer */
1219 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1223 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1224 * @hw: pointer to the HW struct
1225 * @profile_id: package profile ID to associate the recipe with
1226 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1227 * @cd: pointer to command details structure or NULL
1228 * Associate profile ID with given recipe (0x0293)
1231 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1232 struct ice_sq_cd *cd)
1234 struct ice_aqc_recipe_to_profile *cmd;
1235 struct ice_aq_desc desc;
1236 enum ice_status status;
1238 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1239 cmd = &desc.params.recipe_to_profile;
1240 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1241 cmd->profile_id = CPU_TO_LE16(profile_id);
1243 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the recipe bitmap firmware returned in the descriptor back to
 * the caller; NOTE(review): copy appears unconditional — presumably the
 * surrounding (elided) code gates this on success, verify in full file.
 */
1245 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1246 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1252 * ice_alloc_recipe - add recipe resource
1253 * @hw: pointer to the hardware structure
1254 * @rid: recipe ID returned as response to AQ call
1256 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1258 struct ice_aqc_alloc_free_res_elem *sw_buf;
1259 enum ice_status status;
1262 buf_len = sizeof(*sw_buf);
1263 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1265 return ICE_ERR_NO_MEMORY;
/* Request one shared recipe resource from the global pool */
1267 sw_buf->num_elems = CPU_TO_LE16(1);
1268 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1269 ICE_AQC_RES_TYPE_S) |
1270 ICE_AQC_RES_TYPE_FLAG_SHARED);
1271 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1272 ice_aqc_opc_alloc_res, NULL);
/* Firmware writes the allocated recipe ID into the first element */
1274 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1275 ice_free(hw, sw_buf);
1280 /* ice_init_port_info - Initialize port_info with switch configuration data
1281 * @pi: pointer to port_info
1282 * @vsi_port_num: VSI number or port number
1283 * @type: Type of switch element (port or VSI)
1284 * @swid: switch ID of the switch the element is attached to
1285 * @pf_vf_num: PF or VF number
1286 * @is_vf: true if the element is a VF, false otherwise
1289 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1290 u16 swid, u16 pf_vf_num, bool is_vf)
1293 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
/* Physical port: record the logical port number and mark the default
 * Tx/Rx VSIs as not yet assigned.
 */
1294 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1296 pi->pf_vf_num = pf_vf_num;
1298 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1299 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
/* Any other element type is unexpected here and only logged */
1302 ice_debug(pi->hw, ICE_DBG_SW,
1303 "incorrect VSI/port type received\n");
1308 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1309 * @hw: pointer to the hardware structure
1311 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1313 struct ice_aqc_get_sw_cfg_resp *rbuf;
1314 enum ice_status status;
1315 u16 num_total_ports;
/* This implementation supports a single port per function */
1321 num_total_ports = 1;
1323 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1324 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1327 return ICE_ERR_NO_MEMORY;
1329 /* Multiple calls to ice_aq_get_sw_cfg may be required
1330 * to get all the switch configuration information. The need
1331 * for additional calls is indicated by ice_aq_get_sw_cfg
1332 * writing a non-zero value in req_desc
1335 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1336 &req_desc, &num_elems, NULL);
/* Decode each returned element: VSI/port number, PF/VF number and
 * switch ID are packed into masked little-endian fields.
 */
1341 for (i = 0; i < num_elems; i++) {
1342 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1343 u16 pf_vf_num, swid, vsi_port_num;
1347 ele = rbuf[i].elements;
1348 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1349 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1351 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1352 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1354 swid = LE16_TO_CPU(ele->swid);
1356 if (LE16_TO_CPU(ele->pf_vf_num) &
1357 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
/* Element type lives in the upper bits of vsi_port_num */
1360 type = LE16_TO_CPU(ele->vsi_port_num) >>
1361 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1364 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1365 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1366 if (j == num_total_ports) {
1367 ice_debug(hw, ICE_DBG_SW,
1368 "more ports than expected\n");
1369 status = ICE_ERR_CFG;
1372 ice_init_port_info(hw->port_info,
1373 vsi_port_num, type, swid,
/* Keep polling firmware until it reports no more descriptors */
1381 } while (req_desc && !status);
1385 ice_free(hw, (void *)rbuf);
1391 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1392 * @hw: pointer to the hardware structure
1393 * @fi: filter info structure to fill/update
1395 * This helper function populates the lb_en and lan_en elements of the provided
1396 * ice_fltr_info struct using the switch's type and characteristics of the
1397 * switch rule being configured.
1399 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Only Tx-direction forwarding rules need lb_en/lan_en tuning */
1403 if ((fi->flag & ICE_FLTR_TX) &&
1404 (fi->fltr_act == ICE_FWD_TO_VSI ||
1405 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1406 fi->fltr_act == ICE_FWD_TO_Q ||
1407 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1408 /* Setting LB for prune actions will result in replicated
1409 * packets to the internal switch that will be dropped.
1411 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1414 /* Set lan_en to TRUE if
1415 * 1. The switch is a VEB AND
1417 * 2.1 The lookup is a directional lookup like ethertype,
1418 * promiscuous, ethertype-MAC, promiscuous-VLAN
1419 * and default-port OR
1420 * 2.2 The lookup is VLAN, OR
1421 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1422 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1426 * The switch is a VEPA.
1428 * In all other cases, the LAN enable has to be set to false.
/* Non-unicast MAC (mcast/bcast) also qualifies for LAN enable */
1431 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1432 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1433 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1434 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1435 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1436 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1437 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1438 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1439 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1440 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1449 * ice_ilog2 - Calculates integer log base 2 of a number
1450 * @n: number on which to perform operation
1452 static int ice_ilog2(u64 n)
/* Scan from the most significant bit down; the position of the first
 * set bit is floor(log2(n)).
 */
1456 for (i = 63; i >= 0; i--)
1457 if (((u64)1 << i) & n)
1464 * ice_fill_sw_rule - Helper function to fill switch rule structure
1465 * @hw: pointer to the hardware structure
1466 * @f_info: entry containing packet forwarding information
1467 * @s_rule: switch rule structure to be filled in based on mac_entry
1468 * @opc: switch rules population command type - pass in the command opcode
1471 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1472 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Start above ICE_MAX_VLAN_ID so "no VLAN to program" is detectable */
1474 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* Removal only needs the rule index; no header or action required */
1482 if (opc == ice_aqc_opc_remove_sw_rules) {
1483 s_rule->pdata.lkup_tx_rx.act = 0;
1484 s_rule->pdata.lkup_tx_rx.index =
1485 CPU_TO_LE16(f_info->fltr_rule_id);
1486 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1490 eth_hdr_sz = sizeof(dummy_eth_header);
1491 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1493 /* initialize the ether header with a dummy header */
1494 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1495 ice_fill_sw_info(hw, f_info);
/* Build the 32-bit action word from the forwarding action */
1497 switch (f_info->fltr_act) {
1498 case ICE_FWD_TO_VSI:
1499 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1500 ICE_SINGLE_ACT_VSI_ID_M;
1501 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1502 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1503 ICE_SINGLE_ACT_VALID_BIT;
1505 case ICE_FWD_TO_VSI_LIST:
1506 act |= ICE_SINGLE_ACT_VSI_LIST;
1507 act |= (f_info->fwd_id.vsi_list_id <<
1508 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1509 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1510 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1511 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1512 ICE_SINGLE_ACT_VALID_BIT;
1515 act |= ICE_SINGLE_ACT_TO_Q;
1516 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1517 ICE_SINGLE_ACT_Q_INDEX_M;
1519 case ICE_DROP_PACKET:
1520 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1521 ICE_SINGLE_ACT_VALID_BIT;
1523 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as a power of two in the region field */
1524 q_rgn = f_info->qgrp_size > 0 ?
1525 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1526 act |= ICE_SINGLE_ACT_TO_Q;
1527 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1528 ICE_SINGLE_ACT_Q_INDEX_M;
1529 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1530 ICE_SINGLE_ACT_Q_REGION_M;
1537 act |= ICE_SINGLE_ACT_LB_ENABLE;
1539 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pull match data (DA MAC, VLAN, ethertype) from the lookup type */
1541 switch (f_info->lkup_type) {
1542 case ICE_SW_LKUP_MAC:
1543 daddr = f_info->l_data.mac.mac_addr;
1545 case ICE_SW_LKUP_VLAN:
1546 vlan_id = f_info->l_data.vlan.vlan_id;
1547 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1548 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1549 act |= ICE_SINGLE_ACT_PRUNE;
1550 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1553 case ICE_SW_LKUP_ETHERTYPE_MAC:
1554 daddr = f_info->l_data.ethertype_mac.mac_addr;
1556 case ICE_SW_LKUP_ETHERTYPE:
/* Ethertype is written big-endian at offset 12 of the dummy header */
1557 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1558 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1560 case ICE_SW_LKUP_MAC_VLAN:
1561 daddr = f_info->l_data.mac_vlan.mac_addr;
1562 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1564 case ICE_SW_LKUP_PROMISC_VLAN:
1565 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1567 case ICE_SW_LKUP_PROMISC:
1568 daddr = f_info->l_data.mac_vlan.mac_addr;
1574 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1575 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1576 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1578 /* Recipe set depending on lookup type */
1579 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1580 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1581 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1584 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1585 ICE_NONDMA_TO_NONDMA);
/* Program the VLAN TCI only if a valid VLAN ID (<= 0xFFF) was set */
1587 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1588 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1589 *off = CPU_TO_BE16(vlan_id);
1592 /* Create the switch rule with the final dummy Ethernet header */
1593 if (opc != ice_aqc_opc_update_sw_rules)
1594 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1598 * ice_add_marker_act
1599 * @hw: pointer to the hardware structure
1600 * @m_ent: the management entry for which sw marker needs to be added
1601 * @sw_marker: sw marker to tag the Rx descriptor with
1602 * @l_id: large action resource ID
1604 * Create a large action to hold software marker and update the switch rule
1605 * entry pointed by m_ent with newly created large action
1607 static enum ice_status
1608 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1609 u16 sw_marker, u16 l_id)
1611 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1612 /* For software marker we need 3 large actions
1613 * 1. FWD action: FWD TO VSI or VSI LIST
1614 * 2. GENERIC VALUE action to hold the profile ID
1615 * 3. GENERIC VALUE action to hold the software marker ID
1617 const u16 num_lg_acts = 3;
1618 enum ice_status status;
/* Markers are only supported for MAC lookup rules */
1624 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1625 return ICE_ERR_PARAM;
1627 /* Create two back-to-back switch rules and submit them to the HW using
1628 * one memory buffer:
1632 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1633 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1634 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
1636 return ICE_ERR_NO_MEMORY;
/* The lookup rule immediately follows the large action in the buffer */
1638 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1640 /* Fill in the first switch rule i.e. large action */
1641 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1642 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1643 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
1645 /* First action VSI forwarding or VSI list forwarding depending on how
1648 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1649 m_ent->fltr_info.fwd_id.hw_vsi_id;
1651 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1652 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1653 ICE_LG_ACT_VSI_LIST_ID_M;
1654 if (m_ent->vsi_count > 1)
1655 act |= ICE_LG_ACT_VSI_LIST;
1656 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1658 /* Second action descriptor type */
1659 act = ICE_LG_ACT_GENERIC;
1661 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1662 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1664 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1665 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1667 /* Third action Marker value */
1668 act |= ICE_LG_ACT_GENERIC;
1669 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1670 ICE_LG_ACT_GENERIC_VALUE_M;
1672 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
1674 /* call the fill switch rule to fill the lookup Tx Rx structure */
1675 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1676 ice_aqc_opc_update_sw_rules);
1678 /* Update the action to point to the large action ID */
1679 rx_tx->pdata.lkup_tx_rx.act =
1680 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
1681 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1682 ICE_SINGLE_ACT_PTR_VAL_M));
1684 /* Use the filter rule ID of the previously created rule with single
1685 * act. Once the update happens, hardware will treat this as large
1688 rx_tx->pdata.lkup_tx_rx.index =
1689 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call */
1691 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1692 ice_aqc_opc_update_sw_rules, NULL);
1694 m_ent->lg_act_idx = l_id;
1695 m_ent->sw_marker_id = sw_marker;
1698 ice_free(hw, lg_act);
1703 * ice_add_counter_act - add/update filter rule with counter action
1704 * @hw: pointer to the hardware structure
1705 * @m_ent: the management entry for which counter needs to be added
1706 * @counter_id: VLAN counter ID returned as part of allocate resource
1707 * @l_id: large action resource ID
1709 static enum ice_status
1710 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1711 u16 counter_id, u16 l_id)
1713 struct ice_aqc_sw_rules_elem *lg_act;
1714 struct ice_aqc_sw_rules_elem *rx_tx;
1715 enum ice_status status;
1716 /* 2 actions will be added while adding a large action counter */
1717 const int num_acts = 2;
/* Counter actions are only supported for MAC lookup rules */
1724 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1725 return ICE_ERR_PARAM;
1727 /* Create two back-to-back switch rules and submit them to the HW using
1728 * one memory buffer:
1732 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
1733 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1734 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
1737 return ICE_ERR_NO_MEMORY;
/* The lookup rule immediately follows the large action in the buffer */
1739 rx_tx = (struct ice_aqc_sw_rules_elem *)
1740 ((u8 *)lg_act + lg_act_size);
1742 /* Fill in the first switch rule i.e. large action */
1743 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1744 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1745 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
1747 /* First action VSI forwarding or VSI list forwarding depending on how
1750 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1751 m_ent->fltr_info.fwd_id.hw_vsi_id;
1753 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1754 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1755 ICE_LG_ACT_VSI_LIST_ID_M;
1756 if (m_ent->vsi_count > 1)
1757 act |= ICE_LG_ACT_VSI_LIST;
1758 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1760 /* Second action counter ID */
1761 act = ICE_LG_ACT_STAT_COUNT;
1762 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
1763 ICE_LG_ACT_STAT_COUNT_M;
1764 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1766 /* call the fill switch rule to fill the lookup Tx Rx structure */
1767 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1768 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule's single action at the large action resource */
1770 act = ICE_SINGLE_ACT_PTR;
1771 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
1772 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1774 /* Use the filter rule ID of the previously created rule with single
1775 * act. Once the update happens, hardware will treat this as large
1778 f_rule_id = m_ent->fltr_info.fltr_rule_id;
1779 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
1781 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1782 ice_aqc_opc_update_sw_rules, NULL);
1784 m_ent->lg_act_idx = l_id;
1785 m_ent->counter_index = counter_id;
1788 ice_free(hw, lg_act);
1793 * ice_create_vsi_list_map
1794 * @hw: pointer to the hardware structure
1795 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1796 * @num_vsi: number of VSI handles in the array
1797 * @vsi_list_id: VSI list ID generated as part of allocate resource
1799 * Helper function to create a new entry of VSI list ID to VSI mapping
1800 * using the given VSI list ID
1802 static struct ice_vsi_list_map_info *
1803 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1806 struct ice_switch_info *sw = hw->switch_info;
1807 struct ice_vsi_list_map_info *v_map;
1810 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
1815 v_map->vsi_list_id = vsi_list_id;
/* Record VSI membership as a bitmap keyed by VSI handle */
1817 for (i = 0; i < num_vsi; i++)
1818 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new mapping on the switch-wide VSI list map chain */
1820 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
1825 * ice_update_vsi_list_rule
1826 * @hw: pointer to the hardware structure
1827 * @vsi_handle_arr: array of VSI handles to form a VSI list
1828 * @num_vsi: number of VSI handles in the array
1829 * @vsi_list_id: VSI list ID generated as part of allocate resource
1830 * @remove: Boolean value to indicate if this is a remove action
1831 * @opc: switch rules population command type - pass in the command opcode
1832 * @lkup_type: lookup type of the filter
1834 * Call AQ command to add a new switch rule or update existing switch rule
1835 * using the given VSI list ID
1837 static enum ice_status
1838 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1839 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1840 enum ice_sw_lkup_type lkup_type)
1842 struct ice_aqc_sw_rules_elem *s_rule;
1843 enum ice_status status;
1849 return ICE_ERR_PARAM;
/* Map the lookup type to the VSI-list rule type: VLAN lookups use
 * prune lists, every other supported lookup uses plain VSI lists.
 */
1851 if (lkup_type == ICE_SW_LKUP_MAC ||
1852 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1853 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1854 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1855 lkup_type == ICE_SW_LKUP_PROMISC ||
1856 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1857 lkup_type == ICE_SW_LKUP_LAST)
1858 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1859 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1860 else if (lkup_type == ICE_SW_LKUP_VLAN)
1861 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1862 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1864 return ICE_ERR_PARAM;
1866 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1867 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
1869 return ICE_ERR_NO_MEMORY;
/* Translate every handle to its HW VSI number; bail on invalid ones */
1870 for (i = 0; i < num_vsi; i++) {
1871 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1872 status = ICE_ERR_PARAM;
1875 /* AQ call requires hw_vsi_id(s) */
1876 s_rule->pdata.vsi_list.vsi[i] =
1877 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1880 s_rule->type = CPU_TO_LE16(type);
1881 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
1882 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
1884 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1887 ice_free(hw, s_rule);
1892 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1893 * @hw: pointer to the HW struct
1894 * @vsi_handle_arr: array of VSI handles to form a VSI list
1895 * @num_vsi: number of VSI handles in the array
1896 * @vsi_list_id: stores the ID of the VSI list to be created
1897 * @lkup_type: switch rule filter's lookup type
1899 static enum ice_status
1900 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1901 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1903 enum ice_status status;
/* First allocate a VSI list resource ID from firmware */
1905 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1906 ice_aqc_opc_alloc_res);
1910 /* Update the newly created VSI list to include the specified VSIs */
1911 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1912 *vsi_list_id, false,
1913 ice_aqc_opc_add_sw_rules, lkup_type);
1917 * ice_create_pkt_fwd_rule
1918 * @hw: pointer to the hardware structure
1919 * @f_entry: entry containing packet forwarding information
1921 * Create switch rule with given filter information and add an entry
1922 * to the corresponding filter management list to track this switch rule
1925 static enum ice_status
1926 ice_create_pkt_fwd_rule(struct ice_hw *hw,
1927 struct ice_fltr_list_entry *f_entry)
1929 struct ice_fltr_mgmt_list_entry *fm_entry;
1930 struct ice_aqc_sw_rules_elem *s_rule;
1931 enum ice_sw_lkup_type l_type;
1932 struct ice_sw_recipe *recp;
1933 enum ice_status status;
1935 s_rule = (struct ice_aqc_sw_rules_elem *)
1936 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
1938 return ICE_ERR_NO_MEMORY;
1939 fm_entry = (struct ice_fltr_mgmt_list_entry *)
1940 ice_malloc(hw, sizeof(*fm_entry));
1942 status = ICE_ERR_NO_MEMORY;
1943 goto ice_create_pkt_fwd_rule_exit;
1946 fm_entry->fltr_info = f_entry->fltr_info;
1948 /* Initialize all the fields for the management entry */
1949 fm_entry->vsi_count = 1;
1950 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1951 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1952 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1954 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1955 ice_aqc_opc_add_sw_rules);
1957 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1958 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is not tracked; release it */
1960 ice_free(hw, fm_entry);
1961 goto ice_create_pkt_fwd_rule_exit;
/* Firmware returns the assigned rule ID in the descriptor; mirror it
 * into both the caller's entry and the tracked management entry.
 */
1964 f_entry->fltr_info.fltr_rule_id =
1965 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1966 fm_entry->fltr_info.fltr_rule_id =
1967 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1969 /* The book keeping entries will get removed when base driver
1970 * calls remove filter AQ command
1972 l_type = fm_entry->fltr_info.lkup_type;
1973 recp = &hw->switch_info->recp_list[l_type];
1974 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
1976 ice_create_pkt_fwd_rule_exit:
1977 ice_free(hw, s_rule);
1982 * ice_update_pkt_fwd_rule
1983 * @hw: pointer to the hardware structure
1984 * @f_info: filter information for switch rule
1986 * Call AQ command to update a previously created switch rule with a
1989 static enum ice_status
1990 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1992 struct ice_aqc_sw_rules_elem *s_rule;
1993 enum ice_status status;
1995 s_rule = (struct ice_aqc_sw_rules_elem *)
1996 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
1998 return ICE_ERR_NO_MEMORY;
2000 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Updates address an existing rule by its firmware-assigned index */
2002 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2004 /* Update switch rule with new rule set to forward VSI list */
2005 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2006 ice_aqc_opc_update_sw_rules, NULL);
2008 ice_free(hw, s_rule);
2013 * ice_update_sw_rule_bridge_mode
2014 * @hw: pointer to the HW struct
2016 * Updates unicast switch filter rules based on VEB/VEPA mode
2018 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2020 struct ice_switch_info *sw = hw->switch_info;
2021 struct ice_fltr_mgmt_list_entry *fm_entry;
2022 enum ice_status status = ICE_SUCCESS;
2023 struct LIST_HEAD_TYPE *rule_head;
2024 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Bridge mode affects only the MAC lookup recipe's rule list */
2026 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2027 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2029 ice_acquire_lock(rule_lock);
2030 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2032 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2033 u8 *addr = fi->l_data.mac.mac_addr;
2035 /* Update unicast Tx rules to reflect the selected
2038 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2039 (fi->fltr_act == ICE_FWD_TO_VSI ||
2040 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2041 fi->fltr_act == ICE_FWD_TO_Q ||
2042 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* ice_update_pkt_fwd_rule re-runs ice_fill_sw_info, which
 * recomputes lb_en/lan_en for the current bridge mode
 */
2043 status = ice_update_pkt_fwd_rule(hw, fi);
2049 ice_release_lock(rule_lock);
2055 * ice_add_update_vsi_list
2056 * @hw: pointer to the hardware structure
2057 * @m_entry: pointer to current filter management list entry
2058 * @cur_fltr: filter information from the book keeping entry
2059 * @new_fltr: filter information with the new VSI to be added
2061 * Call AQ command to add or update previously created VSI list with new VSI.
2063 * Helper function to do book keeping associated with adding filter information
2064 * The algorithm to do the book keeping is described below :
2065 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2066 * if only one VSI has been added till now
2067 * Allocate a new VSI list and add two VSIs
2068 * to this list using switch rule command
2069 * Update the previously created switch rule with the
2070 * newly created VSI list ID
2071 * if a VSI list was previously created
2072 * Add the new VSI to the previously created VSI list set
2073 * using the update switch rule command
2075 static enum ice_status
2076 ice_add_update_vsi_list(struct ice_hw *hw,
2077 struct ice_fltr_mgmt_list_entry *m_entry,
2078 struct ice_fltr_info *cur_fltr,
2079 struct ice_fltr_info *new_fltr)
2081 enum ice_status status = ICE_SUCCESS;
2082 u16 vsi_list_id = 0;
/* Queue-based actions cannot be aggregated into a VSI list */
2084 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2085 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2086 return ICE_ERR_NOT_IMPL;
2088 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2089 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2090 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2091 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2092 return ICE_ERR_NOT_IMPL;
2094 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2095 /* Only one entry existed in the mapping and it was not already
2096 * a part of a VSI list. So, create a VSI list with the old and
2099 struct ice_fltr_info tmp_fltr;
2100 u16 vsi_handle_arr[2];
2102 /* A rule already exists with the new VSI being added */
2103 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2104 return ICE_ERR_ALREADY_EXISTS;
2106 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2107 vsi_handle_arr[1] = new_fltr->vsi_handle;
2108 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2110 new_fltr->lkup_type);
2114 tmp_fltr = *new_fltr;
2115 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2116 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2117 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2118 /* Update the previous switch rule of "MAC forward to VSI" to
2119 * "MAC fwd to VSI list"
2121 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping entry in sync with the HW rule */
2125 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2126 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2127 m_entry->vsi_list_info =
2128 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2131 /* If this entry was large action then the large action needs
2132 * to be updated to point to FWD to VSI list
2134 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2136 ice_add_marker_act(hw, m_entry,
2137 m_entry->sw_marker_id,
2138 m_entry->lg_act_idx);
2140 u16 vsi_handle = new_fltr->vsi_handle;
2141 enum ice_adminq_opc opcode;
2143 if (!m_entry->vsi_list_info)
2146 /* A rule already exists with the new VSI being added */
2147 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2150 /* Update the previously created VSI list set with
2151 * the new VSI ID passed in
2153 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2154 opcode = ice_aqc_opc_update_sw_rules;
2156 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2157 vsi_list_id, false, opcode,
2158 new_fltr->lkup_type);
2159 /* update VSI list mapping info with new VSI ID */
2161 ice_set_bit(vsi_handle,
2162 m_entry->vsi_list_info->vsi_map);
2165 m_entry->vsi_count++;
2170 * ice_find_rule_entry - Search a rule entry
2171 * @hw: pointer to the hardware structure
2172 * @recp_id: lookup type for which the specified rule needs to be searched
2173 * @f_info: rule information
2175 * Helper function to search for a given rule entry
2176 * Returns pointer to entry storing the rule if found
2178 static struct ice_fltr_mgmt_list_entry *
2179 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2181 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2182 struct ice_switch_info *sw = hw->switch_info;
2183 struct LIST_HEAD_TYPE *list_head;
2185 list_head = &sw->recp_list[recp_id].filt_rules;
/* Match on the full lookup data union plus the direction flag */
2186 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2188 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2189 sizeof(f_info->l_data)) &&
2190 f_info->flag == list_itr->fltr_info.flag) {
2199 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2200 * @hw: pointer to the hardware structure
2201 * @recp_id: lookup type for which VSI lists needs to be searched
2202 * @vsi_handle: VSI handle to be found in VSI list
2203 * @vsi_list_id: VSI list ID found containing vsi_handle
2205 * Helper function to search a VSI list with single entry containing given VSI
2206 * handle element. This can be extended further to search VSI list with more
2207 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2209 static struct ice_vsi_list_map_info *
2210 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2213 struct ice_vsi_list_map_info *map_info = NULL;
2214 struct ice_switch_info *sw = hw->switch_info;
2215 struct LIST_HEAD_TYPE *list_head;
2217 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced recipes track rules in a different entry type, so the two
 * branches walk the same list head with different element types.
 */
2218 if (sw->recp_list[recp_id].adv_rule) {
2219 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2221 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2222 ice_adv_fltr_mgmt_list_entry,
2224 if (list_itr->vsi_list_info) {
2225 map_info = list_itr->vsi_list_info;
2226 if (ice_is_bit_set(map_info->vsi_map,
2228 *vsi_list_id = map_info->vsi_list_id;
2234 struct ice_fltr_mgmt_list_entry *list_itr;
2236 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2237 ice_fltr_mgmt_list_entry,
/* Legacy path: only single-VSI entries are considered, per the
 * function-level comment above.
 */
2239 if (list_itr->vsi_count == 1 &&
2240 list_itr->vsi_list_info) {
2241 map_info = list_itr->vsi_list_info;
2242 if (ice_is_bit_set(map_info->vsi_map,
2244 *vsi_list_id = map_info->vsi_list_id;
2254 * ice_add_rule_internal - add rule for a given lookup type
2255 * @hw: pointer to the hardware structure
2256 * @recp_id: lookup type (recipe ID) for which rule has to be added
2257 * @f_entry: structure containing MAC forwarding information
2259 * Adds or updates the rule lists for a given recipe
2261 static enum ice_status
2262 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2263 struct ice_fltr_list_entry *f_entry)
2265 struct ice_switch_info *sw = hw->switch_info;
2266 struct ice_fltr_info *new_fltr, *cur_fltr;
2267 struct ice_fltr_mgmt_list_entry *m_entry;
2268 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2269 enum ice_status status = ICE_SUCCESS;
2271 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2272 return ICE_ERR_PARAM;
2274 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2275 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2276 f_entry->fltr_info.fwd_id.hw_vsi_id =
2277 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle)
2279 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
/* Everything below up to the exit label runs under the rule lock */
2281 ice_acquire_lock(rule_lock);
2282 new_fltr = &f_entry->fltr_info;
/* Rule source: the logical port for Rx, the HW VSI number for Tx */
2283 if (new_fltr->flag & ICE_FLTR_RX)
2284 new_fltr->src = hw->port_info->lport;
2285 else if (new_fltr->flag & ICE_FLTR_TX)
2287 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No matching rule yet: create a fresh forwarding rule; otherwise fold
 * the new VSI into the existing rule via its VSI list.
 */
2289 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2291 status = ice_create_pkt_fwd_rule(hw, f_entry);
2292 goto exit_add_rule_internal;
2295 cur_fltr = &m_entry->fltr_info;
2296 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2298 exit_add_rule_internal:
2299 ice_release_lock(rule_lock);
2304 * ice_remove_vsi_list_rule
2305 * @hw: pointer to the hardware structure
2306 * @vsi_list_id: VSI list ID generated as part of allocate resource
2307 * @lkup_type: switch rule filter lookup type
2309 * The VSI list should be emptied before this function is called to remove the
2312 static enum ice_status
2313 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2314 enum ice_sw_lkup_type lkup_type)
2316 struct ice_aqc_sw_rules_elem *s_rule;
2317 enum ice_status status;
2320 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2321 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2323 return ICE_ERR_NO_MEMORY;
/* Build a VSI_LIST_CLEAR switch-rule element for the given list ID */
2325 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2326 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2328 /* Free the vsi_list resource that we allocated. It is assumed that the
2329 * list is empty at this point.
2331 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2332 ice_aqc_opc_free_res);
2334 ice_free(hw, s_rule);
2339 * ice_rem_update_vsi_list
2340 * @hw: pointer to the hardware structure
2341 * @vsi_handle: VSI handle of the VSI to remove
2342 * @fm_list: filter management entry for which the VSI list management needs to
2345 static enum ice_status
2346 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2347 struct ice_fltr_mgmt_list_entry *fm_list)
2349 enum ice_sw_lkup_type lkup_type;
2350 enum ice_status status = ICE_SUCCESS;
/* Only rules that forward to a (non-empty) VSI list can be updated */
2353 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2354 fm_list->vsi_count == 0)
2355 return ICE_ERR_PARAM;
2357 /* A rule with the VSI being removed does not exist */
2358 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2359 return ICE_ERR_DOES_NOT_EXIST;
2361 lkup_type = fm_list->fltr_info.lkup_type;
2362 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Pull the VSI out of the HW VSI list first, then update the software
 * book-keeping (count and bitmap).
 */
2363 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2364 ice_aqc_opc_update_sw_rules,
2369 fm_list->vsi_count--;
2370 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* A non-VLAN rule left with one VSI is converted from FWD_TO_VSI_LIST
 * back to a plain FWD_TO_VSI rule targeting the remaining VSI.
 */
2372 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2373 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2374 struct ice_vsi_list_map_info *vsi_list_info =
2375 fm_list->vsi_list_info;
2378 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2380 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2381 return ICE_ERR_OUT_OF_RANGE;
2383 /* Make sure VSI list is empty before removing it below */
2384 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2386 ice_aqc_opc_update_sw_rules,
2391 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2392 tmp_fltr_info.fwd_id.hw_vsi_id =
2393 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2394 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2395 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2397 ice_debug(hw, ICE_DBG_SW,
2398 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2399 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2403 fm_list->fltr_info = tmp_fltr_info;
/* The VSI list itself is no longer needed: either a non-VLAN rule just
 * downgraded to single-VSI forwarding, or a VLAN rule drained to zero
 * VSIs. Tear the list down and drop the software mapping.
 */
2406 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2407 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2408 struct ice_vsi_list_map_info *vsi_list_info =
2409 fm_list->vsi_list_info;
2411 /* Remove the VSI list since it is no longer used */
2412 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2414 ice_debug(hw, ICE_DBG_SW,
2415 "Failed to remove VSI list %d, error %d\n",
2416 vsi_list_id, status);
2420 LIST_DEL(&vsi_list_info->list_entry);
2421 ice_free(hw, vsi_list_info);
2422 fm_list->vsi_list_info = NULL;
2429 * ice_remove_rule_internal - Remove a filter rule of a given type
2431 * @hw: pointer to the hardware structure
2432 * @recp_id: recipe ID for which the rule needs to removed
2433 * @f_entry: rule entry containing filter information
2435 static enum ice_status
2436 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2437 struct ice_fltr_list_entry *f_entry)
2439 struct ice_switch_info *sw = hw->switch_info;
2440 struct ice_fltr_mgmt_list_entry *list_elem;
2441 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2442 enum ice_status status = ICE_SUCCESS;
2443 bool remove_rule = false;
2446 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2447 return ICE_ERR_PARAM;
2448 f_entry->fltr_info.fwd_id.hw_vsi_id =
2449 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2451 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2452 ice_acquire_lock(rule_lock);
2453 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2455 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the whole rule is deleted or only this VSI is
 * detached from a (possibly shared) VSI list.
 */
2459 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2461 } else if (!list_elem->vsi_list_info) {
2462 status = ICE_ERR_DOES_NOT_EXIST;
2464 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2465 /* a ref_cnt > 1 indicates that the vsi_list is being
2466 * shared by multiple rules. Decrement the ref_cnt and
2467 * remove this rule, but do not modify the list, as it
2468 * is in-use by other rules.
2470 list_elem->vsi_list_info->ref_cnt--;
2473 /* a ref_cnt of 1 indicates the vsi_list is only used
2474 * by one rule. However, the original removal request is only
2475 * for a single VSI. Update the vsi_list first, and only
2476 * remove the rule if there are no further VSIs in this list.
2478 vsi_handle = f_entry->fltr_info.vsi_handle;
2479 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2482 /* if VSI count goes to zero after updating the VSI list */
2483 if (list_elem->vsi_count == 0)
2488 /* Remove the lookup rule */
2489 struct ice_aqc_sw_rules_elem *s_rule;
2491 s_rule = (struct ice_aqc_sw_rules_elem *)
2492 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2494 status = ICE_ERR_NO_MEMORY;
/* Build and send the removal; the NO_HDR size is used because a
 * remove element carries no dummy packet header.
 */
2498 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2499 ice_aqc_opc_remove_sw_rules);
2501 status = ice_aq_sw_rules(hw, s_rule,
2502 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2503 ice_aqc_opc_remove_sw_rules, NULL);
2507 /* Remove a book keeping from the list */
2508 ice_free(hw, s_rule);
2510 LIST_DEL(&list_elem->list_entry);
2511 ice_free(hw, list_elem);
2514 ice_release_lock(rule_lock);
2519 * ice_aq_get_res_alloc - get allocated resources
2520 * @hw: pointer to the HW struct
2521 * @num_entries: pointer to u16 to store the number of resource entries returned
2522 * @buf: pointer to user-supplied buffer
2523 * @buf_size: size of buff
2524 * @cd: pointer to command details structure or NULL
2526 * The user-supplied buffer must be large enough to store the resource
2527 * information for all resource types. Each resource type is an
2528 * ice_aqc_get_res_resp_data_elem structure.
2531 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2532 u16 buf_size, struct ice_sq_cd *cd)
2534 struct ice_aqc_get_res_alloc *resp;
2535 enum ice_status status;
2536 struct ice_aq_desc desc;
2539 return ICE_ERR_BAD_PTR;
2541 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2542 return ICE_ERR_INVAL_SIZE;
2544 resp = &desc.params.get_res;
2546 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2547 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Report the entry count only on success and only if the caller asked */
2549 if (!status && num_entries)
2550 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2556 * ice_aq_get_res_descs - get allocated resource descriptors
2557 * @hw: pointer to the hardware structure
2558 * @num_entries: number of resource entries in buffer
2559 * @buf: Indirect buffer to hold data parameters and response
2560 * @buf_size: size of buffer for indirect commands
2561 * @res_type: resource type
2562 * @res_shared: is resource shared
2563 * @desc_id: input - first desc ID to start; output - next desc ID
2564 * @cd: pointer to command details structure or NULL
2567 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2568 struct ice_aqc_get_allocd_res_desc_resp *buf,
2569 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2570 struct ice_sq_cd *cd)
2572 struct ice_aqc_get_allocd_res_desc *cmd;
2573 struct ice_aq_desc desc;
2574 enum ice_status status;
2576 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2578 cmd = &desc.params.get_res_desc;
2581 return ICE_ERR_PARAM;
/* The buffer must hold exactly num_entries response elements */
2583 if (buf_size != (num_entries * sizeof(*buf)))
2584 return ICE_ERR_PARAM;
2586 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode the resource type and the shared flag into one LE16 field */
2588 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2589 ICE_AQC_RES_TYPE_M) | (res_shared ?
2590 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2591 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2593 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2595 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the next descriptor ID so the caller can continue paging */
2597 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2603 * ice_add_mac - Add a MAC address based filter rule
2604 * @hw: pointer to the hardware structure
2605 * @m_list: list of MAC addresses and forwarding information
2607 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2608 * multiple unicast addresses, the function assumes that all the
2609 * addresses are unique in a given add_mac call. It doesn't
2610 * check for duplicates in this case, removing duplicates from a given
2611 * list should be taken care of in the caller of this function.
2614 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2616 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2617 struct ice_fltr_list_entry *m_list_itr;
2618 struct LIST_HEAD_TYPE *rule_head;
2619 u16 elem_sent, total_elem_left;
2620 struct ice_switch_info *sw;
2621 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2622 enum ice_status status = ICE_SUCCESS;
2623 u16 num_unicast = 0;
2627 return ICE_ERR_PARAM;
2629 sw = hw->switch_info;
2630 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* First pass: validate each entry. Multicast (and shared-unicast)
 * addresses are added one-by-one here; exclusive unicast addresses are
 * only checked for duplicates and handled by the bulk AQ update below.
 */
2631 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2633 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2637 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2638 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2639 if (!ice_is_vsi_valid(hw, vsi_handle))
2640 return ICE_ERR_PARAM;
2641 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2642 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2643 /* update the src in case it is VSI num */
2644 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2645 return ICE_ERR_PARAM;
2646 m_list_itr->fltr_info.src = hw_vsi_id;
2647 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2648 IS_ZERO_ETHER_ADDR(add))
2649 return ICE_ERR_PARAM;
2650 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
2651 /* Don't overwrite the unicast address */
2652 ice_acquire_lock(rule_lock);
2653 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2654 &m_list_itr->fltr_info)) {
2655 ice_release_lock(rule_lock);
2656 return ICE_ERR_ALREADY_EXISTS;
2658 ice_release_lock(rule_lock);
2660 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
2661 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
2662 m_list_itr->status =
2663 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2665 if (m_list_itr->status)
2666 return m_list_itr->status;
/* Lock is held from here through the exit label below */
2670 ice_acquire_lock(rule_lock);
2671 /* Exit if no suitable entries were found for adding bulk switch rule */
2673 status = ICE_SUCCESS;
2674 goto ice_add_mac_exit;
2677 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2679 /* Allocate switch rule buffer for the bulk update for unicast */
2680 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2681 s_rule = (struct ice_aqc_sw_rules_elem *)
2682 ice_calloc(hw, num_unicast, s_rule_size);
2684 status = ICE_ERR_NO_MEMORY;
2685 goto ice_add_mac_exit;
/* Second pass: serialize each unicast filter into the bulk buffer */
2689 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2691 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2692 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2694 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2695 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2696 ice_aqc_opc_add_sw_rules);
2697 r_iter = (struct ice_aqc_sw_rules_elem *)
2698 ((u8 *)r_iter + s_rule_size);
2702 /* Call AQ bulk switch rule update for all unicast addresses */
2704 /* Call AQ switch rule in AQ_MAX chunk */
2705 for (total_elem_left = num_unicast; total_elem_left > 0;
2706 total_elem_left -= elem_sent) {
2707 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size is bounded by the AQ maximum buffer length */
2709 elem_sent = min(total_elem_left,
2710 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
2711 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2712 elem_sent, ice_aqc_opc_add_sw_rules,
2715 goto ice_add_mac_exit;
2716 r_iter = (struct ice_aqc_sw_rules_elem *)
2717 ((u8 *)r_iter + (elem_sent * s_rule_size));
2720 /* Fill up rule ID based on the value returned from FW */
2722 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2724 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2725 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2726 struct ice_fltr_mgmt_list_entry *fm_entry;
2728 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2729 f_info->fltr_rule_id =
2730 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
2731 f_info->fltr_act = ICE_FWD_TO_VSI;
2732 /* Create an entry to track this MAC address */
2733 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2734 ice_malloc(hw, sizeof(*fm_entry));
2736 status = ICE_ERR_NO_MEMORY;
2737 goto ice_add_mac_exit;
2739 fm_entry->fltr_info = *f_info;
2740 fm_entry->vsi_count = 1;
2741 /* The book keeping entries will get removed when
2742 * base driver calls remove filter AQ command
2745 LIST_ADD(&fm_entry->list_entry, rule_head);
2746 r_iter = (struct ice_aqc_sw_rules_elem *)
2747 ((u8 *)r_iter + s_rule_size);
2752 ice_release_lock(rule_lock);
2754 ice_free(hw, s_rule);
2759 * ice_add_vlan_internal - Add one VLAN based filter rule
2760 * @hw: pointer to the hardware structure
2761 * @f_entry: filter entry containing one VLAN information
2763 static enum ice_status
2764 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2766 struct ice_switch_info *sw = hw->switch_info;
2767 struct ice_fltr_mgmt_list_entry *v_list_itr;
2768 struct ice_fltr_info *new_fltr, *cur_fltr;
2769 enum ice_sw_lkup_type lkup_type;
2770 u16 vsi_list_id = 0, vsi_handle;
2771 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2772 enum ice_status status = ICE_SUCCESS;
2774 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2775 return ICE_ERR_PARAM;
2777 f_entry->fltr_info.fwd_id.hw_vsi_id =
2778 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2779 new_fltr = &f_entry->fltr_info;
2781 /* VLAN ID should only be 12 bits */
2782 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2783 return ICE_ERR_PARAM;
2785 if (new_fltr->src_id != ICE_SRC_ID_VSI)
2786 return ICE_ERR_PARAM;
2788 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2789 lkup_type = new_fltr->lkup_type;
2790 vsi_handle = new_fltr->vsi_handle;
2791 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2792 ice_acquire_lock(rule_lock);
/* Three cases below: no rule yet for this VLAN; an existing rule whose
 * VSI list is private (ref_cnt == 1); or a rule whose VSI list is
 * shared with other VLAN rules and must be replaced by a new list.
 */
2793 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
2795 struct ice_vsi_list_map_info *map_info = NULL;
2797 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2798 /* All VLAN pruning rules use a VSI list. Check if
2799 * there is already a VSI list containing VSI that we
2800 * want to add. If found, use the same vsi_list_id for
2801 * this new VLAN rule or else create a new list.
2803 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2807 status = ice_create_vsi_list_rule(hw,
2815 /* Convert the action to forwarding to a VSI list. */
2816 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2817 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2820 status = ice_create_pkt_fwd_rule(hw, f_entry);
2822 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2825 status = ICE_ERR_DOES_NOT_EXIST;
2828 /* reuse VSI list for new rule and increment ref_cnt */
2830 v_list_itr->vsi_list_info = map_info;
2831 map_info->ref_cnt++;
2833 v_list_itr->vsi_list_info =
2834 ice_create_vsi_list_map(hw, &vsi_handle,
2838 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2839 /* Update existing VSI list to add new VSI ID only if it used
2842 cur_fltr = &v_list_itr->fltr_info;
2843 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2846 /* If VLAN rule exists and VSI list being used by this rule is
2847 * referenced by more than 1 VLAN rule. Then create a new VSI
2848 * list appending previous VSI with new VSI and update existing
2849 * VLAN rule to point to new VSI list ID
2851 struct ice_fltr_info tmp_fltr;
2852 u16 vsi_handle_arr[2];
2855 /* Current implementation only supports reusing VSI list with
2856 * one VSI count. We should never hit below condition
2858 if (v_list_itr->vsi_count > 1 &&
2859 v_list_itr->vsi_list_info->ref_cnt > 1) {
2860 ice_debug(hw, ICE_DBG_SW,
2861 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
2862 status = ICE_ERR_CFG;
2867 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
2870 /* A rule already exists with the new VSI being added */
2871 if (cur_handle == vsi_handle) {
2872 status = ICE_ERR_ALREADY_EXISTS;
/* Build a two-VSI list from the current member plus the new VSI */
2876 vsi_handle_arr[0] = cur_handle;
2877 vsi_handle_arr[1] = vsi_handle;
2878 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2879 &vsi_list_id, lkup_type);
2883 tmp_fltr = v_list_itr->fltr_info;
2884 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
2885 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2886 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2887 /* Update the previous switch rule to a new VSI list which
2888 * includes current VSI that is requested
2890 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2894 /* before overriding VSI list map info. decrement ref_cnt of
2897 v_list_itr->vsi_list_info->ref_cnt--;
2899 /* now update to newly created list */
2900 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
2901 v_list_itr->vsi_list_info =
2902 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2904 v_list_itr->vsi_count++;
2908 ice_release_lock(rule_lock);
2913 * ice_add_vlan - Add VLAN based filter rule
2914 * @hw: pointer to the hardware structure
2915 * @v_list: list of VLAN entries and forwarding information
2918 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
2920 struct ice_fltr_list_entry *v_list_itr;
2923 return ICE_ERR_PARAM;
2925 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
2927 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2928 return ICE_ERR_PARAM;
2929 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
/* Per-entry status is recorded; iteration stops on the first failure */
2930 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
2931 if (v_list_itr->status)
2932 return v_list_itr->status;
2937 #ifndef NO_MACVLAN_SUPPORT
2939 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
2940 * @hw: pointer to the hardware structure
2941 * @mv_list: list of MAC and VLAN filters
2943 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
2944 * pruning bits enabled, then it is the responsibility of the caller to make
2945 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
2946 * VLAN won't be received on that VSI otherwise.
2949 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
2951 struct ice_fltr_list_entry *mv_list_itr;
2953 if (!mv_list || !hw)
2954 return ICE_ERR_PARAM;
2956 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
2958 enum ice_sw_lkup_type l_type =
2959 mv_list_itr->fltr_info.lkup_type;
2961 if (l_type != ICE_SW_LKUP_MAC_VLAN)
2962 return ICE_ERR_PARAM;
2963 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
/* Per-entry status is recorded; iteration stops on the first failure */
2964 mv_list_itr->status =
2965 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
2967 if (mv_list_itr->status)
2968 return mv_list_itr->status;
2975 * ice_add_eth_mac - Add ethertype and MAC based filter rule
2976 * @hw: pointer to the hardware structure
2977 * @em_list: list of ether type MAC filter, MAC is optional
2979 * This function requires the caller to populate the entries in
2980 * the filter list with the necessary fields (including flags to
2981 * indicate Tx or Rx rules).
2984 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
2986 struct ice_fltr_list_entry *em_list_itr;
2988 if (!em_list || !hw)
2989 return ICE_ERR_PARAM;
2991 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
2993 enum ice_sw_lkup_type l_type =
2994 em_list_itr->fltr_info.lkup_type;
/* Both plain-ethertype and ethertype+MAC recipes are accepted */
2996 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2997 l_type != ICE_SW_LKUP_ETHERTYPE)
2998 return ICE_ERR_PARAM;
3000 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3002 if (em_list_itr->status)
3003 return em_list_itr->status;
3009 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3010 * @hw: pointer to the hardware structure
3011 * @em_list: list of ethertype or ethertype MAC entries
3014 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3016 struct ice_fltr_list_entry *em_list_itr, *tmp;
3018 if (!em_list || !hw)
3019 return ICE_ERR_PARAM;
/* SAFE variant: removal may unlink entries while iterating */
3021 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3023 enum ice_sw_lkup_type l_type =
3024 em_list_itr->fltr_info.lkup_type;
3026 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3027 l_type != ICE_SW_LKUP_ETHERTYPE)
3028 return ICE_ERR_PARAM;
3030 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3032 if (em_list_itr->status)
3033 return em_list_itr->status;
3040 * ice_rem_sw_rule_info
3041 * @hw: pointer to the hardware structure
3042 * @rule_head: pointer to the switch list structure that we want to delete
3045 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3047 if (!LIST_EMPTY(rule_head)) {
3048 struct ice_fltr_mgmt_list_entry *entry;
3049 struct ice_fltr_mgmt_list_entry *tmp;
/* SAFE iteration: entries are unlinked and freed as we walk */
3051 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3052 ice_fltr_mgmt_list_entry, list_entry) {
3053 LIST_DEL(&entry->list_entry);
3054 ice_free(hw, entry);
3060 * ice_rem_adv_rule_info
3061 * @hw: pointer to the hardware structure
3062 * @rule_head: pointer to the switch list structure that we want to delete
3065 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3067 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3068 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3070 if (LIST_EMPTY(rule_head))
3073 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3074 ice_adv_fltr_mgmt_list_entry, list_entry) {
3075 LIST_DEL(&lst_itr->list_entry);
/* Advanced entries own a separately-allocated lookup array;
 * free it before the entry itself.
 */
3076 ice_free(hw, lst_itr->lkups);
3077 ice_free(hw, lst_itr);
3082 * ice_rem_all_sw_rules_info
3083 * @hw: pointer to the hardware structure
3085 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3087 struct ice_switch_info *sw = hw->switch_info;
3090 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3091 struct LIST_HEAD_TYPE *rule_head;
3093 rule_head = &sw->recp_list[i].filt_rules;
/* Regular and advanced recipes store different entry types, so
 * each list is torn down with its matching helper.
 */
3094 if (!sw->recp_list[i].adv_rule)
3095 ice_rem_sw_rule_info(hw, rule_head);
3097 ice_rem_adv_rule_info(hw, rule_head);
3102 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3103 * @pi: pointer to the port_info structure
3104 * @vsi_handle: VSI handle to set as default
3105 * @set: true to add the above mentioned switch rule, false to remove it
3106 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3108 * add filter rule to set/unset given VSI as default VSI for the switch
3109 * (represented by swid)
3112 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3115 struct ice_aqc_sw_rules_elem *s_rule;
3116 struct ice_fltr_info f_info;
3117 struct ice_hw *hw = pi->hw;
3118 enum ice_adminq_opc opcode;
3119 enum ice_status status;
3123 if (!ice_is_vsi_valid(hw, vsi_handle))
3124 return ICE_ERR_PARAM;
3125 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* An add element carries the dummy Ethernet header; a remove does not */
3127 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3128 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3129 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3131 return ICE_ERR_NO_MEMORY;
3133 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3135 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3136 f_info.flag = direction;
3137 f_info.fltr_act = ICE_FWD_TO_VSI;
3138 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Source differs per direction: logical port for Rx, HW VSI for Tx */
3140 if (f_info.flag & ICE_FLTR_RX) {
3141 f_info.src = pi->lport;
3142 f_info.src_id = ICE_SRC_ID_LPORT;
3144 f_info.fltr_rule_id =
3145 pi->dflt_rx_vsi_rule_id;
3146 } else if (f_info.flag & ICE_FLTR_TX) {
3147 f_info.src_id = ICE_SRC_ID_VSI;
3148 f_info.src = hw_vsi_id;
3150 f_info.fltr_rule_id =
3151 pi->dflt_tx_vsi_rule_id;
3155 opcode = ice_aqc_opc_add_sw_rules;
3157 opcode = ice_aqc_opc_remove_sw_rules;
3159 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3161 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3162 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, cache the FW-assigned rule index and default VSI
 * (or invalidate them on a remove) per direction in port_info.
 */
3165 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3167 if (f_info.flag & ICE_FLTR_TX) {
3168 pi->dflt_tx_vsi_num = hw_vsi_id;
3169 pi->dflt_tx_vsi_rule_id = index;
3170 } else if (f_info.flag & ICE_FLTR_RX) {
3171 pi->dflt_rx_vsi_num = hw_vsi_id;
3172 pi->dflt_rx_vsi_rule_id = index;
3175 if (f_info.flag & ICE_FLTR_TX) {
3176 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3177 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3178 } else if (f_info.flag & ICE_FLTR_RX) {
3179 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3180 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3185 ice_free(hw, s_rule);
3190 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3191 * @hw: pointer to the hardware structure
3192 * @recp_id: lookup type for which the specified rule needs to be searched
3193 * @f_info: rule information
3195 * Helper function to search for a unicast rule entry - this is to be used
3196 * to remove unicast MAC filter that is not shared with other VSIs on the
3199 * Returns pointer to entry storing the rule if found
3201 static struct ice_fltr_mgmt_list_entry *
3202 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3203 struct ice_fltr_info *f_info)
3205 struct ice_switch_info *sw = hw->switch_info;
3206 struct ice_fltr_mgmt_list_entry *list_itr;
3207 struct LIST_HEAD_TYPE *list_head;
3209 list_head = &sw->recp_list[recp_id].filt_rules;
3210 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Unlike ice_find_rule_entry(), the destination HW VSI must also
 * match, so a unicast address owned by another VSI is not returned.
 */
3212 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3213 sizeof(f_info->l_data)) &&
3214 f_info->fwd_id.hw_vsi_id ==
3215 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3216 f_info->flag == list_itr->fltr_info.flag)
3223 * ice_remove_mac - remove a MAC address based filter rule
3224 * @hw: pointer to the hardware structure
3225 * @m_list: list of MAC addresses and forwarding information
3227 * This function removes either a MAC filter rule or a specific VSI from a
3228 * VSI list for a multicast MAC address.
3230 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3231 * ice_add_mac. Caller should be aware that this call will only work if all
3232 * the entries passed into m_list were added previously. It will not attempt to
3233 * do a partial remove of entries that were found.
3236 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3238 struct ice_fltr_list_entry *list_itr, *tmp;
3239 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3242 return ICE_ERR_PARAM;
3244 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3245 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3247 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3248 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3251 if (l_type != ICE_SW_LKUP_MAC)
3252 return ICE_ERR_PARAM;
3254 vsi_handle = list_itr->fltr_info.vsi_handle;
3255 if (!ice_is_vsi_valid(hw, vsi_handle))
3256 return ICE_ERR_PARAM;
3258 list_itr->fltr_info.fwd_id.hw_vsi_id =
3259 ice_get_hw_vsi_num(hw, vsi_handle);
/* For exclusive unicast addresses, verify this VSI actually owns
 * the filter before attempting removal (VSI-scoped lookup).
 */
3260 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3261 /* Don't remove the unicast address that belongs to
3262 * another VSI on the switch, since it is not being
3265 ice_acquire_lock(rule_lock);
3266 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3267 &list_itr->fltr_info)) {
3268 ice_release_lock(rule_lock);
3269 return ICE_ERR_DOES_NOT_EXIST;
3271 ice_release_lock(rule_lock);
3273 list_itr->status = ice_remove_rule_internal(hw,
3276 if (list_itr->status)
3277 return list_itr->status;
3283 * ice_remove_vlan - Remove VLAN based filter rule
3284 * @hw: pointer to the hardware structure
3285 * @v_list: list of VLAN entries and forwarding information
3288 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3290 struct ice_fltr_list_entry *v_list_itr, *tmp;
3293 return ICE_ERR_PARAM;
/* SAFE variant: removal may unlink entries while iterating */
3295 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3297 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3299 if (l_type != ICE_SW_LKUP_VLAN)
3300 return ICE_ERR_PARAM;
3301 v_list_itr->status = ice_remove_rule_internal(hw,
3304 if (v_list_itr->status)
3305 return v_list_itr->status;
3310 #ifndef NO_MACVLAN_SUPPORT
3312 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3313 * @hw: pointer to the hardware structure
3314 * @v_list: list of MAC VLAN entries and forwarding information
3317 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3319 struct ice_fltr_list_entry *v_list_itr, *tmp;
3322 return ICE_ERR_PARAM;
/* SAFE variant: removal may unlink entries while iterating */
3324 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3326 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3328 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3329 return ICE_ERR_PARAM;
3330 v_list_itr->status =
3331 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3333 if (v_list_itr->status)
3334 return v_list_itr->status;
3338 #endif /* !NO_MACVLAN_SUPPORT */
3341 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3342 * @fm_entry: filter entry to inspect
3343 * @vsi_handle: VSI handle to compare with filter info
3346 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* The filter is "used" by the VSI either directly (FWD_TO_VSI with a
 * matching handle) or via the VSI's membership in the rule's VSI list.
 */
3348 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3349 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3350 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3351 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3356 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3357 * @hw: pointer to the hardware structure
3358 * @vsi_handle: VSI handle to remove filters from
3359 * @vsi_list_head: pointer to the list to add entry to
3360 * @fi: pointer to fltr_info of filter entry to copy & add
3362 * Helper function, used when creating a list of filters to remove from
3363 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3364 * original filter entry, with the exception of fltr_info.fltr_act and
3365 * fltr_info.fwd_id fields. These are set such that later logic can
3366 * extract which VSI to remove the fltr from, and pass on that information.
3368 static enum ice_status
3369 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3370 struct LIST_HEAD_TYPE *vsi_list_head,
3371 struct ice_fltr_info *fi)
3373 struct ice_fltr_list_entry *tmp;
3375 /* this memory is freed up in the caller function
3376 * once filters for this VSI are removed
3378 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3380 return ICE_ERR_NO_MEMORY;
3382 tmp->fltr_info = *fi;
3384 /* Overwrite these fields to indicate which VSI to remove filter from,
3385 * so find and remove logic can extract the information from the
3386 * list entries. Note that original entries will still have proper
3389 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3390 tmp->fltr_info.vsi_handle = vsi_handle;
3391 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3393 LIST_ADD(&tmp->list_entry, vsi_list_head);
3399 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3400 * @hw: pointer to the hardware structure
3401 * @vsi_handle: VSI handle to remove filters from
3402 * @lkup_list_head: pointer to the list that has certain lookup type filters
3403 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3405 * Locates all filters in lkup_list_head that are used by the given VSI,
3406 * and adds COPIES of those entries to vsi_list_head (intended to be used
3407 * to remove the listed filters).
3408 * Note that this means all entries in vsi_list_head must be explicitly
3409 * deallocated by the caller when done with list.
3411 static enum ice_status
3412 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3413 struct LIST_HEAD_TYPE *lkup_list_head,
3414 struct LIST_HEAD_TYPE *vsi_list_head)
3416 struct ice_fltr_mgmt_list_entry *fm_entry;
3417 enum ice_status status = ICE_SUCCESS;
3419 /* check to make sure VSI ID is valid and within boundary */
3420 if (!ice_is_vsi_valid(hw, vsi_handle))
3421 return ICE_ERR_PARAM;
3423 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3424 ice_fltr_mgmt_list_entry, list_entry) {
3425 struct ice_fltr_info *fi;
3427 fi = &fm_entry->fltr_info;
3428 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3431 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3441 * ice_determine_promisc_mask
3442 * @fi: filter info to parse
3444 * Helper function to determine which ICE_PROMISC_ mask corresponds
3445 * to given filter into.
3447 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3449 u16 vid = fi->l_data.mac_vlan.vlan_id;
3450 u8 *macaddr = fi->l_data.mac.mac_addr;
3451 bool is_tx_fltr = false;
3452 u8 promisc_mask = 0;
3454 if (fi->flag == ICE_FLTR_TX)
3457 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3458 promisc_mask |= is_tx_fltr ?
3459 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3460 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3461 promisc_mask |= is_tx_fltr ?
3462 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3463 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3464 promisc_mask |= is_tx_fltr ?
3465 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3467 promisc_mask |= is_tx_fltr ?
3468 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3470 return promisc_mask;
3474 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3475 * @hw: pointer to the hardware structure
3476 * @vsi_handle: VSI handle to retrieve info from
3477 * @promisc_mask: pointer to mask to be filled in
3478 * @vid: VLAN ID of promisc VLAN VSI
3481 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3484 struct ice_switch_info *sw = hw->switch_info;
3485 struct ice_fltr_mgmt_list_entry *itr;
3486 struct LIST_HEAD_TYPE *rule_head;
3487 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3489 if (!ice_is_vsi_valid(hw, vsi_handle))
3490 return ICE_ERR_PARAM;
3494 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3495 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3497 ice_acquire_lock(rule_lock);
3498 LIST_FOR_EACH_ENTRY(itr, rule_head,
3499 ice_fltr_mgmt_list_entry, list_entry) {
3500 /* Continue if this filter doesn't apply to this VSI or the
3501 * VSI ID is not in the VSI map for this filter
3503 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3506 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3508 ice_release_lock(rule_lock);
3514 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3515 * @hw: pointer to the hardware structure
3516 * @vsi_handle: VSI handle to retrieve info from
3517 * @promisc_mask: pointer to mask to be filled in
3518 * @vid: VLAN ID of promisc VLAN VSI
3521 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3524 struct ice_switch_info *sw = hw->switch_info;
3525 struct ice_fltr_mgmt_list_entry *itr;
3526 struct LIST_HEAD_TYPE *rule_head;
3527 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3529 if (!ice_is_vsi_valid(hw, vsi_handle))
3530 return ICE_ERR_PARAM;
3534 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3535 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3537 ice_acquire_lock(rule_lock);
3538 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3540 /* Continue if this filter doesn't apply to this VSI or the
3541 * VSI ID is not in the VSI map for this filter
3543 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3546 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3548 ice_release_lock(rule_lock);
3554 * ice_remove_promisc - Remove promisc based filter rules
3555 * @hw: pointer to the hardware structure
3556 * @recp_id: recipe ID for which the rule needs to removed
3557 * @v_list: list of promisc entries
3559 static enum ice_status
3560 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3561 struct LIST_HEAD_TYPE *v_list)
3563 struct ice_fltr_list_entry *v_list_itr, *tmp;
3565 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3567 v_list_itr->status =
3568 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3569 if (v_list_itr->status)
3570 return v_list_itr->status;
3576 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3577 * @hw: pointer to the hardware structure
3578 * @vsi_handle: VSI handle to clear mode
3579 * @promisc_mask: mask of promiscuous config bits to clear
3580 * @vid: VLAN ID to clear VLAN promiscuous
3583 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3586 struct ice_switch_info *sw = hw->switch_info;
3587 struct ice_fltr_list_entry *fm_entry, *tmp;
3588 struct LIST_HEAD_TYPE remove_list_head;
3589 struct ice_fltr_mgmt_list_entry *itr;
3590 struct LIST_HEAD_TYPE *rule_head;
3591 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3592 enum ice_status status = ICE_SUCCESS;
3595 if (!ice_is_vsi_valid(hw, vsi_handle))
3596 return ICE_ERR_PARAM;
3599 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3601 recipe_id = ICE_SW_LKUP_PROMISC;
3603 rule_head = &sw->recp_list[recipe_id].filt_rules;
3604 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3606 INIT_LIST_HEAD(&remove_list_head);
3608 ice_acquire_lock(rule_lock);
3609 LIST_FOR_EACH_ENTRY(itr, rule_head,
3610 ice_fltr_mgmt_list_entry, list_entry) {
3611 u8 fltr_promisc_mask = 0;
3613 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3616 fltr_promisc_mask |=
3617 ice_determine_promisc_mask(&itr->fltr_info);
3619 /* Skip if filter is not completely specified by given mask */
3620 if (fltr_promisc_mask & ~promisc_mask)
3623 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3627 ice_release_lock(rule_lock);
3628 goto free_fltr_list;
3631 ice_release_lock(rule_lock);
3633 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3636 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3637 ice_fltr_list_entry, list_entry) {
3638 LIST_DEL(&fm_entry->list_entry);
3639 ice_free(hw, fm_entry);
3646 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3647 * @hw: pointer to the hardware structure
3648 * @vsi_handle: VSI handle to configure
3649 * @promisc_mask: mask of promiscuous config bits
3650 * @vid: VLAN ID to set VLAN promiscuous
3653 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3655 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3656 struct ice_fltr_list_entry f_list_entry;
3657 struct ice_fltr_info new_fltr;
3658 enum ice_status status = ICE_SUCCESS;
3664 ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3666 if (!ice_is_vsi_valid(hw, vsi_handle))
3667 return ICE_ERR_PARAM;
3668 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3670 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
3672 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3673 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3674 new_fltr.l_data.mac_vlan.vlan_id = vid;
3675 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3677 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3678 recipe_id = ICE_SW_LKUP_PROMISC;
3681 /* Separate filters must be set for each direction/packet type
3682 * combination, so we will loop over the mask value, store the
3683 * individual type, and clear it out in the input mask as it
3686 while (promisc_mask) {
3692 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3693 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3694 pkt_type = UCAST_FLTR;
3695 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3696 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3697 pkt_type = UCAST_FLTR;
3699 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3700 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3701 pkt_type = MCAST_FLTR;
3702 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3703 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3704 pkt_type = MCAST_FLTR;
3706 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3707 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3708 pkt_type = BCAST_FLTR;
3709 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3710 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3711 pkt_type = BCAST_FLTR;
3715 /* Check for VLAN promiscuous flag */
3716 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3717 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3718 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3719 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3723 /* Set filter DA based on packet type */
3724 mac_addr = new_fltr.l_data.mac.mac_addr;
3725 if (pkt_type == BCAST_FLTR) {
3726 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
3727 } else if (pkt_type == MCAST_FLTR ||
3728 pkt_type == UCAST_FLTR) {
3729 /* Use the dummy ether header DA */
3730 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
3731 ICE_NONDMA_TO_NONDMA);
3732 if (pkt_type == MCAST_FLTR)
3733 mac_addr[0] |= 0x1; /* Set multicast bit */
3736 /* Need to reset this to zero for all iterations */
3739 new_fltr.flag |= ICE_FLTR_TX;
3740 new_fltr.src = hw_vsi_id;
3742 new_fltr.flag |= ICE_FLTR_RX;
3743 new_fltr.src = hw->port_info->lport;
3746 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3747 new_fltr.vsi_handle = vsi_handle;
3748 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3749 f_list_entry.fltr_info = new_fltr;
3751 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3752 if (status != ICE_SUCCESS)
3753 goto set_promisc_exit;
3761 * ice_set_vlan_vsi_promisc
3762 * @hw: pointer to the hardware structure
3763 * @vsi_handle: VSI handle to configure
3764 * @promisc_mask: mask of promiscuous config bits
3765 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3767 * Configure VSI with all associated VLANs to given promiscuous mode(s)
3770 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3771 bool rm_vlan_promisc)
3773 struct ice_switch_info *sw = hw->switch_info;
3774 struct ice_fltr_list_entry *list_itr, *tmp;
3775 struct LIST_HEAD_TYPE vsi_list_head;
3776 struct LIST_HEAD_TYPE *vlan_head;
3777 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
3778 enum ice_status status;
3781 INIT_LIST_HEAD(&vsi_list_head);
3782 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3783 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3784 ice_acquire_lock(vlan_lock);
3785 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3787 ice_release_lock(vlan_lock);
3789 goto free_fltr_list;
3791 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
3793 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3794 if (rm_vlan_promisc)
3795 status = ice_clear_vsi_promisc(hw, vsi_handle,
3796 promisc_mask, vlan_id);
3798 status = ice_set_vsi_promisc(hw, vsi_handle,
3799 promisc_mask, vlan_id);
3805 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
3806 ice_fltr_list_entry, list_entry) {
3807 LIST_DEL(&list_itr->list_entry);
3808 ice_free(hw, list_itr);
3814 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
3815 * @hw: pointer to the hardware structure
3816 * @vsi_handle: VSI handle to remove filters from
3817 * @lkup: switch rule filter lookup type
3820 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
3821 enum ice_sw_lkup_type lkup)
3823 struct ice_switch_info *sw = hw->switch_info;
3824 struct ice_fltr_list_entry *fm_entry;
3825 struct LIST_HEAD_TYPE remove_list_head;
3826 struct LIST_HEAD_TYPE *rule_head;
3827 struct ice_fltr_list_entry *tmp;
3828 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3829 enum ice_status status;
3831 INIT_LIST_HEAD(&remove_list_head);
3832 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3833 rule_head = &sw->recp_list[lkup].filt_rules;
3834 ice_acquire_lock(rule_lock);
3835 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
3837 ice_release_lock(rule_lock);
3842 case ICE_SW_LKUP_MAC:
3843 ice_remove_mac(hw, &remove_list_head);
3845 case ICE_SW_LKUP_VLAN:
3846 ice_remove_vlan(hw, &remove_list_head);
3848 case ICE_SW_LKUP_PROMISC:
3849 case ICE_SW_LKUP_PROMISC_VLAN:
3850 ice_remove_promisc(hw, lkup, &remove_list_head);
3852 case ICE_SW_LKUP_MAC_VLAN:
3853 #ifndef NO_MACVLAN_SUPPORT
3854 ice_remove_mac_vlan(hw, &remove_list_head);
3856 ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n");
3857 #endif /* !NO_MACVLAN_SUPPORT */
3859 case ICE_SW_LKUP_ETHERTYPE:
3860 case ICE_SW_LKUP_ETHERTYPE_MAC:
3861 ice_remove_eth_mac(hw, &remove_list_head);
3863 case ICE_SW_LKUP_DFLT:
3864 ice_debug(hw, ICE_DBG_SW,
3865 "Remove filters for this lookup type hasn't been implemented yet\n");
3867 case ICE_SW_LKUP_LAST:
3868 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
3872 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3873 ice_fltr_list_entry, list_entry) {
3874 LIST_DEL(&fm_entry->list_entry);
3875 ice_free(hw, fm_entry);
3880 * ice_remove_vsi_fltr - Remove all filters for a VSI
3881 * @hw: pointer to the hardware structure
3882 * @vsi_handle: VSI handle to remove filters from
3884 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3886 ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
3888 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
3889 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
3890 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
3891 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
3892 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
3893 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
3894 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
3895 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
3899 * ice_alloc_res_cntr - allocating resource counter
3900 * @hw: pointer to the hardware structure
3901 * @type: type of resource
3902 * @alloc_shared: if set it is shared else dedicated
3903 * @num_items: number of entries requested for FD resource type
3904 * @counter_id: counter index returned by AQ call
3907 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3910 struct ice_aqc_alloc_free_res_elem *buf;
3911 enum ice_status status;
3914 /* Allocate resource */
3915 buf_len = sizeof(*buf);
3916 buf = (struct ice_aqc_alloc_free_res_elem *)
3917 ice_malloc(hw, buf_len);
3919 return ICE_ERR_NO_MEMORY;
3921 buf->num_elems = CPU_TO_LE16(num_items);
3922 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3923 ICE_AQC_RES_TYPE_M) | alloc_shared);
3925 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3926 ice_aqc_opc_alloc_res, NULL);
3930 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
3938 * ice_free_res_cntr - free resource counter
3939 * @hw: pointer to the hardware structure
3940 * @type: type of resource
3941 * @alloc_shared: if set it is shared else dedicated
3942 * @num_items: number of entries to be freed for FD resource type
3943 * @counter_id: counter ID resource which needs to be freed
3946 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3949 struct ice_aqc_alloc_free_res_elem *buf;
3950 enum ice_status status;
3954 buf_len = sizeof(*buf);
3955 buf = (struct ice_aqc_alloc_free_res_elem *)
3956 ice_malloc(hw, buf_len);
3958 return ICE_ERR_NO_MEMORY;
3960 buf->num_elems = CPU_TO_LE16(num_items);
3961 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3962 ICE_AQC_RES_TYPE_M) | alloc_shared);
3963 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
3965 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3966 ice_aqc_opc_free_res, NULL);
3968 ice_debug(hw, ICE_DBG_SW,
3969 "counter resource could not be freed\n");
3976 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
3977 * @hw: pointer to the hardware structure
3978 * @counter_id: returns counter index
3980 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
3982 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
3983 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
3988 * ice_free_vlan_res_counter - Free counter resource for VLAN type
3989 * @hw: pointer to the hardware structure
3990 * @counter_id: counter index to be freed
3992 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
3994 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
3995 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4000 * ice_alloc_res_lg_act - add large action resource
4001 * @hw: pointer to the hardware structure
4002 * @l_id: large action ID to fill it in
4003 * @num_acts: number of actions to hold with a large action entry
4005 static enum ice_status
4006 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4008 struct ice_aqc_alloc_free_res_elem *sw_buf;
4009 enum ice_status status;
4012 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4013 return ICE_ERR_PARAM;
4015 /* Allocate resource for large action */
4016 buf_len = sizeof(*sw_buf);
4017 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4018 ice_malloc(hw, buf_len);
4020 return ICE_ERR_NO_MEMORY;
4022 sw_buf->num_elems = CPU_TO_LE16(1);
4024 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4025 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4026 * If num_acts is greater than 2, then use
4027 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4028 * The num_acts cannot exceed 4. This was ensured at the
4029 * beginning of the function.
4032 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4033 else if (num_acts == 2)
4034 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4036 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4038 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4039 ice_aqc_opc_alloc_res, NULL);
4041 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4043 ice_free(hw, sw_buf);
4048 * ice_add_mac_with_sw_marker - add filter with sw marker
4049 * @hw: pointer to the hardware structure
4050 * @f_info: filter info structure containing the MAC filter information
4051 * @sw_marker: sw marker to tag the Rx descriptor with
4054 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4057 struct ice_switch_info *sw = hw->switch_info;
4058 struct ice_fltr_mgmt_list_entry *m_entry;
4059 struct ice_fltr_list_entry fl_info;
4060 struct LIST_HEAD_TYPE l_head;
4061 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4062 enum ice_status ret;
4066 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4067 return ICE_ERR_PARAM;
4069 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4070 return ICE_ERR_PARAM;
4072 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4073 return ICE_ERR_PARAM;
4075 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4076 return ICE_ERR_PARAM;
4077 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4079 /* Add filter if it doesn't exist so then the adding of large
4080 * action always results in update
4083 INIT_LIST_HEAD(&l_head);
4084 fl_info.fltr_info = *f_info;
4085 LIST_ADD(&fl_info.list_entry, &l_head);
4087 entry_exists = false;
4088 ret = ice_add_mac(hw, &l_head);
4089 if (ret == ICE_ERR_ALREADY_EXISTS)
4090 entry_exists = true;
4094 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4095 ice_acquire_lock(rule_lock);
4096 /* Get the book keeping entry for the filter */
4097 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4101 /* If counter action was enabled for this rule then don't enable
4102 * sw marker large action
4104 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4105 ret = ICE_ERR_PARAM;
4109 /* if same marker was added before */
4110 if (m_entry->sw_marker_id == sw_marker) {
4111 ret = ICE_ERR_ALREADY_EXISTS;
4115 /* Allocate a hardware table entry to hold large act. Three actions
4116 * for marker based large action
4118 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4122 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4125 /* Update the switch rule to add the marker action */
4126 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4128 ice_release_lock(rule_lock);
4133 ice_release_lock(rule_lock);
4134 /* only remove entry if it did not exist previously */
4136 ret = ice_remove_mac(hw, &l_head);
4142 * ice_add_mac_with_counter - add filter with counter enabled
4143 * @hw: pointer to the hardware structure
4144 * @f_info: pointer to filter info structure containing the MAC filter
4148 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4150 struct ice_switch_info *sw = hw->switch_info;
4151 struct ice_fltr_mgmt_list_entry *m_entry;
4152 struct ice_fltr_list_entry fl_info;
4153 struct LIST_HEAD_TYPE l_head;
4154 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4155 enum ice_status ret;
4160 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4161 return ICE_ERR_PARAM;
4163 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4164 return ICE_ERR_PARAM;
4166 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4167 return ICE_ERR_PARAM;
4168 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4170 entry_exist = false;
4172 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4174 /* Add filter if it doesn't exist so then the adding of large
4175 * action always results in update
4177 INIT_LIST_HEAD(&l_head);
4179 fl_info.fltr_info = *f_info;
4180 LIST_ADD(&fl_info.list_entry, &l_head);
4182 ret = ice_add_mac(hw, &l_head);
4183 if (ret == ICE_ERR_ALREADY_EXISTS)
4188 ice_acquire_lock(rule_lock);
4189 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4191 ret = ICE_ERR_BAD_PTR;
4195 /* Don't enable counter for a filter for which sw marker was enabled */
4196 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4197 ret = ICE_ERR_PARAM;
4201 /* If a counter was already enabled then don't need to add again */
4202 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4203 ret = ICE_ERR_ALREADY_EXISTS;
4207 /* Allocate a hardware table entry to VLAN counter */
4208 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4212 /* Allocate a hardware table entry to hold large act. Two actions for
4213 * counter based large action
4215 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4219 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4222 /* Update the switch rule to add the counter action */
4223 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4225 ice_release_lock(rule_lock);
4230 ice_release_lock(rule_lock);
4231 /* only remove entry if it did not exist previously */
4233 ret = ice_remove_mac(hw, &l_head);
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				  26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				  26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12 } },
	{ ICE_GENEVE,		{ 8, 10, 12 } },
	{ ICE_VXLAN_GPE,	{ 0, 2, 4 } },
	{ ICE_NVGRE,		{ 0, 2 } },
	{ ICE_PROTOCOL_LAST,	{ 0 } }
};
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * preferred grouping below.
 */
static const struct ice_pref_recipe_group ice_recipe_pack[] = {
	{3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
	      { ICE_MAC_OFOS_HW, 4, 0 } } },
	{4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
	      { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } } },
	{2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } } },
	{2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } } },
};
/* Maps each software protocol type to its hardware protocol ID. The table is
 * terminated by the ICE_PROTOCOL_LAST sentinel and is searched linearly by
 * ice_prot_type_to_id().
 */
static const struct ice_protocol_entry ice_prot_id_tbl[] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_PROTOCOL_LAST,	0 }
};
4298 * ice_find_recp - find a recipe
4299 * @hw: pointer to the hardware structure
4300 * @lkup_exts: extension sequence to match
4302 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4304 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4306 struct ice_sw_recipe *recp;
4309 ice_get_recp_to_prof_map(hw);
4310 /* Initialize available_result_ids which tracks available result idx */
4311 for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
4312 ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
4313 available_result_ids);
4315 /* Walk through existing recipes to find a match */
4316 recp = hw->switch_info->recp_list;
4317 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4318 /* If recipe was not created for this ID, in SW bookkeeping,
4319 * check if FW has an entry for this recipe. If the FW has an
4320 * entry update it in our SW bookkeeping and continue with the
4323 if (!recp[i].recp_created)
4324 if (ice_get_recp_frm_fw(hw,
4325 hw->switch_info->recp_list, i))
4328 /* if number of words we are looking for match */
4329 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4330 struct ice_fv_word *a = lkup_exts->fv_words;
4331 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
4335 for (p = 0; p < lkup_exts->n_val_words; p++) {
4336 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4338 if (a[p].off == b[q].off &&
4339 a[p].prot_id == b[q].prot_id)
4340 /* Found the "p"th word in the
4345 /* After walking through all the words in the
4346 * "i"th recipe if "p"th word was not found then
4347 * this recipe is not what we are looking for.
4348 * So break out from this loop and try the next
4351 if (q >= recp[i].lkup_exts.n_val_words) {
4356 /* If for "i"th recipe the found was never set to false
4357 * then it means we found our match
4360 return i; /* Return the recipe ID */
4363 return ICE_MAX_NUM_RECIPES;
4367 * ice_prot_type_to_id - get protocol ID from protocol type
4368 * @type: protocol type
4369 * @id: pointer to variable that will receive the ID
4371 * Returns true if found, false otherwise
4373 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4377 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4378 if (ice_prot_id_tbl[i].type == type) {
4379 *id = ice_prot_id_tbl[i].protocol_id;
4386 * ice_find_valid_words - count valid words
4387 * @rule: advanced rule with lookup information
4388 * @lkup_exts: byte offset extractions of the words that are valid
4390 * calculate valid words in a lookup rule using mask value
4393 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4394 struct ice_prot_lkup_ext *lkup_exts)
4400 if (!ice_prot_type_to_id(rule->type, &prot_id))
4403 word = lkup_exts->n_val_words;
4405 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4406 if (((u16 *)&rule->m_u)[j] == 0xffff &&
4407 rule->type < ARRAY_SIZE(ice_prot_ext)) {
4408 /* No more space to accommodate */
4409 if (word >= ICE_MAX_CHAIN_WORDS)
4411 lkup_exts->fv_words[word].off =
4412 ice_prot_ext[rule->type].offs[j];
4413 lkup_exts->fv_words[word].prot_id =
4414 ice_prot_id_tbl[rule->type].protocol_id;
4418 ret_val = word - lkup_exts->n_val_words;
4419 lkup_exts->n_val_words = word;
4425 * ice_find_prot_off_ind - check for specific ID and offset in rule
4426 * @lkup_exts: an array of protocol header extractions
4427 * @prot_type: protocol type to check
4428 * @off: expected offset of the extraction
4430 * Check if the prot_ext has given protocol ID and offset
4433 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4438 for (j = 0; j < lkup_exts->n_val_words; j++)
4439 if (lkup_exts->fv_words[j].off == off &&
4440 lkup_exts->fv_words[j].prot_id == prot_type)
4443 return ICE_MAX_CHAIN_WORDS;
/**
 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
 * @lkup_exts: an array of protocol header extractions
 * @r_policy: preferred recipe grouping policy
 *
 * Helper function to check if given recipe group is subset we need to check if
 * all the words described by the given recipe group exist in the advanced rule
 * look up information
 */
ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
		     const struct ice_pref_recipe_group *r_policy)
	/* Matched word indexes; one slot per word a recipe can hold */
	u8 ind[ICE_NUM_WORDS_RECIPE];

	/* check if everything in the r_policy is part of the entire rule */
	for (i = 0; i < r_policy->n_val_pairs; i++) {
		j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
					  r_policy->pairs[i].off);
		/* ICE_MAX_CHAIN_WORDS is the "not found" sentinel: any
		 * missing pair means this policy is not a subset
		 */
		if (j >= ICE_MAX_CHAIN_WORDS)
		/* store the indexes temporarily found by the find function
		 * this will be used to mark the words as 'done'
		 */
	/* If the entire policy recipe was a true match, then mark the fields
	 * that are covered by the recipe as 'done' meaning that these words
	 * will be clumped together in one recipe.
	 * "Done" here means in our searching if certain recipe group
	 * matches or is subset of the given rule, then we mark all
	 * the corresponding offsets as found. So the remaining recipes should
	 * be created with whatever words that were left.
	 */
	for (i = 0; i < count; i++) {
		/* NOTE(review): 'in' presumably carries ind[i]; its
		 * assignment is elided in this excerpt -- confirm against
		 * the full source.
		 */
		ice_set_bit(in, lkup_exts->done);
/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 */
static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct LIST_HEAD_TYPE *rg_list,
	/* Current (possibly partially filled) group; NULL forces a new
	 * allocation on the first not-done word.
	 */
	struct ice_pref_recipe_group *grp = NULL;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!ice_is_bit_set(lkup_exts->done, j)) {
			/* group is full (ICE_NUM_WORDS_RECIPE pairs) or does
			 * not exist yet: start a new recipe group entry
			 */
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = (struct ice_recp_grp_entry *)
					ice_malloc(hw, sizeof(*entry));
					return ICE_ERR_NO_MEMORY;
				/* caller owns rg_list and frees the entries */
				LIST_ADD(&entry->l_entry, rg_list);
				grp = &entry->r_group;
			/* append this word's protocol/offset pair to the
			 * current group
			 */
			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
 */
ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
		       struct LIST_HEAD_TYPE *rg_list)
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;

	/* nothing to index against without at least one field vector */
	if (LIST_EMPTY(fv_list))

	/* Only the first field vector in the list is consulted here */
	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
	fv_ext = fv->fv_ptr->ew;

	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;

			pr = &rg->r_group.pairs[i];
			/* search the extraction words of the SW block's
			 * field vector for a prot_id/off match
			 */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					/* Store index of field vector */
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @match_tun: if field vector index for tunnel needs to be programmed
 *
 * Allocates recipe IDs, builds the recipe buffer(s) (chaining them when the
 * rule needs more than one 4-word group), programs them via the add-recipe
 * admin queue command, and mirrors the result into hw->switch_info.
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	if (rm->n_grp_count > 1)
	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
	    (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
		return ICE_ERR_MAX_LIMIT;

	/* scratch buffer big enough to read back the whole recipe table */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
		return ICE_ERR_NO_MEMORY;

	/* one output element per recipe group to be programmed */
	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
		status = ICE_ERR_NO_MEMORY;

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* read back existing recipes; tmp[0] later serves as a template */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
	if (status || recipe_count == 0)

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ICE_CHAIN_FV_INDEX_START -
		ice_find_first_bit(available_result_ids,
				   ICE_CHAIN_FV_INDEX_START + 1);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		status = ice_alloc_recipe(hw, &entry->rid);

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to zero; valid words overwrite these slots just below.
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;

		/* program the field vector indexes resolved earlier */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] = CPU_TO_LE16(0xFFFF);

		if (rm->n_grp_count > 1) {
			/* non-root recipe in a chain: publish its result in a
			 * reserved FV slot so the chaining recipe can match it
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			/* consume this result slot, then pick the next free */
			ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
				      available_result_ids);
			chain_idx = ICE_CHAIN_FV_INDEX_START -
				ice_find_first_bit(available_result_ids,
						   ICE_CHAIN_FV_INDEX_START +

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

	if (rm->n_grp_count == 1) {
		/* single group: the lone recipe is itself the root */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;

		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
		struct ice_recp_grp_entry *last_chain_entry;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;

		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* wire every sub-recipe's chain result slot into the root */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
			status = ICE_ERR_BAD_PTR;
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		/* To differentiate among different UDP tunnels, a meta data ID
		 * flag is matched in the chaining recipe (match_tun path).
		 */
			buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
			buf[recps].content.mask[i] =
				CPU_TO_LE16(ICE_TUN_FLAG_MASK);

		rm->root_rid = (u8)rid;
	/* serialize recipe programming against other PFs/tools */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);

	/* Every recipe that just got created add it to the recipe
	 * book keeping list (hw->switch_info->recp_list).
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		struct ice_sw_recipe *recp;

		recp = &sw->recp_list[entry->rid];
		recp->root_rid = entry->rid;
		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->recp_created = true;
		recp->big_recp = false;
/**
 * ice_create_recipe_group - creates recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
 *
 * Groups the rule's lookup words into recipe groups on rm->rg_list, first by
 * trying the preferred groupings in ice_recipe_pack, then first-fit packing
 * whatever words remain. On failure the partially built rg_list is freed.
 */
static enum ice_status
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
	struct ice_recp_grp_entry *entry;
	struct ice_recp_grp_entry *tmp;
	enum ice_status status;

	rm->n_grp_count = 0;

	/* Each switch recipe can match up to 5 words or metadata. One word in
	 * each recipe is used to match the switch ID. Four words are left for
	 * matching other values. If the new advanced recipe requires more than
	 * 4 words, it needs to be split into multiple recipes which are chained
	 * together using the intermediate result that each produces as input to
	 * the other recipes in the sequence.
	 */
	groups = ARRAY_SIZE(ice_recipe_pack);

	/* Check if any of the preferred recipes from the grouping policy
	 * matches.
	 */
	for (i = 0; i < groups; i++)
		/* Check if the recipe from the preferred grouping matches
		 * or is a subset of the fields that needs to be looked up.
		 */
		if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
			/* This recipe can be used by itself or grouped with
			 * other recipes.
			 */
			entry = (struct ice_recp_grp_entry *)
				ice_malloc(hw, sizeof(*entry));
				status = ICE_ERR_NO_MEMORY;
			/* copy the whole preferred group into this entry */
			entry->r_group = ice_recipe_pack[i];
			LIST_ADD(&entry->l_entry, &rm->rg_list);

	/* Create recipes for words that are marked not done by packing them
	 * as best fit.
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
			   sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);

	/* error path: free every group entry queued so far */
	LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
		LIST_DEL(&entry->l_entry);
		ice_free(hw, entry);
/**
 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup element's protocol type to a protocol ID, then
 * queries for field vectors containing all of them. The temporary prot_ids
 * array is freed on every path.
 */
static enum ice_status
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
	   struct LIST_HEAD_TYPE *fv_list)
	enum ice_status status;

	prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
		return ICE_ERR_NO_MEMORY;

	/* any untranslatable lookup type invalidates the whole request */
	for (i = 0; i < lkups_cnt; i++)
		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
			status = ICE_ERR_CFG;

	/* Find field vectors that include all specified protocol types */
	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);

	ice_free(hw, prot_ids);
/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
 *
 * If a recipe matching the requested lookups already exists its ID is
 * returned; otherwise field vectors are fetched, lookups are grouped into
 * recipes (chained when needed), and the new recipe is programmed.
 */
static enum ice_status
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *rm;
	bool match_tun = false;

		return ICE_ERR_PARAM;

	lkup_exts = (struct ice_prot_lkup_ext *)
		ice_malloc(hw, sizeof(*lkup_exts));
		return ICE_ERR_NO_MEMORY;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;

		count = ice_fill_valid_words(&lkups[i], lkup_exts);
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;

	*rid = ice_find_recp(hw, lkup_exts);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria */
		goto err_free_lkup_exts;

	/* Recipe we need does not exist, add a recipe */

	rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
		status = ICE_ERR_NO_MEMORY;
		goto err_free_lkup_exts;

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);

	/* Group match words into recipes using preferred recipe grouping
	 * criteria.
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);

	/* There is only profile for UDP tunnels. So, it is necessary to use a
	 * metadata ID flag to differentiate different tunnel types. A separate
	 * recipe needs to be used for the metadata.
	 */
	if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
	     rinfo->tun_type == ICE_SW_TUN_GENEVE ||
	     rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)

	/* set the recipe priority if specified */
	rm->priority = rinfo->priority ? rinfo->priority : 0;

	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
	ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	status = ice_add_sw_recipe(hw, rm, match_tun);

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
		ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);

		/* merge new recipes into the profile's existing mapping */
		ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);

		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,

		ice_release_change_lock(hw);

	*rid = rm->root_rid;
	/* cache the lookup extraction so ice_find_recp can match it later */
	ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
		   lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);

	/* cleanup: release the temporary group and field-vector lists */
	LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
				 ice_recp_grp_entry, l_entry) {
		LIST_DEL(&r_entry->l_entry);
		ice_free(hw, r_entry);

	LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
		LIST_DEL(&fvit->list_entry);

	ice_free(hw, rm->root_buf);

	ice_free(hw, lkup_exts);
5101 #define ICE_MAC_HDR_OFFSET 0
5102 #define ICE_IP_HDR_OFFSET 14
5103 #define ICE_GRE_HDR_OFFSET 34
5104 #define ICE_MAC_IL_HDR_OFFSET 42
5105 #define ICE_IP_IL_HDR_OFFSET 56
5106 #define ICE_L4_HDR_OFFSET 34
5107 #define ICE_UDP_TUN_HDR_OFFSET 42
/**
 * ice_find_dummy_packet - find dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @tun_type: tunnel type from the match criteria
 * @pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 *
 * Selection order: GRE/all-tunnel template first, then VXLAN/GENEVE/GPE UDP
 * tunnel template, then UDP L4 if any lookup is ICE_UDP_ILOS, defaulting to
 * the TCP tunnel template.
 */
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
		      enum ice_sw_tunnel_type tun_type, const u8 **pkt,
	if (tun_type == ICE_SW_TUN_NVGRE || tun_type == ICE_ALL_TUNNELS) {
		*pkt = dummy_gre_packet;
		*pkt_len = sizeof(dummy_gre_packet);

	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
	    tun_type == ICE_SW_TUN_VXLAN_GPE) {
		*pkt = dummy_udp_tun_packet;
		*pkt_len = sizeof(dummy_udp_tun_packet);

	/* no explicit tunnel type: infer UDP vs TCP from the lookups */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type == ICE_UDP_ILOS) {
			*pkt = dummy_udp_tun_packet;
			*pkt_len = sizeof(dummy_udp_tun_packet);

	/* default template when nothing else matched */
	*pkt = dummy_tcp_tun_packet;
	*pkt_len = sizeof(dummy_tcp_tun_packet);
/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @tun_type: to know if the dummy packet is supposed to be tunnel packet
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 *
 * Copies the dummy template into the rule buffer, then overwrites the header
 * fields at fixed offsets (ICE_*_HDR_OFFSET) with the caller's match values.
 */
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  enum ice_sw_tunnel_type tun_type,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len)
	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);

	for (i = 0; i < lkups_cnt; i++) {
		u32 len, pkt_off, hdr_size, field_off;

		switch (lkups[i].type) {
			/* MAC: copy dst, then src, then (optionally) the
			 * ethertype. Inner MAC (ICE_MAC_IL) uses the inner
			 * header offset, shifted further for UDP tunnels.
			 */
			pkt_off = offsetof(struct ice_ether_hdr, dst_addr) +
				((lkups[i].type == ICE_MAC_IL) ?
				 ICE_MAC_IL_HDR_OFFSET : 0);
			len = sizeof(lkups[i].h_u.eth_hdr.dst_addr);
			if ((tun_type == ICE_SW_TUN_VXLAN ||
			     tun_type == ICE_SW_TUN_GENEVE ||
			     tun_type == ICE_SW_TUN_VXLAN_GPE) &&
			    lkups[i].type == ICE_MAC_IL) {
				pkt_off += sizeof(struct ice_udp_tnl_hdr);

			ice_memcpy(&pkt[pkt_off],
				   &lkups[i].h_u.eth_hdr.dst_addr, len,
				   ICE_NONDMA_TO_NONDMA);
			pkt_off = offsetof(struct ice_ether_hdr, src_addr) +
				((lkups[i].type == ICE_MAC_IL) ?
				 ICE_MAC_IL_HDR_OFFSET : 0);
			len = sizeof(lkups[i].h_u.eth_hdr.src_addr);
			if ((tun_type == ICE_SW_TUN_VXLAN ||
			     tun_type == ICE_SW_TUN_GENEVE ||
			     tun_type == ICE_SW_TUN_VXLAN_GPE) &&
			    lkups[i].type == ICE_MAC_IL) {
				pkt_off += sizeof(struct ice_udp_tnl_hdr);

			ice_memcpy(&pkt[pkt_off],
				   &lkups[i].h_u.eth_hdr.src_addr, len,
				   ICE_NONDMA_TO_NONDMA);
			/* ethertype only when the caller provided one */
			if (lkups[i].h_u.eth_hdr.ethtype_id) {
				pkt_off = offsetof(struct ice_ether_hdr,
					((lkups[i].type == ICE_MAC_IL) ?
					 ICE_MAC_IL_HDR_OFFSET : 0);
				len = sizeof(lkups[i].h_u.eth_hdr.ethtype_id);
				if ((tun_type == ICE_SW_TUN_VXLAN ||
				     tun_type == ICE_SW_TUN_GENEVE ||
				     tun_type == ICE_SW_TUN_VXLAN_GPE) &&
				    lkups[i].type == ICE_MAC_IL) {
						sizeof(struct ice_udp_tnl_hdr);
				ice_memcpy(&pkt[pkt_off],
					   &lkups[i].h_u.eth_hdr.ethtype_id,
					   len, ICE_NONDMA_TO_NONDMA);
			/* IPv4: copy from the field to end of header --
			 * len = hdr_size - field_off
			 */
			hdr_size = sizeof(struct ice_ipv4_hdr);
			if (lkups[i].h_u.ipv4_hdr.dst_addr) {
				pkt_off = ICE_IP_HDR_OFFSET +
					offsetof(struct ice_ipv4_hdr,
				field_off = offsetof(struct ice_ipv4_hdr,
				len = hdr_size - field_off;
				ice_memcpy(&pkt[pkt_off],
					   &lkups[i].h_u.ipv4_hdr.dst_addr,
					   len, ICE_NONDMA_TO_NONDMA);

			if (lkups[i].h_u.ipv4_hdr.src_addr) {
				pkt_off = ICE_IP_HDR_OFFSET +
					offsetof(struct ice_ipv4_hdr,
				field_off = offsetof(struct ice_ipv4_hdr,
				len = hdr_size - field_off;
				ice_memcpy(&pkt[pkt_off],
					   &lkups[i].h_u.ipv4_hdr.src_addr,
					   len, ICE_NONDMA_TO_NONDMA);
			/* L4 (TCP/UDP/SCTP) port fields */
			hdr_size = sizeof(struct ice_udp_tnl_hdr);
			if (lkups[i].h_u.l4_hdr.dst_port) {
				pkt_off = ICE_L4_HDR_OFFSET +
					offsetof(struct ice_l4_hdr,
				field_off = offsetof(struct ice_l4_hdr,
				len = hdr_size - field_off;
				ice_memcpy(&pkt[pkt_off],
					   &lkups[i].h_u.l4_hdr.dst_port,
					   len, ICE_NONDMA_TO_NONDMA);

			if (lkups[i].h_u.l4_hdr.src_port) {
				pkt_off = ICE_L4_HDR_OFFSET +
					offsetof(struct ice_l4_hdr, src_port);
				field_off = offsetof(struct ice_l4_hdr,
				len = hdr_size - field_off;
				ice_memcpy(&pkt[pkt_off],
					   &lkups[i].h_u.l4_hdr.src_port,
					   len, ICE_NONDMA_TO_NONDMA);
			/* tunnel header: copy from VNI to end of header */
			pkt_off = ICE_UDP_TUN_HDR_OFFSET +
				offsetof(struct ice_udp_tnl_hdr, vni);
			field_off = offsetof(struct ice_udp_tnl_hdr, vni);
			len = sizeof(struct ice_udp_tnl_hdr) - field_off;
			ice_memcpy(&pkt[pkt_off], &lkups[i].h_u.tnl_hdr.vni,
				   len, ICE_NONDMA_TO_NONDMA);

	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advance rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u8 recp_id,
			struct ice_adv_rule_info *rinfo)
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;

	LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
			    ice_adv_fltr_mgmt_list_entry, list_entry) {
		bool lkups_matched = true;

		/* cheap count check before the per-element memcmp */
		if (lkups_cnt != list_itr->lkups_cnt)
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				lkups_matched = false;

		/* lookups alone are not enough: the action flag and tunnel
		 * type must also match for the rules to be the same
		 */
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    rinfo->tun_type == list_itr->rule_info.tun_type &&
/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
	enum ice_status status;
	u16 vsi_list_id = 0;

	/* queue-directed actions cannot be converted to a VSI list */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
		return ICE_ERR_NOT_IMPL;

	/* a second identical DROP adds nothing */
	if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
	    new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return ICE_ERR_ALREADY_EXISTS;

	/* mixing queue-forward with VSI-forward is not supported */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,

		/* NOTE(review): tmp_fltr is only partially initialized here;
		 * the elided lines presumably zero it first -- confirm.
		 */
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "forward to VSI" to
		 * "forward to VSI list".
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);

		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,
		/* update VSI list mapping info with new VSI ID */
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);

		m_entry->vsi_count++;
/**
 * ice_add_adv_rule - create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *	   together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *		 ignored is case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	struct ice_aqc_sw_rules_elem *s_rule;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;

		return ICE_ERR_PARAM;

	for (i = 0; i < lkups_cnt; i++) {
		/* Validate match masks to make sure they match complete 16-bit
		 * words -- partial-word masks are not programmable.
		 */
		/* NOTE(review): this takes lkups->m_u (element 0) on every
		 * iteration of the i-loop; it almost certainly should be
		 * lkups[i].m_u so each element's mask gets validated.
		 * Confirm against upstream and fix in a follow-up.
		 */
		ptr = (u16 *)&lkups->m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0 && ptr[j] != 0xffff)
				return ICE_ERR_PARAM;

	/* only VSI-forward, queue-forward and drop are supported here */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	/* find-or-create the recipe that matches these lookups */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);

	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;

	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;

	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 * above)
	 */
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			CPU_TO_LE16(hw->port_info->lport);
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);

	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	ice_fill_adv_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, s_rule,

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
		goto err_ice_add_adv_rule;
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;

	/* deep-copy the lookups so the caller's array can be freed */
	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	if (!adv_fltr->lkups) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
		struct ice_fltr_info tmp_fltr;

		/* NOTE(review): tmp_fltr appears only partially initialized;
		 * the elided lines presumably zero it -- confirm.
		 */
		tmp_fltr.fltr_rule_id =
			LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
		tmp_fltr.vsi_handle = vsi_handle;
		/* Update the previous switch rule of "forward to VSI" to
		 * "forward to VSI list".
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
			goto err_ice_add_adv_rule;
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;

err_ice_add_adv_rule:
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);

	ice_free(hw, s_rule);
5648 * ice_adv_rem_update_vsi_list
5649 * @hw: pointer to the hardware structure
5650 * @vsi_handle: VSI handle of the VSI to remove
5651 * @fm_list: filter management entry for which the VSI list management needs to
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;

	/* Only rules that forward to a VSI list, and that still track at
	 * least one VSI, can have a VSI removed from them.
	 */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* Ask FW to drop this VSI from the list (the "true" argument
	 * requests removal rather than addition).
	 */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
	/* Mirror the removal in the SW book-keeping */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;

		/* Only one VSI remains: locate it so the rule can be
		 * converted to a plain "forward to VSI" rule.
		 */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);

		/* Update the previous "fwd to VSI list" switch rule to a
		 * "fwd to VSI" rule targeting the one remaining VSI
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);

	if (fm_list->vsi_count == 1) {
		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);

		/* Drop the SW map entry; the rule no longer references it */
		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
5737 * ice_rem_adv_rule - removes existing advanced switch rule
5738 * @hw: pointer to the hardware structure
5739 * @lkups: information on the words that needs to be looked up. All words
5740 * together makes one recipe
5741 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: pointer to the rule information for the rule
5744 * This function can be used to remove 1 rule at a time. The lkups is
5745 * used to describe all the words that forms the "lookup" portion of the
5746 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * with a mask that determines which words are valid from the given protocol
5749 * header. rinfo describes other information related to this rule such as
5750 * forwarding IDs, priority of this rule, etc.
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	u16 rule_buf_sz, pkt_len, i, rid;
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	const u8 *pkt = NULL;

	/* Extract the valid protocol words from the caller's lookups so
	 * the recipe that was used to program the rule can be located.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
		count = ice_fill_valid_words(&lkups[i], &lkup_exts);

	rid = ice_find_recp(hw, &lkup_exts);
	/* If we did not find a recipe that matches the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	ice_acquire_lock(rule_lock);
	/* Decide, under the rule lock, whether the HW rule itself must be
	 * deleted or whether only the VSI list membership changes.
	 */
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
	} else if (list_elem->vsi_count > 1) {
		/* More VSIs remain: just drop this VSI from the list */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	ice_release_lock(rule_lock);

	if (list_elem->vsi_count == 0)
	ice_release_lock(rule_lock);
		struct ice_aqc_sw_rules_elem *s_rule;

		/* Size the removal buffer from the dummy packet that was
		 * used when the rule was added.
		 */
		ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
			(struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
			return ICE_ERR_NO_MEMORY;
		/* For removal, only the rule index matters; clear act/hdr */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status == ICE_SUCCESS) {
			/* HW removal succeeded: drop the book-keeping entry */
			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
		ice_free(hw, s_rule);
5836 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5837 * @hw: pointer to the hardware structure
5838 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5840 * This function is used to remove 1 rule at a time. The removal is based on
 * the remove_entry parameter. This function will remove a rule for a given
5842 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
5845 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5846 struct ice_rule_query_data *remove_entry)
5848 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5849 struct LIST_HEAD_TYPE *list_head;
5850 struct ice_adv_rule_info rinfo;
5851 struct ice_switch_info *sw;
5853 sw = hw->switch_info;
5854 if (!sw->recp_list[remove_entry->rid].recp_created)
5855 return ICE_ERR_PARAM;
5856 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5857 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
5859 if (list_itr->rule_info.fltr_rule_id ==
5860 remove_entry->rule_id) {
5861 rinfo = list_itr->rule_info;
5862 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5863 return ice_rem_adv_rule(hw, list_itr->lkups,
5864 list_itr->lkups_cnt, &rinfo);
5867 return ICE_ERR_PARAM;
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
5873 * @hw: pointer to the hardware structure
5874 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
5876 * This function is used to remove all the rules for a given VSI and as soon
5877 * as removing a rule fails, it will return immediately with the error code,
5878 * else it will return ICE_SUCCESS
ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_vsi_list_map_info *map_info;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_switch_info *sw;
	enum ice_status status;
	u16 vsi_list_id = 0;

	sw = hw->switch_info;
	/* Scan every recipe slot; only created recipes that hold advanced
	 * rules are relevant here.
	 */
	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
		if (!sw->recp_list[rid].recp_created)
		if (!sw->recp_list[rid].adv_rule)
		list_head = &sw->recp_list[rid].filt_rules;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry, list_entry) {
			/* Only remove rules that reference this VSI via a
			 * VSI list entry.
			 */
			map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
			rinfo = list_itr->rule_info;
			rinfo.sw_act.vsi_handle = vsi_handle;
			status = ice_rem_adv_rule(hw, list_itr->lkups,
						  list_itr->lkups_cnt, &rinfo);
5919 * ice_replay_fltr - Replay all the filters stored by a specific list head
5920 * @hw: pointer to the hardware structure
5921 * @list_head: list for which filters needs to be replayed
5922 * @recp_id: Recipe ID for which rules need to be replayed
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE l_head;
	enum ice_status status = ICE_SUCCESS;

	if (LIST_EMPTY(list_head))

	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise when trying to re-add the same
	 * filter, the function will return already exists
	 */
	LIST_REPLACE_INIT(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI, non-VLAN rules can be re-added directly */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status != ICE_SUCCESS)

		/* Add a filter per VSI separately */
			ice_find_first_bit(itr->vsi_list_info->vsi_map,
			if (!ice_is_vsi_valid(hw, vsi_handle))

			/* Clear the bit so re-adding rebuilds the map */
			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.vsi_handle = vsi_handle;
			f_entry.fltr_info.fwd_id.hw_vsi_id =
				ice_get_hw_vsi_num(hw, vsi_handle);
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			/* VLAN rules take a dedicated add path */
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, &f_entry);
				status = ice_add_rule_internal(hw, recp_id,
			if (status != ICE_SUCCESS)

	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
5986 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
5987 * @hw: pointer to the hardware structure
5989 * NOTE: This function does not clean up partially added filters on error.
5990 * It is up to caller of the function to issue a reset or fail early.
enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = ICE_SUCCESS;

	/* Replay the stored rules for every recipe, stopping at the first
	 * failure (no rollback here; see the NOTE in the header comment).
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;

		status = ice_replay_fltr(hw, i, head);
		if (status != ICE_SUCCESS)
6009 * ice_replay_vsi_fltr - Replay filters for requested VSI
6010 * @hw: pointer to the hardware structure
6011 * @vsi_handle: driver VSI handle
6012 * @recp_id: Recipe ID for which rules need to be replayed
6013 * @list_head: list for which filters need to be replayed
6015 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6016 * It is required to pass valid VSI handle.
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct LIST_HEAD_TYPE *list_head)
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;

	if (LIST_EMPTY(list_head))
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* A rule owned directly by this VSI (single VSI, not VLAN)
		 * is replayed as-is.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status != ICE_SUCCESS)

		/* Skip list-based rules that do not reference this VSI */
		if (!itr->vsi_list_info ||
		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
		/* Clearing it so that the logic can add it back */
		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		/* VLAN rules take a dedicated add path */
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status != ICE_SUCCESS)
6067 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6068 * @hw: pointer to the hardware structure
6069 * @vsi_handle: driver VSI handle
6070 * @list_head: list for which filters need to be replayed
6072 * Replay the advanced rule for the given VSI.
static enum ice_status
ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
			struct LIST_HEAD_TYPE *list_head)
	struct ice_rule_query_data added_entry = { 0 };
	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
	enum ice_status status = ICE_SUCCESS;

	if (LIST_EMPTY(list_head))
	/* Re-add every stored advanced rule owned by the requested VSI,
	 * reusing the saved lookups and rule info.
	 */
	LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
		u16 lk_cnt = adv_fltr->lkups_cnt;

		if (vsi_handle != rinfo->sw_act.vsi_handle)
		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6100 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6101 * @hw: pointer to the hardware structure
6102 * @vsi_handle: driver VSI handle
6104 * Replays filters for requested VSI via vsi_handle.
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status;

	/* Update the recipes that were created */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct LIST_HEAD_TYPE *head;

		head = &sw->recp_list[i].filt_replay_rules;
		/* Advanced rules are replayed via a dedicated path */
		if (!sw->recp_list[i].adv_rule)
			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
		if (status != ICE_SUCCESS)
6129 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6130 * @hw: pointer to the HW struct
6132 * Deletes the filter replay rules.
6134 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6136 struct ice_switch_info *sw = hw->switch_info;
6142 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6143 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6144 struct LIST_HEAD_TYPE *l_head;
6146 l_head = &sw->recp_list[i].filt_replay_rules;
6147 if (!sw->recp_list[i].adv_rule)
6148 ice_rem_sw_rule_info(hw, l_head);
6150 ice_rem_adv_rule_info(hw, l_head);