1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header below.
 * ICE_MAX_VLAN_ID is the largest 12-bit VLAN ID per IEEE 802.1Q.
 */
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
/* NOTE(review): initializer appears truncated in this view - it must
 * contain exactly DUMMY_ETH_HDR_LEN (16) bytes; confirm against upstream.
 */
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Sizes (in bytes) of the AQ switch-rule buffers. Each macro takes the
 * generic ice_aqc_sw_rules_elem, strips its union payload ('pdata'), and
 * adds back the specific rule struct. The trailing "- 1" accounts for the
 * one-byte flexible payload placeholder already counted inside the rule
 * struct. The (n) variants scale by the number of trailing act/vsi entries.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Maps a protocol header to its byte offset inside a dummy packet
 * template; a list is terminated by an ICE_PROTOCOL_LAST entry.
 * NOTE(review): closing brace of this struct is elided in this view.
 */
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Dummy packet templates used when programming advanced switch filter
 * rules: plain GRE(NVGRE), VXLAN-encapsulated TCP/UDP, and plain
 * TCP/UDP over IPv4. The trailing comment on each row names the
 * protocol layer and its byte offset, matching the *_offsets table.
 * NOTE(review): several offset entries, payload bytes, and closing
 * braces are elided in this view; the offsets given in the inline
 * comments are the authoritative values - confirm against upstream.
 */
62 struct ice_dummy_pkt_offsets dummy_gre_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
72 u8 dummy_gre_packet[] = { 0, 0, 0, 0, /* ICE_MAC_OFOS 0 */
76 0x45, 0, 0, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x80, 0, 0x65, 0x58, /* ICE_NVGRE 34 */
83 0, 0, 0, 0, /* ICE_MAC_IL 42 */
87 0x45, 0, 0, 0x14, /* ICE_IPV4_IL 54 */
95 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
97 { ICE_IPV4_OFOS, 14 },
103 { ICE_PROTOCOL_LAST, 0 },
107 u8 dummy_udp_tun_tcp_packet[] = {
108 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
113 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
114 0x00, 0x01, 0x00, 0x00,
115 0x40, 0x11, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
120 0x00, 0x46, 0x00, 0x00,
122 0x04, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
130 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
131 0x00, 0x01, 0x00, 0x00,
132 0x40, 0x06, 0x00, 0x00,
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
139 0x50, 0x02, 0x20, 0x00,
140 0x00, 0x00, 0x00, 0x00
144 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
146 { ICE_IPV4_OFOS, 14 },
151 { ICE_UDP_ILOS, 84 },
152 { ICE_PROTOCOL_LAST, 0 },
156 u8 dummy_udp_tun_udp_packet[] = {
157 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
158 0x00, 0x00, 0x00, 0x00,
159 0x00, 0x00, 0x00, 0x00,
162 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
163 0x00, 0x01, 0x00, 0x00,
164 0x00, 0x11, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00,
168 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
169 0x00, 0x3a, 0x00, 0x00,
171 0x0c, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
179 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
180 0x00, 0x01, 0x00, 0x00,
181 0x00, 0x11, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
186 0x00, 0x08, 0x00, 0x00,
190 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
192 { ICE_IPV4_OFOS, 14 },
193 { ICE_UDP_ILOS, 34 },
194 { ICE_PROTOCOL_LAST, 0 },
198 dummy_udp_packet[] = {
199 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
200 0x00, 0x00, 0x00, 0x00,
201 0x00, 0x00, 0x00, 0x00,
204 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
205 0x00, 0x01, 0x00, 0x00,
206 0x00, 0x11, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
211 0x00, 0x08, 0x00, 0x00,
213 0x00, 0x00, /* 2 bytes for 4 byte alignment */
217 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
219 { ICE_IPV4_OFOS, 14 },
221 { ICE_PROTOCOL_LAST, 0 },
225 dummy_tcp_packet[] = {
226 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
231 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
232 0x00, 0x01, 0x00, 0x00,
233 0x00, 0x06, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00,
237 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
238 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
240 0x50, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, /* 2 bytes for 4 byte alignment */
246 /* this is a recipe to profile bitmap association */
247 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
248 ICE_MAX_NUM_PROFILES);
/* Bits track result indexes still free for chained recipes; cleared in
 * ice_get_recp_frm_fw() as FW-programmed result slots are discovered.
 */
249 static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
/* forward declaration - defined after ice_get_recp_frm_fw() below */
251 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
254 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
255 * @hw: pointer to hardware structure
256 * @recps: struct that we need to populate
257 * @rid: recipe ID that we are populating
258 * @refresh_required: true if we should get recipe to profile mapping from FW
260 * This function is used to populate all the necessary entries into our
261 * bookkeeping so that we have a current list of all the recipes that are
262 * programmed in the firmware.
/* NOTE(review): several statements of this function (error-path tests,
 * braces, and cleanup labels) are elided in this view - treat the
 * control flow below as a partial listing and confirm against upstream.
 */
264 static enum ice_status
265 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
266 bool *refresh_required)
268 u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
269 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
270 u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
271 struct ice_aqc_recipe_data_elem *tmp;
272 u16 num_recps = ICE_MAX_NUM_RECIPES;
273 struct ice_prot_lkup_ext *lkup_exts;
274 enum ice_status status;
276 /* we need a buffer big enough to accommodate all the recipes */
277 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
278 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
/* NOTE(review): the "if (!tmp)" guard for this early return is elided */
280 return ICE_ERR_NO_MEMORY;
282 tmp[0].recipe_indx = rid;
283 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
284 /* non-zero status meaning recipe doesn't exist */
288 /* Get recipe to profile map so that we can get the fv from lkups that
289 * we read for a recipe from FW. Since we want to minimize the number of
290 * times we make this FW call, just make one call and cache the copy
291 * until a new recipe is added. This operation is only required the
292 * first time to get the changes from FW. Then to search existing
293 * entries we don't need to update the cache again until another recipe
296 if (*refresh_required) {
297 ice_get_recp_to_prof_map(hw);
298 *refresh_required = false;
300 lkup_exts = &recps[rid].lkup_exts;
301 /* start populating all the entries for recps[rid] based on lkups from
304 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
305 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
306 struct ice_recp_grp_entry *rg_entry;
307 u8 prof_id, prot = 0;
310 rg_entry = (struct ice_recp_grp_entry *)
311 ice_malloc(hw, sizeof(*rg_entry));
313 status = ICE_ERR_NO_MEMORY;
316 /* Avoid 8th bit since its result enable bit */
317 result_idxs[result_idx] = root_bufs.content.result_indx &
318 ~ICE_AQ_RECIPE_RESULT_EN;
319 /* Check if result enable bit is set */
320 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
321 ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
322 result_idxs[result_idx++],
323 available_result_ids)
325 recipe_to_profile[tmp[sub_recps].recipe_indx],
326 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
327 /* get the first profile that is associated with rid */
328 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
329 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
330 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
332 rg_entry->fv_idx[i] = lkup_indx;
333 rg_entry->fv_mask[i] =
334 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
336 /* If the recipe is a chained recipe then all its
337 * child recipe's result will have a result index.
338 * To fill fv_words we should not use those result
339 * index, we only need the protocol ids and offsets.
340 * We will skip all the fv_idx which stores result
341 * index in them. We also need to skip any fv_idx which
342 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
343 * valid offset value.
345 if (result_idxs[0] == rg_entry->fv_idx[i] ||
346 result_idxs[1] == rg_entry->fv_idx[i] ||
347 result_idxs[2] == rg_entry->fv_idx[i] ||
348 result_idxs[3] == rg_entry->fv_idx[i] ||
349 result_idxs[4] == rg_entry->fv_idx[i] ||
350 rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
351 rg_entry->fv_idx[i] == 0)
354 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
355 rg_entry->fv_idx[i], &prot, &off);
356 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
357 lkup_exts->fv_words[fv_word_idx].off = off;
360 /* populate rg_list with the data from the child entry of this
363 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
365 lkup_exts->n_val_words = fv_word_idx;
366 recps[rid].n_grp_count = num_recps;
367 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
368 ice_calloc(hw, recps[rid].n_grp_count,
369 sizeof(struct ice_aqc_recipe_data_elem));
370 if (!recps[rid].root_buf)
373 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
374 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
375 recps[rid].recp_created = true;
376 if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
377 recps[rid].root_rid = rid;
384 * ice_get_recp_to_prof_map - updates recipe to profile mapping
385 * @hw: pointer to hardware structure
387 * This function is used to populate recipe_to_profile matrix where index to
388 * this array is the recipe ID and the element is the mapping of which profiles
389 * is this recipe mapped to.
/* NOTE(review): the "static void" return line and loop braces are
 * elided in this view; confirm the full body against upstream.
 */
392 ice_get_recp_to_prof_map(struct ice_hw *hw)
394 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
397 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
400 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* a failed AQ query for one profile is skipped, not fatal */
401 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
/* transpose FW's profile->recipes answer into recipe->profiles */
404 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
405 if (ice_is_bit_set(r_bitmap, j))
406 ice_set_bit(i, recipe_to_profile[j]);
411 * ice_init_def_sw_recp - initialize the recipe book keeping tables
412 * @hw: pointer to the HW struct
414 * Allocate memory for the entire recipe table and initialize the structures/
415 * entries corresponding to basic recipes.
/* NOTE(review): the "if (!recps)" guard before the NO_MEMORY return and
 * the final success return are elided in this view.
 */
417 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
419 struct ice_sw_recipe *recps;
422 recps = (struct ice_sw_recipe *)
423 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
425 return ICE_ERR_NO_MEMORY;
427 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
428 recps[i].root_rid = i;
429 INIT_LIST_HEAD(&recps[i].filt_rules);
430 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
431 INIT_LIST_HEAD(&recps[i].rg_list);
432 ice_init_lock(&recps[i].filt_rule_lock);
435 hw->switch_info->recp_list = recps;
441 * ice_aq_get_sw_cfg - get switch configuration
442 * @hw: pointer to the hardware structure
443 * @buf: pointer to the result buffer
444 * @buf_size: length of the buffer available for response
445 * @req_desc: pointer to requested descriptor
446 * @num_elems: pointer to number of elements
447 * @cd: pointer to command details structure or NULL
449 * Get switch configuration (0x0200) to be placed in 'buff'.
450 * This admin command returns information such as initial VSI/port number
451 * and switch ID it belongs to.
453 * NOTE: *req_desc is both an input/output parameter.
454 * The caller of this function first calls this function with *request_desc set
455 * to 0. If the response from f/w has *req_desc set to 0, all the switch
456 * configuration information has been returned; if non-zero (meaning not all
457 * the information was returned), the caller should call this function again
458 * with *req_desc set to the previous value returned by f/w to get the
459 * next block of switch configuration information.
461 * *num_elems is output only parameter. This reflects the number of elements
462 * in response buffer. The caller of this function to use *num_elems while
463 * parsing the response buffer.
465 static enum ice_status
466 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
467 u16 buf_size, u16 *req_desc, u16 *num_elems,
468 struct ice_sq_cd *cd)
470 struct ice_aqc_get_sw_cfg *cmd;
471 enum ice_status status;
472 struct ice_aq_desc desc;
474 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
475 cmd = &desc.params.get_sw_conf;
/* FW echoes the continuation cookie back through cmd->element */
476 cmd->element = CPU_TO_LE16(*req_desc);
478 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* NOTE(review): the "if (!status)" success guard for the writes below
 * is elided in this view - confirm outputs are only set on success.
 */
480 *req_desc = LE16_TO_CPU(cmd->element);
481 *num_elems = LE16_TO_CPU(cmd->num_elems);
489 * ice_alloc_sw - allocate resources specific to switch
490 * @hw: pointer to the HW struct
491 * @ena_stats: true to turn on VEB stats
492 * @shared_res: true for shared resource, false for dedicated resource
493 * @sw_id: switch ID returned
494 * @counter_id: VEB counter ID returned
496 * allocates switch resources (SWID and VEB counter) (0x0208)
/* NOTE(review): return-type line, allocation-failure guards, status
 * checks, and the exit label are elided in this view - the cleanup
 * below follows the goto-label pattern; confirm against upstream.
 */
499 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
502 struct ice_aqc_alloc_free_res_elem *sw_buf;
503 struct ice_aqc_res_elem *sw_ele;
504 enum ice_status status;
507 buf_len = sizeof(*sw_buf);
508 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
509 ice_malloc(hw, buf_len);
511 return ICE_ERR_NO_MEMORY;
513 /* Prepare buffer for switch ID.
514 * The number of resource entries in buffer is passed as 1 since only a
515 * single switch/VEB instance is allocated, and hence a single sw_id
518 sw_buf->num_elems = CPU_TO_LE16(1);
520 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
521 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
522 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
524 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
525 ice_aqc_opc_alloc_res, NULL);
528 goto ice_alloc_sw_exit;
530 sw_ele = &sw_buf->elem[0];
531 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
534 /* Prepare buffer for VEB Counter */
535 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
536 struct ice_aqc_alloc_free_res_elem *counter_buf;
537 struct ice_aqc_res_elem *counter_ele;
539 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
540 ice_malloc(hw, buf_len);
542 status = ICE_ERR_NO_MEMORY;
543 goto ice_alloc_sw_exit;
546 /* The number of resource entries in buffer is passed as 1 since
547 * only a single switch/VEB instance is allocated, and hence a
548 * single VEB counter is requested.
550 counter_buf->num_elems = CPU_TO_LE16(1);
551 counter_buf->res_type =
552 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
553 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
554 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* on AQ failure the local counter buffer is released before bailing */
558 ice_free(hw, counter_buf);
559 goto ice_alloc_sw_exit;
561 counter_ele = &counter_buf->elem[0];
562 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
563 ice_free(hw, counter_buf);
567 ice_free(hw, sw_buf);
572 * ice_free_sw - free resources specific to switch
573 * @hw: pointer to the HW struct
574 * @sw_id: switch ID returned
575 * @counter_id: VEB counter ID returned
577 * free switch resources (SWID and VEB counter) (0x0209)
579 * NOTE: This function frees multiple resources. It continues
580 * releasing other resources even after it encounters error.
581 * The error code returned is the last error it encountered.
/* NOTE(review): allocation-failure guards, status checks, and the final
 * return of ret_status are elided in this view - confirm upstream.
 */
583 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
585 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
586 enum ice_status status, ret_status;
589 buf_len = sizeof(*sw_buf);
590 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
591 ice_malloc(hw, buf_len);
593 return ICE_ERR_NO_MEMORY;
595 /* Prepare buffer to free for switch ID res.
596 * The number of resource entries in buffer is passed as 1 since only a
597 * single switch/VEB instance is freed, and hence a single sw_id
600 sw_buf->num_elems = CPU_TO_LE16(1);
601 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
602 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
604 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
605 ice_aqc_opc_free_res, NULL);
608 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
610 /* Prepare buffer to free for VEB Counter resource */
611 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
612 ice_malloc(hw, buf_len);
614 ice_free(hw, sw_buf);
615 return ICE_ERR_NO_MEMORY;
618 /* The number of resource entries in buffer is passed as 1 since only a
619 * single switch/VEB instance is freed, and hence a single VEB counter
622 counter_buf->num_elems = CPU_TO_LE16(1);
623 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
624 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
626 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
627 ice_aqc_opc_free_res, NULL);
629 ice_debug(hw, ICE_DBG_SW,
630 "VEB counter resource could not be freed\n");
634 ice_free(hw, counter_buf);
635 ice_free(hw, sw_buf);
/* ice_aq_add_vsi - add a VSI context to the hardware (0x0210) */
641 * @hw: pointer to the HW struct
642 * @vsi_ctx: pointer to a VSI context struct
643 * @cd: pointer to command details structure or NULL
645 * Add a VSI context to the hardware (0x0210)
/* NOTE(review): return-type line, the success guard around the response
 * parsing, and the final return are elided in this view.
 */
648 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
649 struct ice_sq_cd *cd)
651 struct ice_aqc_add_update_free_vsi_resp *res;
652 struct ice_aqc_add_get_update_free_vsi *cmd;
653 struct ice_aq_desc desc;
654 enum ice_status status;
656 cmd = &desc.params.vsi_cmd;
657 res = &desc.params.add_update_free_vsi_res;
659 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* a caller-chosen VSI number is only valid when not pool-allocated */
661 if (!vsi_ctx->alloc_from_pool)
662 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
663 ICE_AQ_VSI_IS_VALID);
665 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
667 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
669 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
670 sizeof(vsi_ctx->info), cd);
673 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
674 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
675 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/* ice_aq_free_vsi - free a VSI context in the hardware (0x0213) */
683 * @hw: pointer to the HW struct
684 * @vsi_ctx: pointer to a VSI context struct
685 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
686 * @cd: pointer to command details structure or NULL
688 * Free VSI context info from hardware (0x0213)
/* NOTE(review): return-type line, the "if (keep_vsi_alloc)" guard, and
 * the success check around the response parsing are elided in this view.
 */
691 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
692 bool keep_vsi_alloc, struct ice_sq_cd *cd)
694 struct ice_aqc_add_update_free_vsi_resp *resp;
695 struct ice_aqc_add_get_update_free_vsi *cmd;
696 struct ice_aq_desc desc;
697 enum ice_status status;
699 cmd = &desc.params.vsi_cmd;
700 resp = &desc.params.add_update_free_vsi_res;
702 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
704 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
706 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
708 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
710 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
711 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* ice_aq_update_vsi - update a VSI context in the hardware (0x0211) */
719 * @hw: pointer to the HW struct
720 * @vsi_ctx: pointer to a VSI context struct
721 * @cd: pointer to command details structure or NULL
723 * Update VSI context in the hardware (0x0211)
/* NOTE(review): return-type line and the success guard around the
 * response parsing are elided in this view.
 */
726 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
727 struct ice_sq_cd *cd)
729 struct ice_aqc_add_update_free_vsi_resp *resp;
730 struct ice_aqc_add_get_update_free_vsi *cmd;
731 struct ice_aq_desc desc;
732 enum ice_status status;
734 cmd = &desc.params.vsi_cmd;
735 resp = &desc.params.add_update_free_vsi_res;
737 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
739 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
741 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
743 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
744 sizeof(vsi_ctx->info), cd);
747 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
748 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
755 * ice_is_vsi_valid - check whether the VSI is valid or not
756 * @hw: pointer to the HW struct
757 * @vsi_handle: VSI handle
759 * check whether the VSI is valid or not
761 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
763 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
767 * ice_get_hw_vsi_num - return the HW VSI number
768 * @hw: pointer to the HW struct
769 * @vsi_handle: VSI handle
771 * return the HW VSI number
772 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
774 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
776 return hw->vsi_ctx[vsi_handle]->vsi_num;
780 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
781 * @hw: pointer to the HW struct
782 * @vsi_handle: VSI handle
784 * return the VSI context entry for a given VSI handle
786 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
788 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
792 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
793 * @hw: pointer to the HW struct
794 * @vsi_handle: VSI handle
795 * @vsi: VSI context pointer
797 * save the VSI context entry for a given VSI handle
/* NOTE(review): the "static void" return-type line and braces are
 * elided in this view. Ownership of @vsi transfers to the table here.
 */
800 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
802 hw->vsi_ctx[vsi_handle] = vsi;
806 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
807 * @hw: pointer to the HW struct
808 * @vsi_handle: VSI handle
/* NOTE(review): the NULL-check after ice_get_vsi_ctx and closing braces
 * are elided in this view.
 */
810 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
812 struct ice_vsi_ctx *vsi;
815 vsi = ice_get_vsi_ctx(hw, vsi_handle);
818 ice_for_each_traffic_class(i) {
819 if (vsi->lan_q_ctx[i]) {
820 ice_free(hw, vsi->lan_q_ctx[i]);
/* clear the pointer after free to avoid use-after-free/double free */
821 vsi->lan_q_ctx[i] = NULL;
827 * ice_clear_vsi_ctx - clear the VSI context entry
828 * @hw: pointer to the HW struct
829 * @vsi_handle: VSI handle
831 * clear the VSI context entry
/* NOTE(review): the NULL-check guard and the ice_free of the context
 * itself are elided in this view - confirm against upstream.
 */
833 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
835 struct ice_vsi_ctx *vsi;
837 vsi = ice_get_vsi_ctx(hw, vsi_handle);
839 ice_clear_vsi_q_ctx(hw, vsi_handle);
841 hw->vsi_ctx[vsi_handle] = NULL;
846 * ice_clear_all_vsi_ctx - clear all the VSI context entries
847 * @hw: pointer to the HW struct
/* Iterates every possible handle; ice_clear_vsi_ctx tolerates unused
 * slots, so no validity check is needed here.
 */
849 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
853 for (i = 0; i < ICE_MAX_VSI; i++)
854 ice_clear_vsi_ctx(hw, i);
858 * ice_add_vsi - add VSI context to the hardware and VSI handle list
859 * @hw: pointer to the HW struct
860 * @vsi_handle: unique VSI handle provided by drivers
861 * @vsi_ctx: pointer to a VSI context struct
862 * @cd: pointer to command details structure or NULL
864 * Add a VSI context to the hardware also add it into the VSI handle list.
865 * If this function gets called after reset for existing VSIs then update
866 * with the new HW VSI number in the corresponding VSI handle list entry.
/* NOTE(review): return-type line, the status check after ice_aq_add_vsi,
 * the "if (!tmp_vsi_ctx)" branches, and the return are elided here.
 */
869 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
870 struct ice_sq_cd *cd)
872 struct ice_vsi_ctx *tmp_vsi_ctx;
873 enum ice_status status;
875 if (vsi_handle >= ICE_MAX_VSI)
876 return ICE_ERR_PARAM;
877 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
880 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
882 /* Create a new VSI context */
883 tmp_vsi_ctx = (struct ice_vsi_ctx *)
884 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* roll back the HW add when the bookkeeping allocation fails */
886 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
887 return ICE_ERR_NO_MEMORY;
889 *tmp_vsi_ctx = *vsi_ctx;
891 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
893 /* update with new HW VSI num */
894 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
895 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
902 * ice_free_vsi- free VSI context from hardware and VSI handle list
903 * @hw: pointer to the HW struct
904 * @vsi_handle: unique VSI handle
905 * @vsi_ctx: pointer to a VSI context struct
906 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
907 * @cd: pointer to command details structure or NULL
909 * Free VSI context info from hardware as well as from VSI handle list
/* NOTE(review): return-type line, the "if (!status)" guard before
 * clearing the context, and the return of status are elided here.
 */
912 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
913 bool keep_vsi_alloc, struct ice_sq_cd *cd)
915 enum ice_status status;
917 if (!ice_is_vsi_valid(hw, vsi_handle))
918 return ICE_ERR_PARAM;
919 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
920 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
922 ice_clear_vsi_ctx(hw, vsi_handle);
/* ice_update_vsi - update VSI context in the hardware for a handle */
928 * @hw: pointer to the HW struct
929 * @vsi_handle: unique VSI handle
930 * @vsi_ctx: pointer to a VSI context struct
931 * @cd: pointer to command details structure or NULL
933 * Update VSI context in the hardware
/* NOTE(review): return-type line is elided in this view. */
936 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
937 struct ice_sq_cd *cd)
939 if (!ice_is_vsi_valid(hw, vsi_handle))
940 return ICE_ERR_PARAM;
941 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
942 return ice_aq_update_vsi(hw, vsi_ctx, cd);
946 * ice_aq_get_vsi_params
947 * @hw: pointer to the HW struct
948 * @vsi_ctx: pointer to a VSI context struct
949 * @cd: pointer to command details structure or NULL
951 * Get VSI context info from hardware (0x0212)
/* NOTE(review): return-type line, the success guard around response
 * parsing, the ICE_AQ_VSI_NUM_M mask continuation, and the final
 * return are elided in this view.
 */
954 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
955 struct ice_sq_cd *cd)
957 struct ice_aqc_add_get_update_free_vsi *cmd;
958 struct ice_aqc_get_vsi_resp *resp;
959 struct ice_aq_desc desc;
960 enum ice_status status;
962 cmd = &desc.params.vsi_cmd;
963 resp = &desc.params.get_vsi_resp;
965 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
967 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
969 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
970 sizeof(vsi_ctx->info), cd);
972 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
974 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
975 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
982 * ice_aq_add_update_mir_rule - add/update a mirror rule
983 * @hw: pointer to the HW struct
984 * @rule_type: Rule Type
985 * @dest_vsi: VSI number to which packets will be mirrored
986 * @count: length of the list
987 * @mr_buf: buffer for list of mirrored VSI numbers
988 * @cd: pointer to command details structure or NULL
991 * Add/Update Mirror Rule (0x260).
/* NOTE(review): return-type line, @rule_id kernel-doc entry, the
 * "break"/"default" skeleton of the switch, and the add/remove branch
 * condition near line 1055 are elided in this view.
 */
994 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
995 u16 count, struct ice_mir_rule_buf *mr_buf,
996 struct ice_sq_cd *cd, u16 *rule_id)
998 struct ice_aqc_add_update_mir_rule *cmd;
999 struct ice_aq_desc desc;
1000 enum ice_status status;
1001 __le16 *mr_list = NULL;
1004 switch (rule_type) {
1005 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1006 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1007 /* Make sure count and mr_buf are set for these rule_types */
1008 if (!(count && mr_buf))
1009 return ICE_ERR_PARAM;
1011 buf_size = count * sizeof(__le16);
1012 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1014 return ICE_ERR_NO_MEMORY;
1016 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1017 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1018 /* Make sure count and mr_buf are not set for these
1021 if (count || mr_buf)
1022 return ICE_ERR_PARAM;
1025 ice_debug(hw, ICE_DBG_SW,
1026 "Error due to unsupported rule_type %u\n", rule_type);
1027 return ICE_ERR_OUT_OF_RANGE;
1030 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1032 /* Pre-process 'mr_buf' items for add/update of virtual port
1033 * ingress/egress mirroring (but not physical port ingress/egress
1039 for (i = 0; i < count; i++) {
1042 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1044 /* Validate specified VSI number, make sure it is less
1045 * than ICE_MAX_VSI, if not return with error.
1047 if (id >= ICE_MAX_VSI) {
1048 ice_debug(hw, ICE_DBG_SW,
1049 "Error VSI index (%u) out-of-range\n",
1051 ice_free(hw, mr_list);
1052 return ICE_ERR_OUT_OF_RANGE;
1055 /* add VSI to mirror rule */
1058 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1059 else /* remove VSI from mirror rule */
1060 mr_list[i] = CPU_TO_LE16(id);
1064 cmd = &desc.params.add_update_rule;
1065 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1066 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1067 ICE_AQC_RULE_ID_VALID_M);
1068 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1069 cmd->num_entries = CPU_TO_LE16(count);
1070 cmd->dest = CPU_TO_LE16(dest_vsi);
1072 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1074 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1076 ice_free(hw, mr_list);
1082 * ice_aq_delete_mir_rule - delete a mirror rule
1083 * @hw: pointer to the HW struct
1084 * @rule_id: Mirror rule ID (to be deleted)
1085 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1086 * otherwise it is returned to the shared pool
1087 * @cd: pointer to command details structure or NULL
1089 * Delete Mirror Rule (0x261).
/* NOTE(review): return-type line and the "if (keep_allocd)" guard
 * before setting cmd->flags are elided in this view.
 */
1092 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1093 struct ice_sq_cd *cd)
1095 struct ice_aqc_delete_mir_rule *cmd;
1096 struct ice_aq_desc desc;
1098 /* rule_id should be in the range 0...63 */
1099 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1100 return ICE_ERR_OUT_OF_RANGE;
1102 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1104 cmd = &desc.params.del_rule;
1105 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1106 cmd->rule_id = CPU_TO_LE16(rule_id);
1109 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1111 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1115 * ice_aq_alloc_free_vsi_list
1116 * @hw: pointer to the HW struct
1117 * @vsi_list_id: VSI list ID returned or used for lookup
1118 * @lkup_type: switch rule filter lookup type
1119 * @opc: switch rules population command type - pass in the command opcode
1121 * allocates or free a VSI list resource
/* NOTE(review): the "if (!sw_buf)" guard, the sw_buf->res_type
 * assignment line for the VLAN branch, the "} else {" skeleton, and
 * the final return of status are elided in this view.
 */
1123 static enum ice_status
1124 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1125 enum ice_sw_lkup_type lkup_type,
1126 enum ice_adminq_opc opc)
1128 struct ice_aqc_alloc_free_res_elem *sw_buf;
1129 struct ice_aqc_res_elem *vsi_ele;
1130 enum ice_status status;
1133 buf_len = sizeof(*sw_buf);
1134 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1135 ice_malloc(hw, buf_len);
1137 return ICE_ERR_NO_MEMORY;
1138 sw_buf->num_elems = CPU_TO_LE16(1);
1140 if (lkup_type == ICE_SW_LKUP_MAC ||
1141 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1142 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1143 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1144 lkup_type == ICE_SW_LKUP_PROMISC ||
1145 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1146 lkup_type == ICE_SW_LKUP_LAST) {
1147 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1148 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1150 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1152 status = ICE_ERR_PARAM;
1153 goto ice_aq_alloc_free_vsi_list_exit;
1156 if (opc == ice_aqc_opc_free_res)
1157 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1159 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1161 goto ice_aq_alloc_free_vsi_list_exit;
1163 if (opc == ice_aqc_opc_alloc_res) {
1164 vsi_ele = &sw_buf->elem[0];
1165 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1168 ice_aq_alloc_free_vsi_list_exit:
1169 ice_free(hw, sw_buf);
1174 * ice_aq_set_storm_ctrl - Sets storm control configuration
1175 * @hw: pointer to the HW struct
1176 * @bcast_thresh: represents the upper threshold for broadcast storm control
1177 * @mcast_thresh: represents the upper threshold for multicast storm control
 * @ctl_bitmask: storm control knobs
1180 * Sets the storm control configuration (0x0280)
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
	struct ice_aqc_storm_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.storm_conf;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);

	/* Thresholds are masked to the valid field width before sending */
	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);

	/* Direct command: no data buffer, no command-details struct */
	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1201 * ice_aq_get_storm_ctrl - gets storm control configuration
1202 * @hw: pointer to the HW struct
1203 * @bcast_thresh: represents the upper threshold for broadcast storm control
1204 * @mcast_thresh: represents the upper threshold for multicast storm control
 * @ctl_bitmask: storm control knobs
1207 * Gets the storm control configuration (0x0281)
ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
		/* Response comes back in the same descriptor */
		struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;

		/* Mask off reserved bits before reporting the thresholds */
		*bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
		*mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
		*ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1236 * ice_aq_sw_rules - add/update/remove switch rules
1237 * @hw: pointer to the HW struct
1238 * @rule_list: pointer to switch rule population list
1239 * @rule_list_sz: total size of the rule list in bytes
1240 * @num_rules: number of switch rules in the rule_list
1241 * @opc: switch rules population command type - pass in the command opcode
1242 * @cd: pointer to command details structure or NULL
1244 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");

	/* Only the three switch-rule opcodes are legal for this helper */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	/* Indirect command: firmware reads the rule list from the buffer */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		CPU_TO_LE16(num_rules);
	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1268 * ice_aq_add_recipe - add switch recipe
1269 * @hw: pointer to the HW struct
1270 * @s_recipe_list: pointer to switch rule population list
1271 * @num_recipes: number of switch recipes in the list
1272 * @cd: pointer to command details structure or NULL
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
	/* Indirect command: recipe data is read from the buffer */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1298 * ice_aq_get_recipe - get switch recipe
1299 * @hw: pointer to the HW struct
1300 * @s_recipe_list: pointer to switch rule population list
1301 * @num_recipes: pointer to the number of recipes (input and output)
1302 * @recipe_root: root recipe number of recipe(s) to retrieve
1303 * @cd: pointer to command details structure or NULL
1307 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1308 * On output, *num_recipes will equal the number of entries returned in
1311 * The caller must supply enough space in s_recipe_list to hold all possible
1312 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	/* Caller must size the buffer for the worst case (see kernel-doc) */
	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	/* Firmware starts retrieval from this root recipe index */
	cmd->return_index = CPU_TO_LE16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	/* Report back how many sub-recipes firmware actually returned */
	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* cppcheck-suppress constArgument */
	*num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1344 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1345 * @hw: pointer to the HW struct
1346 * @profile_id: package profile ID to associate the recipe with
1347 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1348 * @cd: pointer to command details structure or NULL
1349 * Recipe to profile association (0x0291)
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	/* NOTE(review): profile_id is u32 but the descriptor field is 16-bit;
	 * CPU_TO_LE16 truncates - confirm valid profile IDs fit in 16 bits.
	 */
	cmd->profile_id = CPU_TO_LE16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
		   ICE_NONDMA_TO_NONDMA);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 * ice_aq_get_recipe_to_profile - Get recipe to profile association
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID for which to query the recipe association
 * @r_bitmap: recipe bitmap filled in from the response
 * @cd: pointer to command details structure or NULL
 * Get the recipes associated with the given profile ID (0x0293)
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = CPU_TO_LE16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* Copy the association bitmap out of the response descriptor */
		ice_memcpy(r_bitmap, cmd->recipe_assoc,
			   sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1401 * ice_alloc_recipe - add recipe resource
1402 * @hw: pointer to the hardware structure
1403 * @rid: recipe ID returned as response to AQ call
enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	/* Request one shared recipe resource from firmware */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		/* Firmware returns the allocated recipe ID in sw_resp */
		*rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
	ice_free(hw, sw_buf);
1429 /* ice_init_port_info - Initialize port_info with switch configuration data
1430 * @pi: pointer to port_info
1431 * @vsi_port_num: VSI number or port number
1432 * @type: Type of switch element (port or VSI)
1433 * @swid: switch ID of the switch the element is attached to
1434 * @pf_vf_num: PF or VF number
1435 * @is_vf: true if the element is a VF, false otherwise
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		/* Low bits of vsi_port_num carry the logical port number */
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->pf_vf_num = pf_vf_num;
		/* No default Tx/Rx VSI until one is explicitly configured */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
1457 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1458 * @hw: pointer to the hardware structure
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 num_total_ports;

	/* Each PF function owns exactly one port in this configuration */
	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;

			ele = rbuf[i].elements;
			/* Strip the type bits to get the VSI/port number */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)

			/* Element type lives in the upper bits of
			 * vsi_port_num
			 */
			type = LE16_TO_CPU(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW,
						  "more ports than expected\n");
					status = ICE_ERR_CFG;
				ice_init_port_info(hw->port_info,
						   vsi_port_num, type, swid,
	} while (req_desc && !status);

	ice_free(hw, (void *)rbuf);
1540 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1541 * @hw: pointer to the hardware structure
1542 * @fi: filter info structure to fill/update
1544 * This helper function populates the lb_en and lan_en elements of the provided
1545 * ice_fltr_info struct using the switch's type and characteristics of the
1546 * switch rule being configured.
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
	/* Only Tx-direction forwarding rules need lb_en/lan_en tuning */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2.1 The lookup is a directional lookup like ethertype,
		 * promiscuous, ethertype-MAC, promiscuous-VLAN
		 * and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
		    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
		    fi->lkup_type == ICE_SW_LKUP_DFLT ||
		    fi->lkup_type == ICE_SW_LKUP_VLAN ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1598 * ice_ilog2 - Calculates integer log base 2 of a number
1599 * @n: number on which to perform operation
static int ice_ilog2(u64 n)
	/* Scan from the MSB down; result is the index of the highest set
	 * bit, i.e. floor(log2(n)).
	 */
	for (i = 63; i >= 0; i--)
		if (((u64)1 << i) & n)
1613 * ice_fill_sw_rule - Helper function to fill switch rule structure
1614 * @hw: pointer to the hardware structure
1615 * @f_info: entry containing packet forwarding information
1616 * @s_rule: switch rule structure to be filled in based on mac_entry
1617 * @opc: switch rules population command type - pass in the command opcode
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
	/* Sentinel: values above ICE_MAX_VLAN_ID mean "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;

	/* A remove rule only needs the rule index; no header or action */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	/* Populate lb_en/lan_en in f_info before encoding the action */
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		/* VLAN rules become prune rules below, not VSI forwarding */
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_QGRP:
		/* Queue-group region is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;

		act |= ICE_SINGLE_ACT_LB_ENABLE;
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick the lookup-type specific fields (DA, VLAN ID, ethertype) */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
	case ICE_SW_LKUP_ETHERTYPE:
		/* Program the ethertype into the dummy header (big-endian) */
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;

	/* Rule direction selects the Rx or Tx lookup rule type */
	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
		   ICE_NONDMA_TO_NONDMA);

	/* Only program the VLAN TCI when a valid VLAN ID was selected */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1747 * ice_add_marker_act
1748 * @hw: pointer to the hardware structure
1749 * @m_ent: the management entry for which sw marker needs to be added
1750 * @sw_marker: sw marker to tag the Rx descriptor with
1751 * @l_id: large action resource ID
1753 * Create a large action to hold software marker and update the switch rule
1754 * entry pointed by m_ent with newly created large action
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;

	/* Software markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
		return ICE_ERR_NO_MEMORY;

	/* Second rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
		/* Cache the large action info on the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;

	ice_free(hw, lg_act);
1852 * ice_add_counter_act - add/update filter rule with counter action
1853 * @hw: pointer to the hardware structure
1854 * @m_ent: the management entry for which counter needs to be added
1855 * @counter_id: VLAN counter ID returned as part of allocate resource
1856 * @l_id: large action resource ID
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;

	/* Counter actions are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
		return ICE_ERR_NO_MEMORY;

	/* Second rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)
		((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Point the single-act rule at the large action just built */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
		/* Cache the large action info on the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;

	ice_free(hw, lg_act);
1942 * ice_create_vsi_list_map
1943 * @hw: pointer to the hardware structure
1944 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1945 * @num_vsi: number of VSI handles in the array
1946 * @vsi_list_id: VSI list ID generated as part of allocate resource
1948 * Helper function to create a new entry of VSI list ID to VSI mapping
1949 * using the given VSI list ID
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;

	v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,

	v_map->vsi_list_id = vsi_list_id;
	/* Record each VSI handle as a member of this list */
	for (i = 0; i < num_vsi; i++)
		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);

	/* Track the mapping on the switch-wide list */
	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
1974 * ice_update_vsi_list_rule
1975 * @hw: pointer to the hardware structure
1976 * @vsi_handle_arr: array of VSI handles to form a VSI list
1977 * @num_vsi: number of VSI handles in the array
1978 * @vsi_list_id: VSI list ID generated as part of allocate resource
1979 * @remove: Boolean value to indicate if this is a remove action
1980 * @opc: switch rules population command type - pass in the command opcode
1981 * @lkup_type: lookup type of the filter
1983 * Call AQ command to add a new switch rule or update existing switch rule
1984 * using the given VSI list ID
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

		return ICE_ERR_PARAM;

	/* Non-VLAN lookups operate on a replication VSI list; VLAN lookups
	 * operate on a prune list. Any other lookup type is invalid.
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST)
		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
		return ICE_ERR_PARAM;

	/* Rule size scales with the number of VSIs in the list */
	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));

	s_rule->type = CPU_TO_LE16(type);
	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

	ice_free(hw, s_rule);
2041 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2042 * @hw: pointer to the HW struct
2043 * @vsi_handle_arr: array of VSI handles to form a VSI list
2044 * @num_vsi: number of VSI handles in the array
2045 * @vsi_list_id: stores the ID of the VSI list to be created
2046 * @lkup_type: switch rule filter's lookup type
static enum ice_status
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
	enum ice_status status;

	/* First reserve a VSI list ID from firmware */
	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
2066 * ice_create_pkt_fwd_rule
2067 * @hw: pointer to the hardware structure
2068 * @f_entry: entry containing packet forwarding information
2070 * Create switch rule with given filter information and add an entry
2071 * to the corresponding filter management list to track this switch rule
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
		return ICE_ERR_NO_MEMORY;
	fm_entry = (struct ice_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(*fm_entry));
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
		/* Management entry was never linked; release it here */
		ice_free(hw, fm_entry);
		goto ice_create_pkt_fwd_rule_exit;

	/* Record the firmware-assigned rule ID in both entries */
	f_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	ice_free(hw, s_rule);
2131 * ice_update_pkt_fwd_rule
2132 * @hw: pointer to the hardware structure
2133 * @f_info: filter information for switch rule
2135 * Call AQ command to update a previously created switch rule with a
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
		return ICE_ERR_NO_MEMORY;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	/* Target the existing rule by its firmware rule ID */
	s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	ice_free(hw, s_rule);
2162 * ice_update_sw_rule_bridge_mode
2163 * @hw: pointer to the HW struct
2165 * Updates unicast switch filter rules based on VEB/VEPA mode
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = ICE_SUCCESS;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	/* Only MAC lookup rules are affected by the bridge mode */
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 */
		if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			/* Re-issue the rule; ice_fill_sw_info recomputes
			 * lb_en/lan_en for the current bridge mode.
			 */
			status = ice_update_pkt_fwd_rule(hw, fi);

	ice_release_lock(rule_lock);
2204 * ice_add_update_vsi_list
2205 * @hw: pointer to the hardware structure
2206 * @m_entry: pointer to current filter management list entry
2207 * @cur_fltr: filter information from the book keeping entry
2208 * @new_fltr: filter information with the new VSI to be added
2210 * Call AQ command to add or update previously created VSI list with new VSI.
2212 * Helper function to do book keeping associated with adding filter information
2213 * The algorithm to do the book keeping is described below :
2214 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2215 * if only one VSI has been added till now
2216 * Allocate a new VSI list and add two VSIs
2217 * to this list using switch rule command
2218 * Update the previously created switch rule with the
2219 * newly created VSI list ID
2220 * if a VSI list was previously created
2221 * Add the new VSI to the previously created VSI list set
2222 * using the update switch rule command
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;

	/* Queue/queue-group actions cannot be merged into VSI lists */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  new_fltr->lkup_type);

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);

		/* Book-keeping: the original filter now forwards to a list */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
				ice_add_marker_act(hw, m_entry,
						   m_entry->sw_marker_id,
						   m_entry->lg_act_idx);
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);

		m_entry->vsi_count++;
2319 * ice_find_rule_entry - Search a rule entry
2320 * @hw: pointer to the hardware structure
2321 * @recp_id: lookup type for which the specified rule needs to be searched
2322 * @f_info: rule information
2324 * Helper function to search for a given rule entry
2325 * Returns pointer to entry storing the rule if found
/* NOTE(review): tail of this function (match assignment / return) is not
 * visible in this extract. Callers (e.g. ice_add_rule_internal, ice_add_mac)
 * hold the recipe's filt_rule_lock around this call; confirm that this
 * function itself takes no lock.
 */
2327 static struct ice_fltr_mgmt_list_entry *
2328 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2330 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2331 struct ice_switch_info *sw = hw->switch_info;
2332 struct LIST_HEAD_TYPE *list_head;
2334 list_head = &sw->recp_list[recp_id].filt_rules;
/* Match on the lookup data (l_data) plus the Rx/Tx flag only */
2335 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2337 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2338 sizeof(f_info->l_data)) &&
2339 f_info->flag == list_itr->fltr_info.flag) {
2348 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2349 * @hw: pointer to the hardware structure
2350 * @recp_id: lookup type for which VSI lists needs to be searched
2351 * @vsi_handle: VSI handle to be found in VSI list
2352 * @vsi_list_id: VSI list ID found containing vsi_handle
2354 * Helper function to search a VSI list with single entry containing given VSI
2355 * handle element. This can be extended further to search VSI list with more
2356 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* NOTE(review): loop bodies below are visibly truncated (missing braces,
 * the "found" early-exit, and the final return of map_info); verify
 * against upstream before relying on the search semantics.
 */
2358 static struct ice_vsi_list_map_info *
2359 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2362 struct ice_vsi_list_map_info *map_info = NULL;
2363 struct ice_switch_info *sw = hw->switch_info;
2364 struct LIST_HEAD_TYPE *list_head;
2366 list_head = &sw->recp_list[recp_id].filt_rules;
/* The recipe's filt_rules list stores a different entry type when the
 * recipe is an advanced rule, so branch on adv_rule before iterating.
 */
2367 if (sw->recp_list[recp_id].adv_rule) {
2368 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2370 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2371 ice_adv_fltr_mgmt_list_entry,
2373 if (list_itr->vsi_list_info) {
2374 map_info = list_itr->vsi_list_info;
2375 if (ice_is_bit_set(map_info->vsi_map,
2377 *vsi_list_id = map_info->vsi_list_id;
/* Non-advanced recipes: only consider single-VSI lists (vsi_count == 1) */
2383 struct ice_fltr_mgmt_list_entry *list_itr;
2385 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2386 ice_fltr_mgmt_list_entry,
2388 if (list_itr->vsi_count == 1 &&
2389 list_itr->vsi_list_info) {
2390 map_info = list_itr->vsi_list_info;
2391 if (ice_is_bit_set(map_info->vsi_map,
2393 *vsi_list_id = map_info->vsi_list_id;
2403 * ice_add_rule_internal - add rule for a given lookup type
2404 * @hw: pointer to the hardware structure
2405 * @recp_id: lookup type (recipe ID) for which rule has to be added
2406 * @f_entry: structure containing MAC forwarding information
2408 * Adds or updates the rule lists for a given recipe
/* NOTE(review): a few lines are missing from this extract (the "rule not
 * found" check before ice_create_pkt_fwd_rule and the final return);
 * compare with upstream.
 */
2410 static enum ice_status
2411 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2412 struct ice_fltr_list_entry *f_entry)
2414 struct ice_switch_info *sw = hw->switch_info;
2415 struct ice_fltr_info *new_fltr, *cur_fltr;
2416 struct ice_fltr_mgmt_list_entry *m_entry;
2417 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2418 enum ice_status status = ICE_SUCCESS;
2420 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2421 return ICE_ERR_PARAM;
2423 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2424 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2425 f_entry->fltr_info.fwd_id.hw_vsi_id =
2426 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2428 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
/* Hold the recipe lock across lookup + add/update so the list stays
 * consistent with the rules programmed in HW.
 */
2430 ice_acquire_lock(rule_lock);
2431 new_fltr = &f_entry->fltr_info;
/* Source field: physical port for Rx rules, HW VSI number for Tx rules */
2432 if (new_fltr->flag & ICE_FLTR_RX)
2433 new_fltr->src = hw->port_info->lport;
2434 else if (new_fltr->flag & ICE_FLTR_TX)
2436 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2438 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2440 status = ice_create_pkt_fwd_rule(hw, f_entry);
2441 goto exit_add_rule_internal;
/* Rule already exists: fold the new VSI into its VSI list instead */
2444 cur_fltr = &m_entry->fltr_info;
2445 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2447 exit_add_rule_internal:
2448 ice_release_lock(rule_lock);
2453 * ice_remove_vsi_list_rule
2454 * @hw: pointer to the hardware structure
2455 * @vsi_list_id: VSI list ID generated as part of allocate resource
2456 * @lkup_type: switch rule filter lookup type
2458 * The VSI list should be emptied before this function is called to remove the
/* NOTE(review): extract is missing lines here (allocation-failure branch
 * body, status handling, final return). In the visible lines, s_rule is
 * built (VSI_LIST_CLEAR) but never sent — presumably the AQ send was in a
 * dropped line or intentionally omitted upstream; verify before changing.
 */
2461 static enum ice_status
2462 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2463 enum ice_sw_lkup_type lkup_type)
2465 struct ice_aqc_sw_rules_elem *s_rule;
2466 enum ice_status status;
2469 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2470 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2472 return ICE_ERR_NO_MEMORY;
2474 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2475 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2477 /* Free the vsi_list resource that we allocated. It is assumed that the
2478 * list is empty at this point.
2480 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2481 ice_aqc_opc_free_res);
2483 ice_free(hw, s_rule);
2488 * ice_rem_update_vsi_list
2489 * @hw: pointer to the hardware structure
2490 * @vsi_handle: VSI handle of the VSI to remove
2491 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes one VSI from the filter's VSI list and collapses the list back
 * to a direct FWD_TO_VSI rule when only one subscriber remains (except
 * VLAN rules, which always keep a VSI list down to zero VSIs).
 * NOTE(review): several status checks/returns are missing from this
 * extract; confirm error paths against upstream.
 */
2494 static enum ice_status
2495 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2496 struct ice_fltr_mgmt_list_entry *fm_list)
2498 enum ice_sw_lkup_type lkup_type;
2499 enum ice_status status = ICE_SUCCESS;
2502 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2503 fm_list->vsi_count == 0)
2504 return ICE_ERR_PARAM;
2506 /* A rule with the VSI being removed does not exist */
2507 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2508 return ICE_ERR_DOES_NOT_EXIST;
2510 lkup_type = fm_list->fltr_info.lkup_type;
2511 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove_vsi_list = true: this AQ update deletes the VSI from the list */
2512 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2513 ice_aqc_opc_update_sw_rules,
2518 fm_list->vsi_count--;
2519 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2521 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2522 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2523 struct ice_vsi_list_map_info *vsi_list_info =
2524 fm_list->vsi_list_info;
/* Find the single remaining subscriber in the VSI bitmap */
2527 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2529 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2530 return ICE_ERR_OUT_OF_RANGE;
2532 /* Make sure VSI list is empty before removing it below */
2533 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2535 ice_aqc_opc_update_sw_rules,
/* Rewrite the rule as a direct forward to the last remaining VSI */
2540 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2541 tmp_fltr_info.fwd_id.hw_vsi_id =
2542 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2543 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2544 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2546 ice_debug(hw, ICE_DBG_SW,
2547 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2548 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2552 fm_list->fltr_info = tmp_fltr_info;
/* Free the VSI list once it is no longer referenced: after collapsing
 * to one VSI (non-VLAN) or once a VLAN list is fully empty.
 */
2555 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2556 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2557 struct ice_vsi_list_map_info *vsi_list_info =
2558 fm_list->vsi_list_info;
2560 /* Remove the VSI list since it is no longer used */
2561 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2563 ice_debug(hw, ICE_DBG_SW,
2564 "Failed to remove VSI list %d, error %d\n",
2565 vsi_list_id, status);
2569 LIST_DEL(&vsi_list_info->list_entry);
2570 ice_free(hw, vsi_list_info);
2571 fm_list->vsi_list_info = NULL;
2578 * ice_remove_rule_internal - Remove a filter rule of a given type
2580 * @hw: pointer to the hardware structure
2581 * @recp_id: recipe ID for which the rule needs to removed
2582 * @f_entry: rule entry containing filter information
/* NOTE(review): extract is missing lines (remove_rule = true assignments,
 * some status checks, goto label and final return); compare with upstream.
 */
2584 static enum ice_status
2585 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2586 struct ice_fltr_list_entry *f_entry)
2588 struct ice_switch_info *sw = hw->switch_info;
2589 struct ice_fltr_mgmt_list_entry *list_elem;
2590 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2591 enum ice_status status = ICE_SUCCESS;
2592 bool remove_rule = false;
2595 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2596 return ICE_ERR_PARAM;
2597 f_entry->fltr_info.fwd_id.hw_vsi_id =
2598 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2600 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2601 ice_acquire_lock(rule_lock);
2602 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2604 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be deleted, or only the VSI
 * list / reference count needs adjusting.
 */
2608 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2610 } else if (!list_elem->vsi_list_info) {
2611 status = ICE_ERR_DOES_NOT_EXIST;
2613 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2614 /* a ref_cnt > 1 indicates that the vsi_list is being
2615 * shared by multiple rules. Decrement the ref_cnt and
2616 * remove this rule, but do not modify the list, as it
2617 * is in-use by other rules.
2619 list_elem->vsi_list_info->ref_cnt--;
2622 /* a ref_cnt of 1 indicates the vsi_list is only used
2623 * by one rule. However, the original removal request is only
2624 * for a single VSI. Update the vsi_list first, and only
2625 * remove the rule if there are no further VSIs in this list.
2627 vsi_handle = f_entry->fltr_info.vsi_handle;
2628 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2631 /* if VSI count goes to zero after updating the VSI list */
2632 if (list_elem->vsi_count == 0)
2637 /* Remove the lookup rule */
2638 struct ice_aqc_sw_rules_elem *s_rule;
2640 s_rule = (struct ice_aqc_sw_rules_elem *)
2641 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2643 status = ICE_ERR_NO_MEMORY;
2647 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2648 ice_aqc_opc_remove_sw_rules);
2650 status = ice_aq_sw_rules(hw, s_rule,
2651 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2652 ice_aqc_opc_remove_sw_rules, NULL);
2656 /* Remove a book keeping from the list */
2657 ice_free(hw, s_rule);
2659 LIST_DEL(&list_elem->list_entry);
2660 ice_free(hw, list_elem);
2663 ice_release_lock(rule_lock);
2668 * ice_aq_get_res_alloc - get allocated resources
2669 * @hw: pointer to the HW struct
2670 * @num_entries: pointer to u16 to store the number of resource entries returned
2671 * @buf: pointer to user-supplied buffer
2672 * @buf_size: size of buff
2673 * @cd: pointer to command details structure or NULL
2675 * The user-supplied buffer must be large enough to store the resource
2676 * information for all resource types. Each resource type is an
2677 * ice_aqc_get_res_resp_data_elem structure.
/* NOTE(review): the NULL-buf guard condition and return type line are not
 * visible in this extract.
 */
2680 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2681 u16 buf_size, struct ice_sq_cd *cd)
2683 struct ice_aqc_get_res_alloc *resp;
2684 enum ice_status status;
2685 struct ice_aq_desc desc;
2688 return ICE_ERR_BAD_PTR;
2690 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2691 return ICE_ERR_INVAL_SIZE;
2693 resp = &desc.params.get_res;
2695 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2696 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on success */
2698 if (!status && num_entries)
2699 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2705 * ice_aq_get_res_descs - get allocated resource descriptors
2706 * @hw: pointer to the hardware structure
2707 * @num_entries: number of resource entries in buffer
2708 * @buf: Indirect buffer to hold data parameters and response
2709 * @buf_size: size of buffer for indirect commands
2710 * @res_type: resource type
2711 * @res_shared: is resource shared
2712 * @desc_id: input - first desc ID to start; output - next desc ID
2713 * @cd: pointer to command details structure or NULL
2716 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2717 struct ice_aqc_get_allocd_res_desc_resp *buf,
2718 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2719 struct ice_sq_cd *cd)
2721 struct ice_aqc_get_allocd_res_desc *cmd;
2722 struct ice_aq_desc desc;
2723 enum ice_status status;
2725 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2727 cmd = &desc.params.get_res_desc;
/* NOTE(review): the !buf guard condition line is missing from extract */
2730 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements */
2732 if (buf_size != (num_entries * sizeof(*buf)))
2733 return ICE_ERR_PARAM;
2735 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and shared flag into the single res field */
2737 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2738 ICE_AQC_RES_TYPE_M) | (res_shared ?
2739 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2740 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2742 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2744 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the pagination cursor for the next call */
2746 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2752 * ice_add_mac - Add a MAC address based filter rule
2753 * @hw: pointer to the hardware structure
2754 * @m_list: list of MAC addresses and forwarding information
2756 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2757 * multiple unicast addresses, the function assumes that all the
2758 * addresses are unique in a given add_mac call. It doesn't
2759 * check for duplicates in this case, removing duplicates from a given
2760 * list should be taken care of in the caller of this function.
/* Two-phase add: multicast (or shared unicast) entries go through
 * ice_add_rule_internal one at a time; exclusive unicast entries are
 * batched into one buffer and sent to FW in ICE_AQ_MAX_BUF_LEN chunks.
 * NOTE(review): extract is missing lines (parameter checks, num_unicast++
 * accounting, several if-conditions/braces and the final return); compare
 * with upstream before modifying.
 */
2763 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2765 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2766 struct ice_fltr_list_entry *m_list_itr;
2767 struct LIST_HEAD_TYPE *rule_head;
2768 u16 elem_sent, total_elem_left;
2769 struct ice_switch_info *sw;
2770 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2771 enum ice_status status = ICE_SUCCESS;
2772 u16 num_unicast = 0;
2776 return ICE_ERR_PARAM;
2778 sw = hw->switch_info;
2779 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry and dispatch the non-batched ones */
2780 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2782 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2786 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2787 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2788 if (!ice_is_vsi_valid(hw, vsi_handle))
2789 return ICE_ERR_PARAM;
2790 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2791 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2792 /* update the src in case it is VSI num */
2793 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2794 return ICE_ERR_PARAM;
2795 m_list_itr->fltr_info.src = hw_vsi_id;
2796 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2797 IS_ZERO_ETHER_ADDR(add))
2798 return ICE_ERR_PARAM;
2799 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
2800 /* Don't overwrite the unicast address */
2801 ice_acquire_lock(rule_lock);
2802 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2803 &m_list_itr->fltr_info)) {
2804 ice_release_lock(rule_lock);
2805 return ICE_ERR_ALREADY_EXISTS;
2807 ice_release_lock(rule_lock);
2809 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
2810 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
2811 m_list_itr->status =
2812 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2814 if (m_list_itr->status)
2815 return m_list_itr->status;
2819 ice_acquire_lock(rule_lock);
2820 /* Exit if no suitable entries were found for adding bulk switch rule */
2822 status = ICE_SUCCESS;
2823 goto ice_add_mac_exit;
2826 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2828 /* Allocate switch rule buffer for the bulk update for unicast */
2829 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2830 s_rule = (struct ice_aqc_sw_rules_elem *)
2831 ice_calloc(hw, num_unicast, s_rule_size);
2833 status = ICE_ERR_NO_MEMORY;
2834 goto ice_add_mac_exit;
/* Pass 2: pack one switch-rule element per unicast address */
2838 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2840 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2841 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2843 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2844 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2845 ice_aqc_opc_add_sw_rules);
2846 r_iter = (struct ice_aqc_sw_rules_elem *)
2847 ((u8 *)r_iter + s_rule_size);
2851 /* Call AQ bulk switch rule update for all unicast addresses */
2853 /* Call AQ switch rule in AQ_MAX chunk */
2854 for (total_elem_left = num_unicast; total_elem_left > 0;
2855 total_elem_left -= elem_sent) {
2856 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ send at what fits in ICE_AQ_MAX_BUF_LEN */
2858 elem_sent = min(total_elem_left,
2859 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
2860 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2861 elem_sent, ice_aqc_opc_add_sw_rules,
2864 goto ice_add_mac_exit;
2865 r_iter = (struct ice_aqc_sw_rules_elem *)
2866 ((u8 *)r_iter + (elem_sent * s_rule_size));
2869 /* Fill up rule ID based on the value returned from FW */
2871 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2873 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2874 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2875 struct ice_fltr_mgmt_list_entry *fm_entry;
2877 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2878 f_info->fltr_rule_id =
2879 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
2880 f_info->fltr_act = ICE_FWD_TO_VSI;
2881 /* Create an entry to track this MAC address */
2882 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2883 ice_malloc(hw, sizeof(*fm_entry));
2885 status = ICE_ERR_NO_MEMORY;
2886 goto ice_add_mac_exit;
2888 fm_entry->fltr_info = *f_info;
2889 fm_entry->vsi_count = 1;
2890 /* The book keeping entries will get removed when
2891 * base driver calls remove filter AQ command
2894 LIST_ADD(&fm_entry->list_entry, rule_head);
2895 r_iter = (struct ice_aqc_sw_rules_elem *)
2896 ((u8 *)r_iter + s_rule_size);
2901 ice_release_lock(rule_lock);
2903 ice_free(hw, s_rule);
2908 * ice_add_vlan_internal - Add one VLAN based filter rule
2909 * @hw: pointer to the hardware structure
2910 * @f_entry: filter entry containing one VLAN information
/* VLAN rules always forward to a VSI list (never a bare VSI): a new rule
 * either reuses an existing single-VSI list, creates one, or — when the
 * existing list is shared — clones it into a new two-VSI list.
 * NOTE(review): extract is missing lines (status checks after helper
 * calls, several braces, goto label and final return); compare with
 * upstream before modifying control flow.
 */
2912 static enum ice_status
2913 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2915 struct ice_switch_info *sw = hw->switch_info;
2916 struct ice_fltr_mgmt_list_entry *v_list_itr;
2917 struct ice_fltr_info *new_fltr, *cur_fltr;
2918 enum ice_sw_lkup_type lkup_type;
2919 u16 vsi_list_id = 0, vsi_handle;
2920 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2921 enum ice_status status = ICE_SUCCESS;
2923 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2924 return ICE_ERR_PARAM;
2926 f_entry->fltr_info.fwd_id.hw_vsi_id =
2927 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2928 new_fltr = &f_entry->fltr_info;
2930 /* VLAN ID should only be 12 bits */
2931 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2932 return ICE_ERR_PARAM;
2934 if (new_fltr->src_id != ICE_SRC_ID_VSI)
2935 return ICE_ERR_PARAM;
2937 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2938 lkup_type = new_fltr->lkup_type;
2939 vsi_handle = new_fltr->vsi_handle;
2940 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2941 ice_acquire_lock(rule_lock);
2942 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* No rule for this VLAN yet */
2944 struct ice_vsi_list_map_info *map_info = NULL;
2946 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2947 /* All VLAN pruning rules use a VSI list. Check if
2948 * there is already a VSI list containing VSI that we
2949 * want to add. If found, use the same vsi_list_id for
2950 * this new VLAN rule or else create a new list.
2952 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2956 status = ice_create_vsi_list_rule(hw,
2964 /* Convert the action to forwarding to a VSI list. */
2965 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2966 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2969 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-find the entry just created so its book keeping can be updated */
2971 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2974 status = ICE_ERR_DOES_NOT_EXIST;
2977 /* reuse VSI list for new rule and increment ref_cnt */
2979 v_list_itr->vsi_list_info = map_info;
2980 map_info->ref_cnt++;
2982 v_list_itr->vsi_list_info =
2983 ice_create_vsi_list_map(hw, &vsi_handle,
2987 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2988 /* Update existing VSI list to add new VSI ID only if it used
2991 cur_fltr = &v_list_itr->fltr_info;
2992 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2995 /* If VLAN rule exists and VSI list being used by this rule is
2996 * referenced by more than 1 VLAN rule. Then create a new VSI
2997 * list appending previous VSI with new VSI and update existing
2998 * VLAN rule to point to new VSI list ID
3000 struct ice_fltr_info tmp_fltr;
3001 u16 vsi_handle_arr[2];
3004 /* Current implementation only supports reusing VSI list with
3005 * one VSI count. We should never hit below condition
3007 if (v_list_itr->vsi_count > 1 &&
3008 v_list_itr->vsi_list_info->ref_cnt > 1) {
3009 ice_debug(hw, ICE_DBG_SW,
3010 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3011 status = ICE_ERR_CFG;
/* cur_handle = the single VSI already on the shared list */
3016 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3019 /* A rule already exists with the new VSI being added */
3020 if (cur_handle == vsi_handle) {
3021 status = ICE_ERR_ALREADY_EXISTS;
3025 vsi_handle_arr[0] = cur_handle;
3026 vsi_handle_arr[1] = vsi_handle;
3027 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3028 &vsi_list_id, lkup_type);
3032 tmp_fltr = v_list_itr->fltr_info;
3033 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3034 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3035 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3036 /* Update the previous switch rule to a new VSI list which
3037 * includes current VSI that is requested
3039 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3043 /* before overriding VSI list map info. decrement ref_cnt of
3046 v_list_itr->vsi_list_info->ref_cnt--;
3048 /* now update to newly created list */
3049 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3050 v_list_itr->vsi_list_info =
3051 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3053 v_list_itr->vsi_count++;
3057 ice_release_lock(rule_lock);
3062 * ice_add_vlan - Add VLAN based filter rule
3063 * @hw: pointer to the hardware structure
3064 * @v_list: list of VLAN entries and forwarding information
/* Thin iterator over ice_add_vlan_internal(); stops at the first failing
 * entry and returns its status (per-entry status is also recorded).
 * NOTE(review): the NULL-list guard line and final return are not visible
 * in this extract.
 */
3067 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3069 struct ice_fltr_list_entry *v_list_itr;
3072 return ICE_ERR_PARAM;
3074 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3076 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3077 return ICE_ERR_PARAM;
3078 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3079 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3080 if (v_list_itr->status)
3081 return v_list_itr->status;
3087 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3088 * @hw: pointer to the hardware structure
3089 * @mv_list: list of MAC and VLAN filters
3091 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3092 * pruning bits enabled, then it is the responsibility of the caller to make
3093 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3094 * VLAN won't be received on that VSI otherwise.
/* Iterates mv_list, forcing the Tx flag and delegating each entry to
 * ice_add_rule_internal() with the MAC_VLAN recipe; stops on first error.
 */
3097 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3099 struct ice_fltr_list_entry *mv_list_itr;
3101 if (!mv_list || !hw)
3102 return ICE_ERR_PARAM;
3104 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3106 enum ice_sw_lkup_type l_type =
3107 mv_list_itr->fltr_info.lkup_type;
3109 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3110 return ICE_ERR_PARAM;
3111 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3112 mv_list_itr->status =
3113 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3115 if (mv_list_itr->status)
3116 return mv_list_itr->status;
3122 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3123 * @hw: pointer to the hardware structure
3124 * @em_list: list of ether type MAC filter, MAC is optional
3126 * This function requires the caller to populate the entries in
3127 * the filter list with the necessary fields (including flags to
3128 * indicate Tx or Rx rules).
/* Unlike ice_add_mac/ice_add_vlan, the Rx/Tx flag is NOT forced here —
 * each entry's pre-set flag is used as-is. Stops on first failing entry.
 */
3131 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3133 struct ice_fltr_list_entry *em_list_itr;
3135 if (!em_list || !hw)
3136 return ICE_ERR_PARAM;
3138 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3140 enum ice_sw_lkup_type l_type =
3141 em_list_itr->fltr_info.lkup_type;
3143 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3144 l_type != ICE_SW_LKUP_ETHERTYPE)
3145 return ICE_ERR_PARAM;
3147 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3149 if (em_list_itr->status)
3150 return em_list_itr->status;
3156 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3157 * @hw: pointer to the hardware structure
3158 * @em_list: list of ethertype or ethertype MAC entries
/* Safe-iterates (entries may be unlinked by removal) and delegates each
 * entry to ice_remove_rule_internal(); stops on first failing entry.
 */
3161 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3163 struct ice_fltr_list_entry *em_list_itr, *tmp;
3165 if (!em_list || !hw)
3166 return ICE_ERR_PARAM;
3168 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3170 enum ice_sw_lkup_type l_type =
3171 em_list_itr->fltr_info.lkup_type;
3173 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3174 l_type != ICE_SW_LKUP_ETHERTYPE)
3175 return ICE_ERR_PARAM;
3177 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3179 if (em_list_itr->status)
3180 return em_list_itr->status;
3187 * ice_rem_sw_rule_info
3188 * @hw: pointer to the hardware structure
3189 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every book keeping entry on a non-advanced recipe's filter list.
 * Software-only cleanup: no AQ commands are issued here, so HW rules are
 * untouched.
 */
3192 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3194 if (!LIST_EMPTY(rule_head)) {
3195 struct ice_fltr_mgmt_list_entry *entry;
3196 struct ice_fltr_mgmt_list_entry *tmp;
3198 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3199 ice_fltr_mgmt_list_entry, list_entry) {
3200 LIST_DEL(&entry->list_entry);
3201 ice_free(hw, entry);
3207 * ice_rem_adv_rule_info
3208 * @hw: pointer to the hardware structure
3209 * @rule_head: pointer to the switch list structure that we want to delete
/* Advanced-rule counterpart of ice_rem_sw_rule_info(): frees each entry's
 * separately-allocated lkups array before freeing the entry itself.
 */
3212 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3214 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3215 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3217 if (LIST_EMPTY(rule_head))
3220 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3221 ice_adv_fltr_mgmt_list_entry, list_entry) {
3222 LIST_DEL(&lst_itr->list_entry);
3223 ice_free(hw, lst_itr->lkups);
3224 ice_free(hw, lst_itr);
3229 * ice_rem_all_sw_rules_info
3230 * @hw: pointer to the hardware structure
/* Drops the software book keeping for every recipe, dispatching to the
 * advanced or non-advanced cleanup helper based on each recipe's
 * adv_rule flag.
 */
3232 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3234 struct ice_switch_info *sw = hw->switch_info;
3237 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3238 struct LIST_HEAD_TYPE *rule_head;
3240 rule_head = &sw->recp_list[i].filt_rules;
3241 if (!sw->recp_list[i].adv_rule)
3242 ice_rem_sw_rule_info(hw, rule_head);
3244 ice_rem_adv_rule_info(hw, rule_head);
3249 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3250 * @pi: pointer to the port_info structure
3251 * @vsi_handle: VSI handle to set as default
3252 * @set: true to add the above mentioned switch rule, false to remove it
3253 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3255 * add filter rule to set/unset given VSI as default VSI for the switch
3256 * (represented by swid)
/* NOTE(review): extract is missing lines (some if-conditions around the
 * rule-ID reuse on removal, braces, and the final return); compare with
 * upstream before modifying.
 */
3259 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3262 struct ice_aqc_sw_rules_elem *s_rule;
3263 struct ice_fltr_info f_info;
3264 struct ice_hw *hw = pi->hw;
3265 enum ice_adminq_opc opcode;
3266 enum ice_status status;
3270 if (!ice_is_vsi_valid(hw, vsi_handle))
3271 return ICE_ERR_PARAM;
3272 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add needs the dummy Ethernet header appended; remove does not */
3274 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3275 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3276 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3278 return ICE_ERR_NO_MEMORY;
3280 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3282 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3283 f_info.flag = direction;
3284 f_info.fltr_act = ICE_FWD_TO_VSI;
3285 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3287 if (f_info.flag & ICE_FLTR_RX) {
3288 f_info.src = pi->lport;
3289 f_info.src_id = ICE_SRC_ID_LPORT;
/* On removal, reuse the rule ID recorded when the rule was added */
3291 f_info.fltr_rule_id =
3292 pi->dflt_rx_vsi_rule_id;
3293 } else if (f_info.flag & ICE_FLTR_TX) {
3294 f_info.src_id = ICE_SRC_ID_VSI;
3295 f_info.src = hw_vsi_id;
3297 f_info.fltr_rule_id =
3298 pi->dflt_tx_vsi_rule_id;
3302 opcode = ice_aqc_opc_add_sw_rules;
3304 opcode = ice_aqc_opc_remove_sw_rules;
3306 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3308 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3309 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* Success on "set": remember VSI number and FW rule index per direction */
3312 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3314 if (f_info.flag & ICE_FLTR_TX) {
3315 pi->dflt_tx_vsi_num = hw_vsi_id;
3316 pi->dflt_tx_vsi_rule_id = index;
3317 } else if (f_info.flag & ICE_FLTR_RX) {
3318 pi->dflt_rx_vsi_num = hw_vsi_id;
3319 pi->dflt_rx_vsi_rule_id = index;
/* Success on "clear": invalidate the cached default-VSI state */
3322 if (f_info.flag & ICE_FLTR_TX) {
3323 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3324 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3325 } else if (f_info.flag & ICE_FLTR_RX) {
3326 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3327 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3332 ice_free(hw, s_rule);
3337 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3338 * @hw: pointer to the hardware structure
3339 * @recp_id: lookup type for which the specified rule needs to be searched
3340 * @f_info: rule information
3342 * Helper function to search for a unicast rule entry - this is to be used
3343 * to remove unicast MAC filter that is not shared with other VSIs on the
3346 * Returns pointer to entry storing the rule if found
/* Stricter than ice_find_rule_entry(): also requires the forwarding HW
 * VSI ID to match, so a VSI only finds its own unicast rule.
 * NOTE(review): the "return list_itr" / final "return NULL" lines are not
 * visible in this extract.
 */
3348 static struct ice_fltr_mgmt_list_entry *
3349 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3350 struct ice_fltr_info *f_info)
3352 struct ice_switch_info *sw = hw->switch_info;
3353 struct ice_fltr_mgmt_list_entry *list_itr;
3354 struct LIST_HEAD_TYPE *list_head;
3356 list_head = &sw->recp_list[recp_id].filt_rules;
3357 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3359 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3360 sizeof(f_info->l_data)) &&
3361 f_info->fwd_id.hw_vsi_id ==
3362 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3363 f_info->flag == list_itr->fltr_info.flag)
3370 * ice_remove_mac - remove a MAC address based filter rule
3371 * @hw: pointer to the hardware structure
3372 * @m_list: list of MAC addresses and forwarding information
3374 * This function removes either a MAC filter rule or a specific VSI from a
3375 * VSI list for a multicast MAC address.
3377 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3378 * ice_add_mac. Caller should be aware that this call will only work if all
3379 * the entries passed into m_list were added previously. It will not attempt to
3380 * do a partial remove of entries that were found.
/* NOTE(review): the final success return is not visible in this extract */
3383 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3385 struct ice_fltr_list_entry *list_itr, *tmp;
3386 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3389 return ICE_ERR_PARAM;
3391 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3392 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3394 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3395 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3398 if (l_type != ICE_SW_LKUP_MAC)
3399 return ICE_ERR_PARAM;
3401 vsi_handle = list_itr->fltr_info.vsi_handle;
3402 if (!ice_is_vsi_valid(hw, vsi_handle))
3403 return ICE_ERR_PARAM;
3405 list_itr->fltr_info.fwd_id.hw_vsi_id =
3406 ice_get_hw_vsi_num(hw, vsi_handle);
3407 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3408 /* Don't remove the unicast address that belongs to
3409 * another VSI on the switch, since it is not being
/* Ownership check: this VSI must have its own exact rule */
3412 ice_acquire_lock(rule_lock);
3413 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3414 &list_itr->fltr_info)) {
3415 ice_release_lock(rule_lock);
3416 return ICE_ERR_DOES_NOT_EXIST;
3418 ice_release_lock(rule_lock);
3420 list_itr->status = ice_remove_rule_internal(hw,
3423 if (list_itr->status)
3424 return list_itr->status;
3430 * ice_remove_vlan - Remove VLAN based filter rule
3431 * @hw: pointer to the hardware structure
3432 * @v_list: list of VLAN entries and forwarding information
3435 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3437 struct ice_fltr_list_entry *v_list_itr, *tmp;
3440 return ICE_ERR_PARAM;
/* Remove each VLAN rule; stop on the first failure and report it.
 * No partial-remove recovery is attempted (same contract as
 * ice_remove_mac above).
 */
3442 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3444 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3446 if (l_type != ICE_SW_LKUP_VLAN)
3447 return ICE_ERR_PARAM;
3448 v_list_itr->status = ice_remove_rule_internal(hw,
3451 if (v_list_itr->status)
3452 return v_list_itr->status;
3458 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3459 * @hw: pointer to the hardware structure
3460 * @v_list: list of MAC VLAN entries and forwarding information
3463 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3465 struct ice_fltr_list_entry *v_list_itr, *tmp;
3468 return ICE_ERR_PARAM;
/* Same per-entry pattern as ice_remove_vlan, but for the combined
 * MAC+VLAN lookup recipe; first failing entry aborts the walk.
 */
3470 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3472 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3474 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3475 return ICE_ERR_PARAM;
3476 v_list_itr->status =
3477 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3479 if (v_list_itr->status)
3480 return v_list_itr->status;
3486 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3487 * @fm_entry: filter entry to inspect
3488 * @vsi_handle: VSI handle to compare with filter info
3491 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* A VSI "uses" a filter either directly (FWD_TO_VSI with a matching
 * handle) or indirectly via membership in the rule's VSI list bitmap.
 */
3493 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3494 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3495 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3496 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3501 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3502 * @hw: pointer to the hardware structure
3503 * @vsi_handle: VSI handle to remove filters from
3504 * @vsi_list_head: pointer to the list to add entry to
3505 * @fi: pointer to fltr_info of filter entry to copy & add
3507 * Helper function, used when creating a list of filters to remove from
3508 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3509 * original filter entry, with the exception of fltr_info.fltr_act and
3510 * fltr_info.fwd_id fields. These are set such that later logic can
3511 * extract which VSI to remove the fltr from, and pass on that information.
3513 static enum ice_status
3514 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3515 struct LIST_HEAD_TYPE *vsi_list_head,
3516 struct ice_fltr_info *fi)
3518 struct ice_fltr_list_entry *tmp;
3520 /* this memory is freed up in the caller function
3521 * once filters for this VSI are removed
3523 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3525 return ICE_ERR_NO_MEMORY;
/* Struct copy of the filter info; the original list entry is untouched */
3527 tmp->fltr_info = *fi;
3529 /* Overwrite these fields to indicate which VSI to remove filter from,
3530 * so find and remove logic can extract the information from the
3531 * list entries. Note that original entries will still have proper
3534 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3535 tmp->fltr_info.vsi_handle = vsi_handle;
3536 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3538 LIST_ADD(&tmp->list_entry, vsi_list_head);
3544 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3545 * @hw: pointer to the hardware structure
3546 * @vsi_handle: VSI handle to remove filters from
3547 * @lkup_list_head: pointer to the list that has certain lookup type filters
3548 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3550 * Locates all filters in lkup_list_head that are used by the given VSI,
3551 * and adds COPIES of those entries to vsi_list_head (intended to be used
3552 * to remove the listed filters).
3553 * Note that this means all entries in vsi_list_head must be explicitly
3554 * deallocated by the caller when done with list.
3556 static enum ice_status
3557 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3558 struct LIST_HEAD_TYPE *lkup_list_head,
3559 struct LIST_HEAD_TYPE *vsi_list_head)
3561 struct ice_fltr_mgmt_list_entry *fm_entry;
3562 enum ice_status status = ICE_SUCCESS;
3564 /* check to make sure VSI ID is valid and within boundary */
3565 if (!ice_is_vsi_valid(hw, vsi_handle))
3566 return ICE_ERR_PARAM;
3568 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3569 ice_fltr_mgmt_list_entry, list_entry) {
3570 struct ice_fltr_info *fi;
3572 fi = &fm_entry->fltr_info;
/* Skip entries not referenced by this VSI (directly or via VSI list) */
3573 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3576 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3586 * ice_determine_promisc_mask
3587 * @fi: filter info to parse
3589 * Helper function to determine which ICE_PROMISC_ mask corresponds
3590 * to given filter into.
3592 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3594 u16 vid = fi->l_data.mac_vlan.vlan_id;
3595 u8 *macaddr = fi->l_data.mac.mac_addr;
3596 bool is_tx_fltr = false;
3597 u8 promisc_mask = 0;
/* Direction first: a TX filter selects the *_TX variant of each bit */
3599 if (fi->flag == ICE_FLTR_TX)
/* Classify by destination MAC: broadcast, multicast, or unicast */
3602 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3603 promisc_mask |= is_tx_fltr ?
3604 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3605 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3606 promisc_mask |= is_tx_fltr ?
3607 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3608 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3609 promisc_mask |= is_tx_fltr ?
3610 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* presumably gated on vid != 0 in a line not shown here — the VLAN
 * promisc bit is added on top of the MAC classification
 */
3612 promisc_mask |= is_tx_fltr ?
3613 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3615 return promisc_mask;
3619 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3620 * @hw: pointer to the hardware structure
3621 * @vsi_handle: VSI handle to retrieve info from
3622 * @promisc_mask: pointer to mask to be filled in
3623 * @vid: VLAN ID of promisc VLAN VSI
3626 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3629 struct ice_switch_info *sw = hw->switch_info;
3630 struct ice_fltr_mgmt_list_entry *itr;
3631 struct LIST_HEAD_TYPE *rule_head;
3632 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3634 if (!ice_is_vsi_valid(hw, vsi_handle))
3635 return ICE_ERR_PARAM;
/* Aggregate the promisc bits from every PROMISC-recipe rule this VSI
 * participates in; the list walk is done under the recipe lock.
 */
3639 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3640 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3642 ice_acquire_lock(rule_lock);
3643 LIST_FOR_EACH_ENTRY(itr, rule_head,
3644 ice_fltr_mgmt_list_entry, list_entry) {
3645 /* Continue if this filter doesn't apply to this VSI or the
3646 * VSI ID is not in the VSI map for this filter
3648 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3651 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3653 ice_release_lock(rule_lock);
3659 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3660 * @hw: pointer to the hardware structure
3661 * @vsi_handle: VSI handle to retrieve info from
3662 * @promisc_mask: pointer to mask to be filled in
3663 * @vid: VLAN ID of promisc VLAN VSI
3666 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3669 struct ice_switch_info *sw = hw->switch_info;
3670 struct ice_fltr_mgmt_list_entry *itr;
3671 struct LIST_HEAD_TYPE *rule_head;
3672 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3674 if (!ice_is_vsi_valid(hw, vsi_handle))
3675 return ICE_ERR_PARAM;
/* Same aggregation as ice_get_vsi_promisc, but over the
 * PROMISC_VLAN recipe's rule list.
 */
3679 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3680 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3682 ice_acquire_lock(rule_lock);
3683 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3685 /* Continue if this filter doesn't apply to this VSI or the
3686 * VSI ID is not in the VSI map for this filter
3688 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3691 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3693 ice_release_lock(rule_lock);
3699 * ice_remove_promisc - Remove promisc based filter rules
3700 * @hw: pointer to the hardware structure
3701 * @recp_id: recipe ID for which the rule needs to removed
3702 * @v_list: list of promisc entries
3704 static enum ice_status
3705 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3706 struct LIST_HEAD_TYPE *v_list)
3708 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* Remove each promisc rule via the common internal path; abort the
 * walk and propagate the first per-entry failure.
 */
3710 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3712 v_list_itr->status =
3713 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3714 if (v_list_itr->status)
3715 return v_list_itr->status;
3721 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3722 * @hw: pointer to the hardware structure
3723 * @vsi_handle: VSI handle to clear mode
3724 * @promisc_mask: mask of promiscuous config bits to clear
3725 * @vid: VLAN ID to clear VLAN promiscuous
3728 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3731 struct ice_switch_info *sw = hw->switch_info;
3732 struct ice_fltr_list_entry *fm_entry, *tmp;
3733 struct LIST_HEAD_TYPE remove_list_head;
3734 struct ice_fltr_mgmt_list_entry *itr;
3735 struct LIST_HEAD_TYPE *rule_head;
3736 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3737 enum ice_status status = ICE_SUCCESS;
3740 if (!ice_is_vsi_valid(hw, vsi_handle))
3741 return ICE_ERR_PARAM;
/* VLAN-promisc bits live in a separate recipe from plain promisc;
 * choose the recipe based on the requested mask (the condition line
 * itself is not visible in this view).
 */
3744 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3746 recipe_id = ICE_SW_LKUP_PROMISC;
3748 rule_head = &sw->recp_list[recipe_id].filt_rules;
3749 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3751 INIT_LIST_HEAD(&remove_list_head);
3753 ice_acquire_lock(rule_lock);
3754 LIST_FOR_EACH_ENTRY(itr, rule_head,
3755 ice_fltr_mgmt_list_entry, list_entry) {
3756 u8 fltr_promisc_mask = 0;
3758 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3761 fltr_promisc_mask |=
3762 ice_determine_promisc_mask(&itr->fltr_info);
3764 /* Skip if filter is not completely specified by given mask */
3765 if (fltr_promisc_mask & ~promisc_mask)
/* Collect a COPY of each matching rule while holding the lock;
 * the actual removal happens after the lock is dropped.
 */
3768 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3772 ice_release_lock(rule_lock);
3773 goto free_fltr_list;
3776 ice_release_lock(rule_lock);
3778 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the copied entries allocated by ice_add_entry_to_vsi_fltr_list */
3781 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3782 ice_fltr_list_entry, list_entry) {
3783 LIST_DEL(&fm_entry->list_entry);
3784 ice_free(hw, fm_entry);
3791 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3792 * @hw: pointer to the hardware structure
3793 * @vsi_handle: VSI handle to configure
3794 * @promisc_mask: mask of promiscuous config bits
3795 * @vid: VLAN ID to set VLAN promiscuous
3798 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3800 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3801 struct ice_fltr_list_entry f_list_entry;
3802 struct ice_fltr_info new_fltr;
3803 enum ice_status status = ICE_SUCCESS;
3809 ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3811 if (!ice_is_vsi_valid(hw, vsi_handle))
3812 return ICE_ERR_PARAM;
3813 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3815 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc requests use the PROMISC_VLAN recipe and carry the VID
 * in the lookup data; everything else uses the plain PROMISC recipe.
 */
3817 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3818 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3819 new_fltr.l_data.mac_vlan.vlan_id = vid;
3820 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3822 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3823 recipe_id = ICE_SW_LKUP_PROMISC;
3826 /* Separate filters must be set for each direction/packet type
3827 * combination, so we will loop over the mask value, store the
3828 * individual type, and clear it out in the input mask as it
3831 while (promisc_mask) {
/* Pick off exactly one direction/packet-type bit per iteration */
3837 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3838 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3839 pkt_type = UCAST_FLTR;
3840 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3841 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3842 pkt_type = UCAST_FLTR;
3844 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3845 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3846 pkt_type = MCAST_FLTR;
3847 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3848 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3849 pkt_type = MCAST_FLTR;
3851 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3852 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3853 pkt_type = BCAST_FLTR;
3854 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3855 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3856 pkt_type = BCAST_FLTR;
3860 /* Check for VLAN promiscuous flag */
3861 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3862 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3863 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3864 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3868 /* Set filter DA based on packet type */
3869 mac_addr = new_fltr.l_data.mac.mac_addr;
3870 if (pkt_type == BCAST_FLTR) {
3871 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
3872 } else if (pkt_type == MCAST_FLTR ||
3873 pkt_type == UCAST_FLTR) {
3874 /* Use the dummy ether header DA */
3875 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
3876 ICE_NONDMA_TO_NONDMA);
3877 if (pkt_type == MCAST_FLTR)
3878 mac_addr[0] |= 0x1; /* Set multicast bit */
3881 /* Need to reset this to zero for all iterations */
/* TX promisc rules use the VSI as source; RX rules use the port */
3884 new_fltr.flag |= ICE_FLTR_TX;
3885 new_fltr.src = hw_vsi_id;
3887 new_fltr.flag |= ICE_FLTR_RX;
3888 new_fltr.src = hw->port_info->lport;
3891 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3892 new_fltr.vsi_handle = vsi_handle;
3893 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3894 f_list_entry.fltr_info = new_fltr;
3896 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3897 if (status != ICE_SUCCESS)
3898 goto set_promisc_exit;
3906 * ice_set_vlan_vsi_promisc
3907 * @hw: pointer to the hardware structure
3908 * @vsi_handle: VSI handle to configure
3909 * @promisc_mask: mask of promiscuous config bits
3910 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3912 * Configure VSI with all associated VLANs to given promiscuous mode(s)
3915 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3916 bool rm_vlan_promisc)
3918 struct ice_switch_info *sw = hw->switch_info;
3919 struct ice_fltr_list_entry *list_itr, *tmp;
3920 struct LIST_HEAD_TYPE vsi_list_head;
3921 struct LIST_HEAD_TYPE *vlan_head;
3922 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
3923 enum ice_status status;
/* Snapshot (copy) all VLAN filters used by this VSI under the VLAN
 * recipe lock, then apply set/clear per VLAN without holding the lock.
 */
3926 INIT_LIST_HEAD(&vsi_list_head);
3927 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3928 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3929 ice_acquire_lock(vlan_lock);
3930 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3932 ice_release_lock(vlan_lock);
3934 goto free_fltr_list;
3936 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
3938 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3939 if (rm_vlan_promisc)
3940 status = ice_clear_vsi_promisc(hw, vsi_handle,
3941 promisc_mask, vlan_id);
3942 else
3943 status = ice_set_vsi_promisc(hw, vsi_handle,
3944 promisc_mask, vlan_id);
/* Free the copied list entries regardless of status */
3950 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
3951 ice_fltr_list_entry, list_entry) {
3952 LIST_DEL(&list_itr->list_entry);
3953 ice_free(hw, list_itr);
3959 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
3960 * @hw: pointer to the hardware structure
3961 * @vsi_handle: VSI handle to remove filters from
3962 * @lkup: switch rule filter lookup type
3965 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
3966 enum ice_sw_lkup_type lkup)
3968 struct ice_switch_info *sw = hw->switch_info;
3969 struct ice_fltr_list_entry *fm_entry;
3970 struct LIST_HEAD_TYPE remove_list_head;
3971 struct LIST_HEAD_TYPE *rule_head;
3972 struct ice_fltr_list_entry *tmp;
3973 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3974 enum ice_status status;
3976 INIT_LIST_HEAD(&remove_list_head);
3977 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3978 rule_head = &sw->recp_list[lkup].filt_rules;
3979 ice_acquire_lock(rule_lock);
3980 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
3982 ice_release_lock(rule_lock);
/* Dispatch to the type-specific remove helper on the copied list */
3987 case ICE_SW_LKUP_MAC:
3988 ice_remove_mac(hw, &remove_list_head);
3990 case ICE_SW_LKUP_VLAN:
3991 ice_remove_vlan(hw, &remove_list_head);
3993 case ICE_SW_LKUP_PROMISC:
3994 case ICE_SW_LKUP_PROMISC_VLAN:
3995 ice_remove_promisc(hw, lkup, &remove_list_head);
3997 case ICE_SW_LKUP_MAC_VLAN:
3998 ice_remove_mac_vlan(hw, &remove_list_head);
4000 case ICE_SW_LKUP_ETHERTYPE:
4001 case ICE_SW_LKUP_ETHERTYPE_MAC:
4002 ice_remove_eth_mac(hw, &remove_list_head);
4004 case ICE_SW_LKUP_DFLT:
4005 ice_debug(hw, ICE_DBG_SW,
4006 "Remove filters for this lookup type hasn't been implemented yet\n");
4008 case ICE_SW_LKUP_LAST:
4009 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the copied entries built by ice_add_to_vsi_fltr_list */
4013 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4014 ice_fltr_list_entry, list_entry) {
4015 LIST_DEL(&fm_entry->list_entry);
4016 ice_free(hw, fm_entry);
4021 * ice_remove_vsi_fltr - Remove all filters for a VSI
4022 * @hw: pointer to the hardware structure
4023 * @vsi_handle: VSI handle to remove filters from
4025 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4027 ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
/* Sweep every lookup type; each call independently collects and
 * removes the rules of that type which reference this VSI.
 */
4029 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4030 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4031 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4032 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4033 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4034 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4035 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4036 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4040 * ice_alloc_res_cntr - allocating resource counter
4041 * @hw: pointer to the hardware structure
4042 * @type: type of resource
4043 * @alloc_shared: if set it is shared else dedicated
4044 * @num_items: number of entries requested for FD resource type
4045 * @counter_id: counter index returned by AQ call
4048 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4051 struct ice_aqc_alloc_free_res_elem *buf;
4052 enum ice_status status;
4055 /* Allocate resource */
4056 buf_len = sizeof(*buf);
4057 buf = (struct ice_aqc_alloc_free_res_elem *)
4058 ice_malloc(hw, buf_len);
4060 return ICE_ERR_NO_MEMORY;
/* res_type encodes the resource type in its field plus the
 * shared/dedicated flag in the low bits.
 */
4062 buf->num_elems = CPU_TO_LE16(num_items);
4063 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4064 ICE_AQC_RES_TYPE_M) | alloc_shared);
4066 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4067 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated counter index in elem[0] */
4071 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4079 * ice_free_res_cntr - free resource counter
4080 * @hw: pointer to the hardware structure
4081 * @type: type of resource
4082 * @alloc_shared: if set it is shared else dedicated
4083 * @num_items: number of entries to be freed for FD resource type
4084 * @counter_id: counter ID resource which needs to be freed
4087 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4090 struct ice_aqc_alloc_free_res_elem *buf;
4091 enum ice_status status;
4095 buf_len = sizeof(*buf);
4096 buf = (struct ice_aqc_alloc_free_res_elem *)
4097 ice_malloc(hw, buf_len);
4099 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr: same res_type encoding, but the
 * counter to release is passed in elem[0] and the free opcode is used.
 */
4101 buf->num_elems = CPU_TO_LE16(num_items);
4102 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4103 ICE_AQC_RES_TYPE_M) | alloc_shared);
4104 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4106 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4107 ice_aqc_opc_free_res, NULL);
4109 ice_debug(hw, ICE_DBG_SW,
4110 "counter resource could not be freed\n");
4117 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4118 * @hw: pointer to the hardware structure
4119 * @counter_id: returns counter index
4121 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
/* Thin wrapper: allocate exactly one dedicated VLAN counter */
4123 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4124 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4129 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4130 * @hw: pointer to the hardware structure
4131 * @counter_id: counter index to be freed
4133 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
/* Thin wrapper: release one dedicated VLAN counter */
4135 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4136 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4141 * ice_alloc_res_lg_act - add large action resource
4142 * @hw: pointer to the hardware structure
4143 * @l_id: large action ID to fill it in
4144 * @num_acts: number of actions to hold with a large action entry
4146 static enum ice_status
4147 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4149 struct ice_aqc_alloc_free_res_elem *sw_buf;
4150 enum ice_status status;
4153 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4154 return ICE_ERR_PARAM;
4156 /* Allocate resource for large action */
4157 buf_len = sizeof(*sw_buf);
4158 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4159 ice_malloc(hw, buf_len);
4161 return ICE_ERR_NO_MEMORY;
4163 sw_buf->num_elems = CPU_TO_LE16(1);
4165 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4166 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4167 * If num_acts is greater than 2, then use
4168 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4169 * The num_acts cannot exceed 4. This was ensured at the
4170 * beginning of the function.
/* NOTE(review): the comment above says WIDE_TABLE_3 for num_acts == 2,
 * but the code selects WIDE_TABLE_2 — confirm which is correct against
 * the AQ resource-type definitions.
 */
4173 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4174 else if (num_acts == 2)
4175 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4177 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4179 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4180 ice_aqc_opc_alloc_res, NULL);
4182 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4184 ice_free(hw, sw_buf);
4189 * ice_add_mac_with_sw_marker - add filter with sw marker
4190 * @hw: pointer to the hardware structure
4191 * @f_info: filter info structure containing the MAC filter information
4192 * @sw_marker: sw marker to tag the Rx descriptor with
4195 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4198 struct ice_switch_info *sw = hw->switch_info;
4199 struct ice_fltr_mgmt_list_entry *m_entry;
4200 struct ice_fltr_list_entry fl_info;
4201 struct LIST_HEAD_TYPE l_head;
4202 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4203 enum ice_status ret;
/* Marker large action only applies to forward-to-VSI MAC filters */
4207 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4208 return ICE_ERR_PARAM;
4210 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4211 return ICE_ERR_PARAM;
4213 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4214 return ICE_ERR_PARAM;
4216 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4217 return ICE_ERR_PARAM;
4218 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4220 /* Add filter if it doesn't exist so then the adding of large
4221 * action always results in update
4224 INIT_LIST_HEAD(&l_head);
4225 fl_info.fltr_info = *f_info;
4226 LIST_ADD(&fl_info.list_entry, &l_head);
4228 entry_exists = false;
4229 ret = ice_add_mac(hw, &l_head);
4230 if (ret == ICE_ERR_ALREADY_EXISTS)
4231 entry_exists = true;
4235 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4236 ice_acquire_lock(rule_lock);
4237 /* Get the book keeping entry for the filter */
4238 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4242 /* If counter action was enabled for this rule then don't enable
4243 * sw marker large action
4245 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4246 ret = ICE_ERR_PARAM;
4250 /* if same marker was added before */
4251 if (m_entry->sw_marker_id == sw_marker) {
4252 ret = ICE_ERR_ALREADY_EXISTS;
4256 /* Allocate a hardware table entry to hold large act. Three actions
4257 * for marker based large action
4259 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4263 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4266 /* Update the switch rule to add the marker action */
4267 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4269 ice_release_lock(rule_lock);
4274 ice_release_lock(rule_lock);
4275 /* only remove entry if it did not exist previously */
4277 ret = ice_remove_mac(hw, &l_head);
4283 * ice_add_mac_with_counter - add filter with counter enabled
4284 * @hw: pointer to the hardware structure
4285 * @f_info: pointer to filter info structure containing the MAC filter
4289 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4291 struct ice_switch_info *sw = hw->switch_info;
4292 struct ice_fltr_mgmt_list_entry *m_entry;
4293 struct ice_fltr_list_entry fl_info;
4294 struct LIST_HEAD_TYPE l_head;
4295 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4296 enum ice_status ret;
/* Counter large action only applies to forward-to-VSI MAC filters */
4301 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4302 return ICE_ERR_PARAM;
4304 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4305 return ICE_ERR_PARAM;
4307 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4308 return ICE_ERR_PARAM;
4309 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4311 entry_exist = false;
4313 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4315 /* Add filter if it doesn't exist so then the adding of large
4316 * action always results in update
4318 INIT_LIST_HEAD(&l_head);
4320 fl_info.fltr_info = *f_info;
4321 LIST_ADD(&fl_info.list_entry, &l_head);
4323 ret = ice_add_mac(hw, &l_head);
4324 if (ret == ICE_ERR_ALREADY_EXISTS)
4329 ice_acquire_lock(rule_lock);
4330 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4332 ret = ICE_ERR_BAD_PTR;
4336 /* Don't enable counter for a filter for which sw marker was enabled */
4337 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4338 ret = ICE_ERR_PARAM;
4342 /* If a counter was already enabled then don't need to add again */
4343 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4344 ret = ICE_ERR_ALREADY_EXISTS;
4348 /* Allocate a hardware table entry to VLAN counter */
4349 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4353 /* Allocate a hardware table entry to hold large act. Two actions for
4354 * counter based large action
4356 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4360 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4363 /* Update the switch rule to add the counter action */
4364 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4366 ice_release_lock(rule_lock);
4371 ice_release_lock(rule_lock);
4372 /* only remove entry if it did not exist previously */
4374 ret = ice_remove_mac(hw, &l_head);
4379 /* This is mapping table entry that maps every word within a given protocol
4380 * structure to the real byte offset as per the specification of that
4382 * for example dst address is 3 words in ethertype header and corresponding
4383 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4384 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4385 * matching entry describing its field. This needs to be updated if new
4386 * structure is added to that union.
4388 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4389 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4390 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4391 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4392 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4393 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4394 26, 28, 30, 32, 34, 36, 38 } },
4395 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4396 26, 28, 30, 32, 34, 36, 38 } },
4397 { ICE_TCP_IL, { 0, 2 } },
4398 { ICE_UDP_OF, { 0, 2 } },
4399 { ICE_UDP_ILOS, { 0, 2 } },
4400 { ICE_SCTP_IL, { 0, 2 } },
/* Tunnel headers start extracting at byte 8 (VXLAN/GENEVE VNI area) */
4401 { ICE_VXLAN, { 8, 10, 12, 14 } },
4402 { ICE_GENEVE, { 8, 10, 12, 14 } },
4403 { ICE_VXLAN_GPE, { 0, 2, 4 } },
4404 { ICE_NVGRE, { 0, 2, 4, 6 } },
4405 { ICE_PROTOCOL_LAST, { 0 } }
4408 /* The following table describes preferred grouping of recipes.
4409 * If a recipe that needs to be programmed is a superset or matches one of the
4410 * following combinations, then the recipe needs to be chained as per the
/* Each entry: word count, up to four {HW protocol ID, byte offset, ?}
 * extraction words, and per-word masks (all-ones here = full-word match).
 */
4413 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
4414 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4415 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
4416 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4417 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4418 { 0xffff, 0xffff, 0xffff, 0xffff } },
4419 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4420 { 0xffff, 0xffff, 0xffff, 0xffff } },
4421 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4422 { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Maps the software ice_protocol_type enum to the HW protocol ID used
 * in field vectors. Note several tunnel types (VXLAN, GENEVE,
 * VXLAN_GPE) all map to the UDP-of HW ID. Terminated by
 * ICE_PROTOCOL_LAST.
 */
4425 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4426 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4427 { ICE_MAC_IL, ICE_MAC_IL_HW },
4428 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4429 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4430 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4431 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4432 { ICE_TCP_IL, ICE_TCP_IL_HW },
4433 { ICE_UDP_OF, ICE_UDP_OF_HW },
4434 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4435 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4436 { ICE_VXLAN, ICE_UDP_OF_HW },
4437 { ICE_GENEVE, ICE_UDP_OF_HW },
4438 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4439 { ICE_NVGRE, ICE_GRE_OF_HW },
4440 { ICE_PROTOCOL_LAST, 0 }
4444 * ice_find_recp - find a recipe
4445 * @hw: pointer to the hardware structure
4446 * @lkup_exts: extension sequence to match
4448 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4450 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4452 bool refresh_required = true;
4453 struct ice_sw_recipe *recp;
4456 /* Initialize available_result_ids which tracks available result idx */
4457 for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
4458 ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
4459 available_result_ids)
4461 /* Walk through existing recipes to find a match */
4462 recp = hw->switch_info->recp_list;
4463 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4464 /* If recipe was not created for this ID, in SW bookkeeping,
4465 * check if FW has an entry for this recipe. If the FW has an
4466 * entry update it in our SW bookkeeping and continue with the
4469 if (!recp[i].recp_created)
4470 if (ice_get_recp_frm_fw(hw,
4471 hw->switch_info->recp_list, i,
4475 /* if number of words we are looking for match */
4476 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4477 struct ice_fv_word *a = lkup_exts->fv_words;
4478 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive match: for every requested word, search
 * the recipe's words for an equal (offset, protocol) pair.
 */
4482 for (p = 0; p < lkup_exts->n_val_words; p++) {
4483 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4485 if (a[p].off == b[q].off &&
4486 a[p].prot_id == b[q].prot_id)
4487 /* Found the "p"th word in the
4492 /* After walking through all the words in the
4493 * "i"th recipe if "p"th word was not found then
4494 * this recipe is not what we are looking for.
4495 * So break out from this loop and try the next
4498 if (q >= recp[i].lkup_exts.n_val_words) {
4503 /* If for "i"th recipe the found was never set to false
4504 * then it means we found our match
4507 return i; /* Return the recipe ID */
4510 return ICE_MAX_NUM_RECIPES;
4514 * ice_prot_type_to_id - get protocol ID from protocol type
4515 * @type: protocol type
4516 * @id: pointer to variable that will receive the ID
4518 * Returns true if found, false otherwise
4520 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of ice_prot_id_tbl; the ICE_PROTOCOL_LAST sentinel
 * terminates the table. *id is written only on a successful match.
 */
4524 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4525 if (ice_prot_id_tbl[i].type == type) {
4526 *id = ice_prot_id_tbl[i].protocol_id;
4533 * ice_fill_valid_words - fill lookup words for a rule using its mask
4534 * @rule: advanced rule with lookup information
4535 * @lkup_exts: byte offset extractions of the words that are valid
4537 * calculate valid words in a lookup rule using mask value
4540 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4541 struct ice_prot_lkup_ext *lkup_exts)
/* Unknown protocol type: contribute no words */
4547 if (!ice_prot_type_to_id(rule->type, &prot_id))
4550 word = lkup_exts->n_val_words;
/* Walk the rule mask in u16 granularity; every non-zero mask word adds
 * one extraction entry (protocol ID + byte offset + mask).
 */
4552 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4553 if (((u16 *)&rule->m_u)[j] &&
4554 rule->type < ARRAY_SIZE(ice_prot_ext)) {
4555 /* No more space to accommodate */
4556 if (word >= ICE_MAX_CHAIN_WORDS)
4558 lkup_exts->fv_words[word].off =
4559 ice_prot_ext[rule->type].offs[j];
/* NOTE(review): indexes ice_prot_id_tbl by rule->type instead of using
 * the prot_id returned by ice_prot_type_to_id() above; this is only
 * correct while the table order mirrors enum ice_protocol_type —
 * confirm, or switch to prot_id.
 */
4560 lkup_exts->fv_words[word].prot_id =
4561 ice_prot_id_tbl[rule->type].protocol_id;
4562 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return value is the number of words this rule contributed */
4566 ret_val = word - lkup_exts->n_val_words;
4567 lkup_exts->n_val_words = word;
4573 * ice_find_prot_off_ind - check for specific ID and offset in rule
4574 * @lkup_exts: an array of protocol header extractions
4575 * @prot_type: protocol type to check
4576 * @off: expected offset of the extraction
4578 * Check if the prot_ext has given protocol ID and offset
4581 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
/* Linear search over the valid words for an exact (prot_id, off) match */
4586 for (j = 0; j < lkup_exts->n_val_words; j++)
4587 if (lkup_exts->fv_words[j].off == off &&
4588 lkup_exts->fv_words[j].prot_id == prot_type)
/* Not found: ICE_MAX_CHAIN_WORDS doubles as the "no index" sentinel */
4591 return ICE_MAX_CHAIN_WORDS;
4595 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4596 * @lkup_exts: an array of protocol header extractions
4597 * @r_policy: preferred recipe grouping policy
4599 * Helper function to check if given recipe group is subset we need to check if
4600 * all the words described by the given recipe group exist in the advanced rule
4601 * look up information
4604 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4605 const struct ice_pref_recipe_group *r_policy)
4607 u8 ind[ICE_NUM_WORDS_RECIPE];
4611 /* check if everything in the r_policy is part of the entire rule */
4612 for (i = 0; i < r_policy->n_val_pairs; i++) {
4615 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4616 r_policy->pairs[i].off);
/* A single missing pair disqualifies the whole policy group */
4617 if (j >= ICE_MAX_CHAIN_WORDS)
4620 /* store the indexes temporarily found by the find function
4621 * this will be used to mark the words as 'done'
4626 /* If the entire policy recipe was a true match, then mark the fields
4627 * that are covered by the recipe as 'done' meaning that these words
4628 * will be clumped together in one recipe.
4629 * "Done" here means in our searching if certain recipe group
4630 * matches or is subset of the given rule, then we mark all
4631 * the corresponding offsets as found. So the remaining recipes should
4632 * be created with whatever words that were left.
4634 for (i = 0; i < count; i++) {
4637 ice_set_bit(in, lkup_exts->done);
4643 * ice_create_first_fit_recp_def - Create a recipe grouping
4644 * @hw: pointer to the hardware structure
4645 * @lkup_exts: an array of protocol header extractions
4646 * @rg_list: pointer to a list that stores new recipe groups
4647 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4649 * Using first fit algorithm, take all the words that are still not done
4650 * and start grouping them in 4-word groups. Each group makes up one
4653 static enum ice_status
4654 ice_create_first_fit_recp_def(struct ice_hw *hw,
4655 struct ice_prot_lkup_ext *lkup_exts,
4656 struct LIST_HEAD_TYPE *rg_list,
4659 struct ice_pref_recipe_group *grp = NULL;
4664 /* Walk through every word in the rule to check if it is not done. If so
4665 * then this word needs to be part of a new recipe.
4667 for (j = 0; j < lkup_exts->n_val_words; j++)
4668 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Current group is missing or full (ICE_NUM_WORDS_RECIPE pairs):
 * allocate a fresh group entry and append it to rg_list.
 */
4670 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4671 struct ice_recp_grp_entry *entry;
4673 entry = (struct ice_recp_grp_entry *)
4674 ice_malloc(hw, sizeof(*entry));
/* NOTE(review): on allocation failure the entries already added
 * to rg_list are left for the caller to free — confirm callers
 * always clean up rg_list on error.
 */
4676 return ICE_ERR_NO_MEMORY;
4677 LIST_ADD(&entry->l_entry, rg_list);
4678 grp = &entry->r_group;
/* Copy the pending word (prot_id, off, mask) into the current group */
4682 grp->pairs[grp->n_val_pairs].prot_id =
4683 lkup_exts->fv_words[j].prot_id;
4684 grp->pairs[grp->n_val_pairs].off =
4685 lkup_exts->fv_words[j].off;
4686 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4694 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4695 * @hw: pointer to the hardware structure
4696 * @fv_list: field vector with the extraction sequence information
4697 * @rg_list: recipe groupings with protocol-offset pairs
4699 * Helper function to fill in the field vector indices for protocol-offset
4700 * pairs. These indexes are then ultimately programmed into a recipe.
4703 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4704 struct LIST_HEAD_TYPE *rg_list)
4706 struct ice_sw_fv_list_entry *fv;
4707 struct ice_recp_grp_entry *rg;
4708 struct ice_fv_word *fv_ext;
/* Nothing to index against if no field vector is available */
4710 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; all groups are
 * resolved against its extraction words (see function header).
 */
4713 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4714 fv_ext = fv->fv_ptr->ew;
4716 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4719 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4720 struct ice_fv_word *pr;
4724 pr = &rg->r_group.pairs[i];
4725 mask = rg->r_group.mask[i];
/* Scan the FV extraction words for this (prot_id, off) pair;
 * es.fvw is the field vector width for the switch block.
 */
4727 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4728 if (fv_ext[j].prot_id == pr->prot_id &&
4729 fv_ext[j].off == pr->off) {
4730 /* Store index of field vector */
4732 /* Mask is given by caller as big
4733 * endian, but sent to FW as little
4736 rg->fv_mask[i] = mask << 8 | mask >> 8;
4744 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4745 * @hw: pointer to hardware structure
4746 * @rm: recipe management list entry
4747 * @match_tun: if field vector index for tunnel needs to be programmed
4749 static enum ice_status
4750 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4753 struct ice_aqc_recipe_data_elem *tmp;
4754 struct ice_aqc_recipe_data_elem *buf;
4755 struct ice_recp_grp_entry *entry;
4756 enum ice_status status;
4761 /* When more than one recipe are required, another recipe is needed to
4762 * chain them together. Matching a tunnel metadata ID takes up one of
4763 * the match fields in the chaining recipe reducing the number of
4764 * chained recipes by one.
4766 if (rm->n_grp_count > 1)
4768 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
4769 (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
4770 return ICE_ERR_MAX_LIMIT;
/* tmp holds the recipes read back from FW; buf is the set we program */
4772 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
4773 ICE_MAX_NUM_RECIPES,
4776 return ICE_ERR_NO_MEMORY;
4778 buf = (struct ice_aqc_recipe_data_elem *)
4779 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
4781 status = ICE_ERR_NO_MEMORY;
4785 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4786 recipe_count = ICE_MAX_NUM_RECIPES;
4787 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4789 if (status || recipe_count == 0)
4792 /* Allocate the recipe resources, and configure them according to the
4793 * match fields from protocol headers and extracted field vectors.
4795 chain_idx = ICE_CHAIN_FV_INDEX_START -
4796 ice_find_first_bit(available_result_ids,
4797 ICE_CHAIN_FV_INDEX_START + 1);
4798 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4801 status = ice_alloc_recipe(hw, &entry->rid);
4805 /* Clear the result index of the located recipe, as this will be
4806 * updated, if needed, later in the recipe creation process.
4808 tmp[0].content.result_indx = 0;
/* Seed this recipe element from FW's template, then override */
4810 buf[recps] = tmp[0];
4811 buf[recps].recipe_indx = (u8)entry->rid;
4812 /* if the recipe is a non-root recipe RID should be programmed
4813 * as 0 for the rules to be applied correctly.
4815 buf[recps].content.rid = 0;
4816 ice_memset(&buf[recps].content.lkup_indx, 0,
4817 sizeof(buf[recps].content.lkup_indx),
4820 /* All recipes use look-up index 0 to match switch ID. */
4821 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4822 buf[recps].content.mask[0] =
4823 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4824 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4827 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4828 buf[recps].content.lkup_indx[i] = 0x80;
4829 buf[recps].content.mask[i] = 0;
/* Program the real lookup indices/masks for this group's words;
 * index 0 is reserved for switch ID, hence the i + 1 offset.
 */
4832 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4833 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4834 buf[recps].content.mask[i + 1] =
4835 CPU_TO_LE16(entry->fv_mask[i]);
/* Multi-recipe case: this recipe publishes its intermediate match
 * result at chain_idx for the chaining (root) recipe to consume.
 */
4838 if (rm->n_grp_count > 1) {
4839 entry->chain_idx = chain_idx;
4840 buf[recps].content.result_indx =
4841 ICE_AQ_RECIPE_RESULT_EN |
4842 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4843 ICE_AQ_RECIPE_RESULT_DATA_M);
4844 ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
4845 available_result_ids);
4846 chain_idx = ICE_CHAIN_FV_INDEX_START -
4847 ice_find_first_bit(available_result_ids,
4848 ICE_CHAIN_FV_INDEX_START +
4852 /* fill recipe dependencies */
4853 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
4854 ICE_MAX_NUM_RECIPES);
4855 ice_set_bit(buf[recps].recipe_indx,
4856 (ice_bitmap_t *)buf[recps].recipe_bitmap);
4857 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group recipe: the lone element is itself the root recipe */
4861 if (rm->n_grp_count == 1) {
4862 rm->root_rid = buf[0].recipe_indx;
4863 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
4864 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4865 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4866 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4867 sizeof(buf[0].recipe_bitmap),
4868 ICE_NONDMA_TO_NONDMA);
4870 status = ICE_ERR_BAD_PTR;
4873 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
4874 * the recipe which is getting created if specified
4875 * by user. Usually any advanced switch filter, which results
4876 * into new extraction sequence, ended up creating a new recipe
4877 * of type ROOT and usually recipes are associated with profiles
4878 * Switch rule referring newly created recipe, needs to have
4879 * either/or 'fwd' or 'join' priority, otherwise switch rule
4880 * evaluation will not happen correctly. In other words, if
4881 * switch rule to be evaluated on priority basis, then recipe
4882 * needs to have priority, otherwise it will be evaluated last.
4884 buf[0].content.act_ctrl_fwd_priority = rm->priority;
4886 struct ice_recp_grp_entry *last_chain_entry;
4889 /* Allocate the last recipe that will chain the outcomes of the
4890 * other recipes together
4892 status = ice_alloc_recipe(hw, &rid);
4896 buf[recps].recipe_indx = (u8)rid;
4897 buf[recps].content.rid = (u8)rid;
4898 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4899 /* the new entry created should also be part of rg_list to
4900 * make sure we have complete recipe
4902 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
4903 sizeof(*last_chain_entry));
4904 if (!last_chain_entry) {
4905 status = ICE_ERR_NO_MEMORY;
4908 last_chain_entry->rid = rid;
4909 ice_memset(&buf[recps].content.lkup_indx, 0,
4910 sizeof(buf[recps].content.lkup_indx),
4912 /* All recipes use look-up index 0 to match switch ID. */
4913 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4914 buf[recps].content.mask[0] =
4915 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4916 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4917 buf[recps].content.lkup_indx[i] =
4918 ICE_AQ_RECIPE_LKUP_IGNORE;
4919 buf[recps].content.mask[i] = 0;
4923 /* update r_bitmap with the recp that is used for chaining */
4924 ice_set_bit(rid, rm->r_bitmap);
4925 /* this is the recipe that chains all the other recipes so it
4926 * should not have a chaining ID to indicate the same
4928 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The chaining recipe matches the intermediate results (chain_idx)
 * that each non-root recipe produced above.
 */
4929 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
4931 last_chain_entry->fv_idx[i] = entry->chain_idx;
4932 buf[recps].content.lkup_indx[i] = entry->chain_idx;
4933 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
4934 ice_set_bit(entry->rid, rm->r_bitmap);
4936 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
4937 if (sizeof(buf[recps].recipe_bitmap) >=
4938 sizeof(rm->r_bitmap)) {
4939 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
4940 sizeof(buf[recps].recipe_bitmap),
4941 ICE_NONDMA_TO_NONDMA);
4943 status = ICE_ERR_BAD_PTR;
4946 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4948 /* To differentiate among different UDP tunnels, a meta data ID
4952 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
4953 buf[recps].content.mask[i] =
4954 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
4958 rm->root_rid = (u8)rid;
/* Recipes are written to FW under the change lock */
4960 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4964 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
4965 ice_release_change_lock(hw);
4969 /* Every recipe that just got created add it to the recipe
4972 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4973 struct ice_switch_info *sw = hw->switch_info;
4974 struct ice_sw_recipe *recp;
4976 recp = &sw->recp_list[entry->rid];
4977 recp->root_rid = entry->rid;
4978 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
4979 entry->r_group.n_val_pairs *
4980 sizeof(struct ice_fv_word),
4981 ICE_NONDMA_TO_NONDMA);
4983 recp->n_ext_words = entry->r_group.n_val_pairs;
4984 recp->chain_idx = entry->chain_idx;
4985 recp->recp_created = true;
4986 recp->big_recp = false;
5000 * ice_create_recipe_group - creates recipe group
5001 * @hw: pointer to hardware structure
5002 * @rm: recipe management list entry
5003 * @lkup_exts: lookup elements
5005 static enum ice_status
5006 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5007 struct ice_prot_lkup_ext *lkup_exts)
5009 struct ice_recp_grp_entry *entry;
5010 struct ice_recp_grp_entry *tmp;
5011 enum ice_status status;
5015 rm->n_grp_count = 0;
5017 /* Each switch recipe can match up to 5 words or metadata. One word in
5018 * each recipe is used to match the switch ID. Four words are left for
5019 * matching other values. If the new advanced recipe requires more than
5020 * 4 words, it needs to be split into multiple recipes which are chained
5021 * together using the intermediate result that each produces as input to
5022 * the other recipes in the sequence.
5024 groups = ARRAY_SIZE(ice_recipe_pack);
5026 /* Check if any of the preferred recipes from the grouping policy
5029 for (i = 0; i < groups; i++)
5030 /* Check if the recipe from the preferred grouping matches
5031 * or is a subset of the fields that needs to be looked up.
5033 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
5034 /* This recipe can be used by itself or grouped with
5037 entry = (struct ice_recp_grp_entry *)
5038 ice_malloc(hw, sizeof(*entry));
5040 status = ICE_ERR_NO_MEMORY;
5043 entry->r_group = ice_recipe_pack[i];
5044 LIST_ADD(&entry->l_entry, &rm->rg_list);
5048 /* Create recipes for words that are marked not done by packing them
5051 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5052 &rm->rg_list, &recp_count);
/* On success, cache the overall extraction words/masks on the recipe */
5054 rm->n_grp_count += recp_count;
5055 rm->n_ext_words = lkup_exts->n_val_words;
5056 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5057 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5058 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5059 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path: free every group entry already linked into rg_list */
5064 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5066 LIST_DEL(&entry->l_entry);
5067 ice_free(hw, entry);
5075 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5076 * @hw: pointer to hardware structure
5077 * @lkups: lookup elements or match criteria for the advanced recipe, one
5078 * structure per protocol header
5079 * @lkups_cnt: number of protocols
5080 * @fv_list: pointer to a list that holds the returned field vectors
5082 static enum ice_status
5083 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5084 struct LIST_HEAD_TYPE *fv_list)
5086 enum ice_status status;
/* Scratch array of HW protocol IDs, one per lookup element */
5090 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5092 return ICE_ERR_NO_MEMORY;
5094 for (i = 0; i < lkups_cnt; i++)
5095 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5096 status = ICE_ERR_CFG;
5100 /* Find field vectors that include all specified protocol types */
5101 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
/* prot_ids is a temporary; free it on every path */
5104 ice_free(hw, prot_ids);
5109 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5110 * @hw: pointer to hardware structure
5111 * @lkups: lookup elements or match criteria for the advanced recipe, one
5112 * structure per protocol header
5113 * @lkups_cnt: number of protocols
5114 * @rinfo: other information regarding the rule e.g. priority and action info
5115 * @rid: return the recipe ID of the recipe created
5117 static enum ice_status
5118 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5119 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5121 struct ice_prot_lkup_ext *lkup_exts;
5122 struct ice_recp_grp_entry *r_entry;
5123 struct ice_sw_fv_list_entry *fvit;
5124 struct ice_recp_grp_entry *r_tmp;
5125 struct ice_sw_fv_list_entry *tmp;
5126 enum ice_status status = ICE_SUCCESS;
5127 struct ice_sw_recipe *rm;
5128 bool match_tun = false;
5132 return ICE_ERR_PARAM;
5134 lkup_exts = (struct ice_prot_lkup_ext *)
5135 ice_malloc(hw, sizeof(*lkup_exts));
5137 return ICE_ERR_NO_MEMORY;
5139 /* Determine the number of words to be matched and if it exceeds a
5140 * recipe's restrictions
5142 for (i = 0; i < lkups_cnt; i++) {
5145 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5146 status = ICE_ERR_CFG;
5147 goto err_free_lkup_exts;
5150 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5152 status = ICE_ERR_CFG;
5153 goto err_free_lkup_exts;
/* Reuse an existing recipe when one already matches these words */
5157 *rid = ice_find_recp(hw, lkup_exts);
5158 if (*rid < ICE_MAX_NUM_RECIPES)
5159 /* Success if found a recipe that match the existing criteria */
5160 goto err_free_lkup_exts;
5162 /* Recipe we need does not exist, add a recipe */
5164 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5166 status = ICE_ERR_NO_MEMORY;
5167 goto err_free_lkup_exts;
5170 /* Get field vectors that contain fields extracted from all the protocol
5171 * headers being programmed.
5173 INIT_LIST_HEAD(&rm->fv_list);
5174 INIT_LIST_HEAD(&rm->rg_list);
5176 status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
5180 /* Group match words into recipes using preferred recipe grouping
5183 status = ice_create_recipe_group(hw, rm, lkup_exts);
5187 /* There is only profile for UDP tunnels. So, it is necessary to use a
5188 * metadata ID flag to differentiate different tunnel types. A separate
5189 * recipe needs to be used for the metadata.
5191 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5192 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5193 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5196 /* set the recipe priority if specified */
5197 rm->priority = rinfo->priority ? rinfo->priority : 0;
5199 /* Find offsets from the field vector. Pick the first one for all the
5202 ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5203 status = ice_add_sw_recipe(hw, rm, match_tun);
5207 /* Associate all the recipes created with all the profiles in the
5208 * common field vector.
5210 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5212 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5214 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5215 (u8 *)r_bitmap, NULL);
/* Merge FW's existing recipe associations with the new ones */
5219 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5220 ICE_MAX_NUM_RECIPES);
5221 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5225 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5228 ice_release_change_lock(hw);
/* Publish the root recipe ID and record the extraction sequence in
 * SW bookkeeping so ice_find_recp() can match it later.
 */
5234 *rid = rm->root_rid;
5235 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5236 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: drain the recipe-group and field-vector lists */
5238 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5239 ice_recp_grp_entry, l_entry) {
5240 LIST_DEL(&r_entry->l_entry);
5241 ice_free(hw, r_entry);
5244 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5246 LIST_DEL(&fvit->list_entry);
5251 ice_free(hw, rm->root_buf);
5256 ice_free(hw, lkup_exts);
5262 * ice_find_dummy_packet - find dummy packet by tunnel type
5264 * @lkups: lookup elements or match criteria for the advanced recipe, one
5265 * structure per protocol header
5266 * @lkups_cnt: number of protocols
5267 * @tun_type: tunnel type from the match criteria
5268 * @pkt: dummy packet to fill according to filter match criteria
5269 * @pkt_len: packet length of dummy packet
5270 * @offsets: pointer to receive the pointer to the offsets for the packet
5273 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5274 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5276 const struct ice_dummy_pkt_offsets **offsets)
5278 bool tcp = false, udp = false;
/* Detect whether the match criteria reference an inner TCP or UDP
 * header; this selects the L4 flavor of the template packet below.
 */
5281 for (i = 0; i < lkups_cnt; i++) {
5282 if (lkups[i].type == ICE_UDP_ILOS)
5284 else if (lkups[i].type == ICE_TCP_IL)
/* NVGRE (and the "all tunnels" catch-all) uses the GRE template */
5288 if (tun_type == ICE_SW_TUN_NVGRE || tun_type == ICE_ALL_TUNNELS) {
5289 *pkt = dummy_gre_packet;
5290 *pkt_len = sizeof(dummy_gre_packet);
5291 *offsets = dummy_gre_packet_offsets;
/* UDP-encapsulated tunnels: pick TCP- or UDP-inner template */
5295 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5296 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5298 *pkt = dummy_udp_tun_tcp_packet;
5299 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5300 *offsets = dummy_udp_tun_tcp_packet_offsets;
5304 *pkt = dummy_udp_tun_udp_packet;
5305 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5306 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled fallbacks: plain UDP or (default) plain TCP packet */
5311 *pkt = dummy_udp_packet;
5312 *pkt_len = sizeof(dummy_udp_packet);
5313 *offsets = dummy_udp_packet_offsets;
5317 *pkt = dummy_tcp_packet;
5318 *pkt_len = sizeof(dummy_tcp_packet);
5319 *offsets = dummy_tcp_packet_offsets;
5323 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5325 * @lkups: lookup elements or match criteria for the advanced recipe, one
5326 * structure per protocol header
5327 * @lkups_cnt: number of protocols
5328 * @s_rule: stores rule information from the match criteria
5329 * @dummy_pkt: dummy packet to fill according to filter match criteria
5330 * @pkt_len: packet length of dummy packet
5331 * @offsets: offset info for the dummy packet
5333 static enum ice_status
5334 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5335 struct ice_aqc_sw_rules_elem *s_rule,
5336 const u8 *dummy_pkt, u16 pkt_len,
5337 const struct ice_dummy_pkt_offsets *offsets)
5342 /* Start with a packet with a pre-defined/dummy content. Then, fill
5343 * in the header values to be looked up or matched.
5345 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5347 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5349 for (i = 0; i < lkups_cnt; i++) {
5350 enum ice_protocol_type type;
5351 u16 offset = 0, len = 0, j;
5354 /* find the start of this layer; it should be found since this
5355 * was already checked when search for the dummy packet
5357 type = lkups[i].type;
5358 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5359 if (type == offsets[j].type) {
5360 offset = offsets[j].offset;
5365 /* this should never happen in a correct calling sequence */
5367 return ICE_ERR_PARAM;
/* Header length to patch depends on the protocol layer */
5369 switch (lkups[i].type) {
5372 len = sizeof(struct ice_ether_hdr);
5376 len = sizeof(struct ice_ipv4_hdr);
5381 len = sizeof(struct ice_l4_hdr);
5384 len = sizeof(struct ice_sctp_hdr);
5387 len = sizeof(struct ice_nvgre);
5392 len = sizeof(struct ice_udp_tnl_hdr);
5395 return ICE_ERR_PARAM;
5398 /* the length should be a word multiple */
5399 if (len % ICE_BYTES_PER_WORD)
5402 /* We have the offset to the header start, the length, the
5403 * caller's header values and mask. Use this information to
5404 * copy the data into the dummy packet appropriately based on
5405 * the mask. Note that we need to only write the bits as
5406 * indicated by the mask to make sure we don't improperly write
5407 * over any significant packet data.
5409 for (j = 0; j < len / sizeof(u16); j++)
5410 if (((u16 *)&lkups[i].m_u)[j])
5411 ((u16 *)(pkt + offset))[j] =
5412 (((u16 *)(pkt + offset))[j] &
5413 ~((u16 *)&lkups[i].m_u)[j]) |
5414 (((u16 *)&lkups[i].h_u)[j] &
5415 ((u16 *)&lkups[i].m_u)[j]);
5418 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5424 * ice_find_adv_rule_entry - Search a rule entry
5425 * @hw: pointer to the hardware structure
5426 * @lkups: lookup elements or match criteria for the advanced recipe, one
5427 * structure per protocol header
5428 * @lkups_cnt: number of protocols
5429 * @recp_id: recipe ID for which we are finding the rule
5430 * @rinfo: other information regarding the rule e.g. priority and action info
5432 * Helper function to search for a given advance rule entry
5433 * Returns pointer to entry storing the rule if found
5435 static struct ice_adv_fltr_mgmt_list_entry *
5436 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5437 u16 lkups_cnt, u8 recp_id,
5438 struct ice_adv_rule_info *rinfo)
5440 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5441 struct ice_switch_info *sw = hw->switch_info;
/* Scan the filter list attached to the given recipe for an entry whose
 * lookups compare equal element-by-element (memcmp) and whose action
 * flag and tunnel type match the requested rule info.
 */
5444 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5445 ice_adv_fltr_mgmt_list_entry, list_entry) {
5446 bool lkups_matched = true;
5448 if (lkups_cnt != list_itr->lkups_cnt)
5450 for (i = 0; i < list_itr->lkups_cnt; i++)
5451 if (memcmp(&list_itr->lkups[i], &lkups[i],
5453 lkups_matched = false;
5456 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5457 rinfo->tun_type == list_itr->rule_info.tun_type &&
5465 * ice_adv_add_update_vsi_list
5466 * @hw: pointer to the hardware structure
5467 * @m_entry: pointer to current adv filter management list entry
5468 * @cur_fltr: filter information from the book keeping entry
5469 * @new_fltr: filter information with the new VSI to be added
5471 * Call AQ command to add or update previously created VSI list with new VSI.
5473 * Helper function to do book keeping associated with adding filter information
5474 * The algorithm to do the booking keeping is described below :
5475 * When a VSI needs to subscribe to a given advanced filter
5476 * if only one VSI has been added till now
5477 * Allocate a new VSI list and add two VSIs
5478 * to this list using switch rule command
5479 * Update the previously created switch rule with the
5480 * newly created VSI list ID
5481 * if a VSI list was previously created
5482 * Add the new VSI to the previously created VSI list set
5483 * using the update switch rule command
5485 static enum ice_status
5486 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5487 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5488 struct ice_adv_rule_info *cur_fltr,
5489 struct ice_adv_rule_info *new_fltr)
5491 enum ice_status status;
5492 u16 vsi_list_id = 0;
/* VSI lists only make sense for forward-to-VSI actions; queue and
 * queue-group destinations cannot be aggregated this way.
 */
5494 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5495 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5496 return ICE_ERR_NOT_IMPL;
5498 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5499 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5500 return ICE_ERR_ALREADY_EXISTS;
5502 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5503 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5504 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5505 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5506 return ICE_ERR_NOT_IMPL;
5508 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5509 /* Only one entry existed in the mapping and it was not already
5510 * a part of a VSI list. So, create a VSI list with the old and
5513 struct ice_fltr_info tmp_fltr;
5514 u16 vsi_handle_arr[2];
5516 /* A rule already exists with the new VSI being added */
5517 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5518 new_fltr->sw_act.fwd_id.hw_vsi_id)
5519 return ICE_ERR_ALREADY_EXISTS;
5521 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5522 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5523 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5529 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5530 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5531 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5532 /* Update the previous switch rule of "forward to VSI" to
5535 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book keeping: the rule now forwards to the new VSI list */
5539 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5540 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5541 m_entry->vsi_list_info =
5542 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5545 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5547 if (!m_entry->vsi_list_info)
5550 /* A rule already exists with the new VSI being added */
5551 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5554 /* Update the previously created VSI list set with
5555 * the new VSI ID passed in
5557 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5559 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5561 ice_aqc_opc_update_sw_rules,
5563 /* update VSI list mapping info with new VSI ID */
5565 ice_set_bit(vsi_handle,
5566 m_entry->vsi_list_info->vsi_map);
/* Track subscriber count on success */
5569 m_entry->vsi_count++;
5574 * ice_add_adv_rule - helper function to create an advanced switch rule
5575 * @hw: pointer to the hardware structure
5576 * @lkups: information on the words that needs to be looked up. All words
5577 * together makes one recipe
5578 * @lkups_cnt: num of entries in the lkups array
5579 * @rinfo: other information related to the rule that needs to be programmed
5580 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5581 * ignored is case of error.
5583 * This function can program only 1 rule at a time. The lkups is used to
5584 * describe the all the words that forms the "lookup" portion of the recipe.
5585 * These words can span multiple protocols. Callers to this function need to
5586 * pass in a list of protocol headers with lookup information along and mask
5587 * that determines which words are valid from the given protocol header.
5588 * rinfo describes other information related to this rule such as forwarding
5589 * IDs, priority of this rule, etc.
5592 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5593 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5594 struct ice_rule_query_data *added_entry)
5596 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5597 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5598 const struct ice_dummy_pkt_offsets *pkt_offsets;
5599 struct ice_aqc_sw_rules_elem *s_rule = NULL;
5600 struct LIST_HEAD_TYPE *rule_head;
5601 struct ice_switch_info *sw;
5602 enum ice_status status;
5603 const u8 *pkt = NULL;
	/* NOTE(review): some original lines are elided in this listing; the
	 * comments below describe only the code that is visible here.
	 */
5609 return ICE_ERR_PARAM;
	/* Reject lookups whose match mask is all-zero: such an entry would
	 * match nothing meaningful (mask words scanned as u16 below).
	 */
5611 for (i = 0; i < lkups_cnt; i++) {
5614 /* Validate match masks to make sure that there is something
5617 ptr = (u16 *)&lkups[i].m_u;
5618 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5625 return ICE_ERR_PARAM;
5627 /* make sure that we can locate a dummy packet */
5628 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5631 status = ICE_ERR_PARAM;
5632 goto err_ice_add_adv_rule;
	/* Only these four single-rule actions are supported here */
5635 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5636 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5637 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5638 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5641 vsi_handle = rinfo->sw_act.vsi_handle;
5642 if (!ice_is_vsi_valid(hw, vsi_handle))
5643 return ICE_ERR_PARAM;
	/* Resolve the driver VSI handle to the HW VSI number where needed */
5645 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5646 rinfo->sw_act.fwd_id.hw_vsi_id =
5647 ice_get_hw_vsi_num(hw, vsi_handle);
5648 if (rinfo->sw_act.flag & ICE_FLTR_TX)
5649 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
	/* Create (or locate) the recipe that matches this set of lookup
	 * words; rid identifies it from here on.
	 */
5651 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5654 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5656 /* we have to add VSI to VSI_LIST and increment vsi_count.
5657 * Also Update VSI list so that we can change forwarding rule
5658 * if the rule already exists, we will check if it exists with
5659 * same vsi_id, if not then add it to the VSI list if it already
5660 * exists if not then create a VSI list and add the existing VSI
5661 * ID and the new VSI ID to the list
5662 * We will add that VSI to the list
5664 status = ice_adv_add_update_vsi_list(hw, m_entry,
5665 &m_entry->rule_info,
	/* Rule already existed: report the existing IDs back to the caller */
5668 added_entry->rid = rid;
5669 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5670 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	/* Rule buffer = fixed sw-rule element minus header + dummy packet */
5674 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5675 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5677 return ICE_ERR_NO_MEMORY;
	/* Translate the caller's filter action into the rule's 'act' bits */
5678 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
5679 switch (rinfo->sw_act.fltr_act) {
5680 case ICE_FWD_TO_VSI:
5681 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5682 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5683 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5686 act |= ICE_SINGLE_ACT_TO_Q;
5687 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5688 ICE_SINGLE_ACT_Q_INDEX_M;
5690 case ICE_FWD_TO_QGRP:
	/* Queue-region size is encoded as log2 of the group size */
5691 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5692 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
5693 act |= ICE_SINGLE_ACT_TO_Q;
5694 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5695 ICE_SINGLE_ACT_Q_INDEX_M;
5696 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5697 ICE_SINGLE_ACT_Q_REGION_M;
5699 case ICE_DROP_PACKET:
5700 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5701 ICE_SINGLE_ACT_VALID_BIT;
5704 status = ICE_ERR_CFG;
5705 goto err_ice_add_adv_rule;
5708 /* set the rule LOOKUP type based on caller specified 'RX'
5709 * instead of hardcoding it to be either LOOKUP_TX/RX
5711 * for 'RX' set the source to be the port number
5712 * for 'TX' set the source to be the source HW VSI number (determined
5716 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5717 s_rule->pdata.lkup_tx_rx.src =
5718 CPU_TO_LE16(hw->port_info->lport);
5720 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5721 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5724 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5725 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	/* Write the dummy packet (with caller's match values) into the rule */
5727 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
	/* Program the rule via the admin queue */
5730 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5731 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5734 goto err_ice_add_adv_rule;
	/* Build the bookkeeping entry, including a private copy of lkups */
5735 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5736 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5738 status = ICE_ERR_NO_MEMORY;
5739 goto err_ice_add_adv_rule;
5742 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5743 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5744 ICE_NONDMA_TO_NONDMA);
5745 if (!adv_fltr->lkups) {
5746 status = ICE_ERR_NO_MEMORY;
5747 goto err_ice_add_adv_rule;
5750 adv_fltr->lkups_cnt = lkups_cnt;
5751 adv_fltr->rule_info = *rinfo;
5752 adv_fltr->rule_info.fltr_rule_id =
5753 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5754 sw = hw->switch_info;
5755 sw->recp_list[rid].adv_rule = true;
5756 rule_head = &sw->recp_list[rid].filt_rules;
5758 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5759 struct ice_fltr_info tmp_fltr;
5761 tmp_fltr.fltr_rule_id =
5762 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5763 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5764 tmp_fltr.fwd_id.hw_vsi_id =
5765 ice_get_hw_vsi_num(hw, vsi_handle);
5766 tmp_fltr.vsi_handle = vsi_handle;
5767 /* Update the previous switch rule of "forward to VSI" to
5770 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5772 goto err_ice_add_adv_rule;
5773 adv_fltr->vsi_count = 1;
5776 /* Add rule entry to book keeping list */
5777 LIST_ADD(&adv_fltr->list_entry, rule_head);
5779 added_entry->rid = rid;
5780 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5781 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	/* Common error path: release the partially built bookkeeping entry
	 * and, unconditionally afterwards, the AQ rule buffer.
	 */
5783 err_ice_add_adv_rule:
5784 if (status && adv_fltr) {
5785 ice_free(hw, adv_fltr->lkups);
5786 ice_free(hw, adv_fltr);
5789 ice_free(hw, s_rule);
5795 * ice_adv_rem_update_vsi_list
5796 * @hw: pointer to the hardware structure
5797 * @vsi_handle: VSI handle of the VSI to remove
5798 * @fm_list: filter management entry for which the VSI list management needs to
5801 static enum ice_status
5802 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5803 struct ice_adv_fltr_mgmt_list_entry *fm_list)
	/* Removes @vsi_handle from the VSI list used by @fm_list. When only
	 * one VSI remains afterwards, the rule is converted back to a plain
	 * forward-to-VSI rule and the now-unused VSI list is deleted.
	 * NOTE(review): some original lines are elided in this listing.
	 */
5805 struct ice_vsi_list_map_info *vsi_list_info;
5806 enum ice_sw_lkup_type lkup_type;
5807 enum ice_status status;
	/* Only meaningful for rules currently forwarding to a VSI list */
5810 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5811 fm_list->vsi_count == 0)
5812 return ICE_ERR_PARAM;
5814 /* A rule with the VSI being removed does not exist */
5815 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5816 return ICE_ERR_DOES_NOT_EXIST;
5818 lkup_type = ICE_SW_LKUP_LAST;
5819 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* Remove this VSI from the HW VSI list (remove == true) */
5820 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5821 ice_aqc_opc_update_sw_rules,
5826 fm_list->vsi_count--;
5827 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5828 vsi_list_info = fm_list->vsi_list_info;
5829 if (fm_list->vsi_count == 1) {
5830 struct ice_fltr_info tmp_fltr;
	/* Find the single remaining VSI in the list's bitmap */
5833 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
5835 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5836 return ICE_ERR_OUT_OF_RANGE;
5838 /* Make sure VSI list is empty before removing it below */
5839 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5841 ice_aqc_opc_update_sw_rules,
5845 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5846 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5847 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5848 tmp_fltr.fwd_id.hw_vsi_id =
5849 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5850 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5851 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5853 /* Update the previous "forward to VSI list" switch rule to
5854 * "forward to VSI" for the remaining VSI
5856 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5858 ice_debug(hw, ICE_DBG_SW,
5859 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5860 tmp_fltr.fwd_id.hw_vsi_id, status);
5865 if (fm_list->vsi_count == 1) {
5866 /* Remove the VSI list since it is no longer used */
5867 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5869 ice_debug(hw, ICE_DBG_SW,
5870 "Failed to remove VSI list %d, error %d\n",
5871 vsi_list_id, status);
	/* Drop the list's bookkeeping entry and detach it from the rule */
5875 LIST_DEL(&vsi_list_info->list_entry);
5876 ice_free(hw, vsi_list_info);
5877 fm_list->vsi_list_info = NULL;
5884 * ice_rem_adv_rule - removes existing advanced switch rule
5885 * @hw: pointer to the hardware structure
5886 * @lkups: information on the words that needs to be looked up. All words
5887 * together makes one recipe
5888 * @lkups_cnt: num of entries in the lkups array
5889 * @rinfo: pointer to the rule information for the rule
5891 * This function can be used to remove 1 rule at a time. The lkups is
5892 * used to describe all the words that forms the "lookup" portion of the
5893 * rule. These words can span multiple protocols. Callers to this function
5894 * need to pass in a list of protocol headers with lookup information along
5895 * and mask that determines which words are valid from the given protocol
5896 * header. rinfo describes other information related to this rule such as
5897 * forwarding IDs, priority of this rule, etc.
5900 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5901 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
	/* NOTE(review): some original lines are elided in this listing; the
	 * comments below describe only the visible code.
	 */
5903 struct ice_adv_fltr_mgmt_list_entry *list_elem;
5904 const struct ice_dummy_pkt_offsets *offsets;
5905 struct ice_prot_lkup_ext lkup_exts;
5906 u16 rule_buf_sz, pkt_len, i, rid;
5907 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5908 enum ice_status status = ICE_SUCCESS;
5909 bool remove_rule = false;
5910 const u8 *pkt = NULL;
	/* Rebuild the lookup-extraction words so the matching recipe (rid)
	 * can be found again from the caller-supplied lookups.
	 */
5913 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
5914 for (i = 0; i < lkups_cnt; i++) {
5917 if (lkups[i].type >= ICE_PROTOCOL_LAST)
5920 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
5924 rid = ice_find_recp(hw, &lkup_exts);
5925 /* If we did not find a recipe that matches the existing criteria */
5926 if (rid == ICE_MAX_NUM_RECIPES)
5927 return ICE_ERR_PARAM;
5929 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
5930 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5931 /* the rule is already removed */
	/* Decide under the rule lock whether the HW rule itself must go, or
	 * only this VSI's membership in the rule's VSI list.
	 */
5934 ice_acquire_lock(rule_lock);
5935 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
5937 } else if (list_elem->vsi_count > 1) {
	/* More than one VSI references the list: just drop this VSI */
5938 list_elem->vsi_list_info->ref_cnt--;
5939 remove_rule = false;
5940 vsi_handle = rinfo->sw_act.vsi_handle;
5941 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5943 vsi_handle = rinfo->sw_act.vsi_handle;
5944 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5946 ice_release_lock(rule_lock);
5949 if (list_elem->vsi_count == 0)
5952 ice_release_lock(rule_lock);
5954 struct ice_aqc_sw_rules_elem *s_rule;
	/* Size the removal buffer exactly like the add path did */
5956 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
5957 &pkt_len, &offsets);
5958 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5960 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
5963 return ICE_ERR_NO_MEMORY;
	/* Removal only needs the rule index; act and hdr_len are zeroed */
5964 s_rule->pdata.lkup_tx_rx.act = 0;
5965 s_rule->pdata.lkup_tx_rx.index =
5966 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
5967 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
5968 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5970 ice_aqc_opc_remove_sw_rules, NULL);
5971 if (status == ICE_SUCCESS) {
	/* HW removal succeeded: drop the bookkeeping entry under the lock */
5972 ice_acquire_lock(rule_lock);
5973 LIST_DEL(&list_elem->list_entry);
5974 ice_free(hw, list_elem->lkups);
5975 ice_free(hw, list_elem);
5976 ice_release_lock(rule_lock);
5978 ice_free(hw, s_rule);
5984 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5985 * @hw: pointer to the hardware structure
5986 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5988 * This function is used to remove 1 rule at a time. The removal is based on
5989 * the remove_entry parameter. This function will remove rule for a given
5990 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
5993 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5994 struct ice_rule_query_data *remove_entry)
	/* Looks up the rule with remove_entry->rule_id under the recipe
	 * remove_entry->rid and delegates removal to ice_rem_adv_rule(),
	 * substituting the caller's VSI handle into the rule info.
	 */
5996 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5997 struct LIST_HEAD_TYPE *list_head;
5998 struct ice_adv_rule_info rinfo;
5999 struct ice_switch_info *sw;
6001 sw = hw->switch_info;
	/* Recipe must have been created before any of its rules can exist */
6002 if (!sw->recp_list[remove_entry->rid].recp_created)
6003 return ICE_ERR_PARAM;
6004 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6005 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6007 if (list_itr->rule_info.fltr_rule_id ==
6008 remove_entry->rule_id) {
6009 rinfo = list_itr->rule_info;
6010 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6011 return ice_rem_adv_rule(hw, list_itr->lkups,
6012 list_itr->lkups_cnt, &rinfo);
	/* No rule with the requested ID was found under this recipe */
6015 return ICE_ERR_PARAM;
6019 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6021 * @hw: pointer to the hardware structure
6022 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6024 * This function is used to remove all the rules for a given VSI and as soon
6025 * as removing a rule fails, it will return immediately with the error code,
6026 * else it will return ICE_SUCCESS
6029 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
	/* Walks every created recipe that holds advanced rules and removes
	 * the rules associated with @vsi_handle via ice_rem_adv_rule().
	 * NOTE(review): some original lines are elided in this listing.
	 */
6031 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6032 struct ice_vsi_list_map_info *map_info;
6033 struct LIST_HEAD_TYPE *list_head;
6034 struct ice_adv_rule_info rinfo;
6035 struct ice_switch_info *sw;
6036 enum ice_status status;
6037 u16 vsi_list_id = 0;
6040 sw = hw->switch_info;
6041 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
	/* Skip recipes that were never created or hold no advanced rules */
6042 if (!sw->recp_list[rid].recp_created)
6044 if (!sw->recp_list[rid].adv_rule)
6046 list_head = &sw->recp_list[rid].filt_rules;
6048 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6049 ice_adv_fltr_mgmt_list_entry, list_entry) {
	/* Check whether this VSI participates in the rule's VSI list */
6050 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6054 rinfo = list_itr->rule_info;
6055 rinfo.sw_act.vsi_handle = vsi_handle;
6056 status = ice_rem_adv_rule(hw, list_itr->lkups,
6057 list_itr->lkups_cnt, &rinfo);
6067 * ice_replay_fltr - Replay all the filters stored by a specific list head
6068 * @hw: pointer to the hardware structure
6069 * @list_head: list for which filters needs to be replayed
6070 * @recp_id: Recipe ID for which rules need to be replayed
6072 static enum ice_status
6073 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
	/* Re-programs every filter stored under @list_head for recipe
	 * @recp_id after a reset. NOTE(review): some original lines are
	 * elided in this listing.
	 */
6075 struct ice_fltr_mgmt_list_entry *itr;
6076 struct LIST_HEAD_TYPE l_head;
6077 enum ice_status status = ICE_SUCCESS;
6079 if (LIST_EMPTY(list_head))
6082 /* Move entries from the given list_head to a temporary l_head so that
6083 * they can be replayed. Otherwise when trying to re-add the same
6084 * filter, the function will return already exists
6086 LIST_REPLACE_INIT(list_head, &l_head)
6088 /* Mark the given list_head empty by reinitializing it so filters
6089 * could be added again by *handler
6091 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6093 struct ice_fltr_list_entry f_entry;
6095 f_entry.fltr_info = itr->fltr_info;
	/* Simple case: single-VSI, non-VLAN filter replays as-is */
6096 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6097 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6098 if (status != ICE_SUCCESS)
6103 /* Add a filter per VSI separately */
6108 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6110 if (!ice_is_vsi_valid(hw, vsi_handle))
	/* Clear the bit so each VSI in the map is replayed exactly once */
6113 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6114 f_entry.fltr_info.vsi_handle = vsi_handle;
6115 f_entry.fltr_info.fwd_id.hw_vsi_id =
6116 ice_get_hw_vsi_num(hw, vsi_handle);
6117 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6118 if (recp_id == ICE_SW_LKUP_VLAN)
6119 status = ice_add_vlan_internal(hw, &f_entry);
6121 status = ice_add_rule_internal(hw, recp_id,
6123 if (status != ICE_SUCCESS)
6128 /* Clear the filter management list */
6129 ice_rem_sw_rule_info(hw, &l_head);
6134 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6135 * @hw: pointer to the hardware structure
6137 * NOTE: This function does not clean up partially added filters on error.
6138 * It is up to caller of the function to issue a reset or fail early.
6140 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
	/* Replays the filters of every recipe's filt_rules list; stops on
	 * the first failure (partial replay is the caller's problem, per
	 * the header comment above).
	 */
6142 struct ice_switch_info *sw = hw->switch_info;
6143 enum ice_status status = ICE_SUCCESS;
6146 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6147 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6149 status = ice_replay_fltr(hw, i, head);
6150 if (status != ICE_SUCCESS)
6157 * ice_replay_vsi_fltr - Replay filters for requested VSI
6158 * @hw: pointer to the hardware structure
6159 * @vsi_handle: driver VSI handle
6160 * @recp_id: Recipe ID for which rules need to be replayed
6161 * @list_head: list for which filters need to be replayed
6163 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6164 * It is required to pass valid VSI handle.
6166 static enum ice_status
6167 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6168 struct LIST_HEAD_TYPE *list_head)
	/* Replays only the filters in @list_head that belong to @vsi_handle.
	 * NOTE(review): some original lines are elided in this listing.
	 */
6170 struct ice_fltr_mgmt_list_entry *itr;
6171 enum ice_status status = ICE_SUCCESS;
6174 if (LIST_EMPTY(list_head))
6176 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6178 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6180 struct ice_fltr_list_entry f_entry;
6182 f_entry.fltr_info = itr->fltr_info;
	/* Single-VSI non-VLAN filter that targets this VSI: replay as-is */
6183 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6184 itr->fltr_info.vsi_handle == vsi_handle) {
6185 /* update the src in case it is VSI num */
6186 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6187 f_entry.fltr_info.src = hw_vsi_id;
6188 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6189 if (status != ICE_SUCCESS)
	/* Otherwise the filter is list-based: skip it unless this VSI is a
	 * member of its VSI map.
	 */
6193 if (!itr->vsi_list_info ||
6194 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6196 /* Clearing it so that the logic can add it back */
6197 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6198 f_entry.fltr_info.vsi_handle = vsi_handle;
6199 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6200 /* update the src in case it is VSI num */
6201 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6202 f_entry.fltr_info.src = hw_vsi_id;
6203 if (recp_id == ICE_SW_LKUP_VLAN)
6204 status = ice_add_vlan_internal(hw, &f_entry);
6206 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6207 if (status != ICE_SUCCESS)
6215 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6216 * @hw: pointer to the hardware structure
6217 * @vsi_handle: driver VSI handle
6218 * @list_head: list for which filters need to be replayed
6220 * Replay the advanced rule for the given VSI.
6222 static enum ice_status
6223 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6224 struct LIST_HEAD_TYPE *list_head)
	/* Re-adds every advanced rule in @list_head whose stored action
	 * targets @vsi_handle, by calling ice_add_adv_rule() with the saved
	 * lookups and rule info.
	 */
6226 struct ice_rule_query_data added_entry = { 0 };
6227 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6228 enum ice_status status = ICE_SUCCESS;
6230 if (LIST_EMPTY(list_head))
6232 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6234 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6235 u16 lk_cnt = adv_fltr->lkups_cnt;
	/* Only rules that were bound to this VSI are replayed */
6237 if (vsi_handle != rinfo->sw_act.vsi_handle)
6239 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6248 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6249 * @hw: pointer to the hardware structure
6250 * @vsi_handle: driver VSI handle
6252 * Replays filters for requested VSI via vsi_handle.
6254 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
	/* Replays all stored filters for @vsi_handle: regular filters via
	 * ice_replay_vsi_fltr(), advanced rules via ice_replay_vsi_adv_rule(),
	 * chosen per-recipe by the adv_rule flag.
	 */
6256 struct ice_switch_info *sw = hw->switch_info;
6257 enum ice_status status;
6260 /* Update the recipes that were created */
6261 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6262 struct LIST_HEAD_TYPE *head;
6264 head = &sw->recp_list[i].filt_replay_rules;
6265 if (!sw->recp_list[i].adv_rule)
6266 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6268 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6269 if (status != ICE_SUCCESS)
6277 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6278 * @hw: pointer to the HW struct
6280 * Deletes the filter replay rules.
6282 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6284 struct ice_switch_info *sw = hw->switch_info;
6290 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6291 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6292 struct LIST_HEAD_TYPE *l_head;
6294 l_head = &sw->recp_list[i].filt_replay_rules;
6295 if (!sw->recp_list[i].adv_rule)
6296 ice_rem_sw_rule_info(hw, l_head);
6298 ice_rem_adv_rule_info(hw, l_head);