1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
73 u8 dummy_gre_packet[] = { 0, 0, 0, 0, /* ICE_MAC_OFOS 0 */
76 0x08, 0, /* ICE_ETYPE_OL 12 */
77 0x45, 0, 0, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x80, 0, 0x65, 0x58, /* ICE_NVGRE 34 */
84 0, 0, 0, 0, /* ICE_MAC_IL 42 */
88 0x45, 0, 0, 0x14, /* ICE_IPV4_IL 54 */
96 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
99 { ICE_IPV4_OFOS, 14 },
105 { ICE_PROTOCOL_LAST, 0 },
109 u8 dummy_udp_tun_tcp_packet[] = {
110 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
111 0x00, 0x00, 0x00, 0x00,
112 0x00, 0x00, 0x00, 0x00,
114 0x08, 0x00, /* ICE_ETYPE_OL 12 */
116 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
117 0x00, 0x01, 0x00, 0x00,
118 0x40, 0x11, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
123 0x00, 0x46, 0x00, 0x00,
125 0x04, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
126 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
133 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
134 0x00, 0x01, 0x00, 0x00,
135 0x40, 0x06, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00,
142 0x50, 0x02, 0x20, 0x00,
143 0x00, 0x00, 0x00, 0x00
147 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
149 { ICE_ETYPE_OL, 12 },
150 { ICE_IPV4_OFOS, 14 },
155 { ICE_UDP_ILOS, 84 },
156 { ICE_PROTOCOL_LAST, 0 },
160 u8 dummy_udp_tun_udp_packet[] = {
161 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
165 0x08, 0x00, /* ICE_ETYPE_OL 12 */
167 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
168 0x00, 0x01, 0x00, 0x00,
169 0x00, 0x11, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
174 0x00, 0x3a, 0x00, 0x00,
176 0x0c, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
180 0x00, 0x00, 0x00, 0x00,
181 0x00, 0x00, 0x00, 0x00,
184 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
185 0x00, 0x01, 0x00, 0x00,
186 0x00, 0x11, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
190 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
191 0x00, 0x08, 0x00, 0x00,
195 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
197 { ICE_ETYPE_OL, 12 },
198 { ICE_IPV4_OFOS, 14 },
199 { ICE_UDP_ILOS, 34 },
200 { ICE_PROTOCOL_LAST, 0 },
204 dummy_udp_packet[] = {
205 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
206 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
209 0x08, 0x00, /* ICE_ETYPE_OL 12 */
211 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
212 0x00, 0x01, 0x00, 0x00,
213 0x00, 0x11, 0x00, 0x00,
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
217 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
218 0x00, 0x08, 0x00, 0x00,
220 0x00, 0x00, /* 2 bytes for 4 byte alignment */
224 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
226 { ICE_ETYPE_OL, 12 },
227 { ICE_IPV4_OFOS, 14 },
229 { ICE_PROTOCOL_LAST, 0 },
233 dummy_tcp_packet[] = {
234 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
235 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00,
238 0x08, 0x00, /* ICE_ETYPE_OL 12 */
240 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
241 0x00, 0x01, 0x00, 0x00,
242 0x00, 0x06, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
246 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x50, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, /* 2 bytes for 4 byte alignment */
256 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
258 { ICE_ETYPE_OL, 12 },
259 { ICE_IPV6_OFOS, 14 },
261 { ICE_PROTOCOL_LAST, 0 },
265 dummy_tcp_ipv6_packet[] = {
266 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
267 0x00, 0x00, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00,
270 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
272 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
273 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
276 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00,
278 0x00, 0x00, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
284 0x00, 0x00, 0x00, 0x00,
285 0x00, 0x00, 0x00, 0x00,
286 0x50, 0x00, 0x00, 0x00,
287 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, /* 2 bytes for 4 byte alignment */
293 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
295 { ICE_ETYPE_OL, 12 },
296 { ICE_IPV6_OFOS, 14 },
297 { ICE_UDP_ILOS, 54 },
298 { ICE_PROTOCOL_LAST, 0 },
302 dummy_udp_ipv6_packet[] = {
303 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
307 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
309 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
310 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
311 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
321 0x00, 0x08, 0x00, 0x00,
323 0x00, 0x00, /* 2 bytes for 4 byte alignment */
326 /* this is a recipe to profile bitmap association */
327 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
328 ICE_MAX_NUM_PROFILES);
329 static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
331 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
334 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
335 * @hw: pointer to hardware structure
336 * @recps: struct that we need to populate
337 * @rid: recipe ID that we are populating
338 * @refresh_required: true if we should get recipe to profile mapping from FW
340 * This function is used to populate all the necessary entries into our
341 * bookkeeping so that we have a current list of all the recipes that are
342 * programmed in the firmware.
344 static enum ice_status
345 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
346 bool *refresh_required)
/* NOTE(review): this view of the file elides several lines (braces, NULL
 * checks after allocations, 'continue' statements) — verify against the
 * complete source before relying on control flow here.
 */
348 u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
349 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
350 u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
351 struct ice_aqc_recipe_data_elem *tmp;
352 u16 num_recps = ICE_MAX_NUM_RECIPES;
353 struct ice_prot_lkup_ext *lkup_exts;
354 enum ice_status status;
356 /* we need a buffer big enough to accommodate all the recipes */
357 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
358 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
360 return ICE_ERR_NO_MEMORY;
/* fetch the recipe (and, for chained recipes, its sub-recipes) from FW;
 * num_recps is updated by the call to the number actually returned
 */
362 tmp[0].recipe_indx = rid;
363 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
364 /* non-zero status meaning recipe doesn't exist */
368 /* Get recipe to profile map so that we can get the fv from lkups that
369 * we read for a recipe from FW. Since we want to minimize the number of
370 * times we make this FW call, just make one call and cache the copy
371 * until a new recipe is added. This operation is only required the
372 * first time to get the changes from FW. Then to search existing
373 * entries we don't need to update the cache again until another recipe
376 if (*refresh_required) {
377 ice_get_recp_to_prof_map(hw);
378 *refresh_required = false;
380 lkup_exts = &recps[rid].lkup_exts;
381 /* start populating all the entries for recps[rid] based on lkups from
384 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
385 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
386 struct ice_recp_grp_entry *rg_entry;
387 u8 prof_id, prot = 0;
390 rg_entry = (struct ice_recp_grp_entry *)
391 ice_malloc(hw, sizeof(*rg_entry));
393 status = ICE_ERR_NO_MEMORY;
396 /* Avoid 8th bit since its result enable bit */
397 result_idxs[result_idx] = root_bufs.content.result_indx &
398 ~ICE_AQ_RECIPE_RESULT_EN;
399 /* Check if result enable bit is set */
400 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
401 ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
402 result_idxs[result_idx++],
403 available_result_ids)
405 recipe_to_profile[tmp[sub_recps].recipe_indx],
406 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
407 /* get the first profile that is associated with rid */
408 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
/* walk the recipe's lookup words; index 0 is skipped (i + 1 below) */
409 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
410 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
412 rg_entry->fv_idx[i] = lkup_indx;
413 rg_entry->fv_mask[i] =
414 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
416 /* If the recipe is a chained recipe then all its
417 * child recipe's result will have a result index.
418 * To fill fv_words we should not use those result
419 * index, we only need the protocol ids and offsets.
420 * We will skip all the fv_idx which stores result
421 * index in them. We also need to skip any fv_idx which
422 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
423 * valid offset value.
425 if (result_idxs[0] == rg_entry->fv_idx[i] ||
426 result_idxs[1] == rg_entry->fv_idx[i] ||
427 result_idxs[2] == rg_entry->fv_idx[i] ||
428 result_idxs[3] == rg_entry->fv_idx[i] ||
429 result_idxs[4] == rg_entry->fv_idx[i] ||
430 rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
431 rg_entry->fv_idx[i] == 0)
/* translate FV index -> (protocol id, offset) for this profile */
434 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
435 rg_entry->fv_idx[i], &prot, &off);
436 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
437 lkup_exts->fv_words[fv_word_idx].off = off;
440 /* populate rg_list with the data from the child entry of this
443 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
/* record how many valid protocol words were extracted above */
445 lkup_exts->n_val_words = fv_word_idx;
446 recps[rid].n_grp_count = num_recps;
447 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
448 ice_calloc(hw, recps[rid].n_grp_count,
449 sizeof(struct ice_aqc_recipe_data_elem));
450 if (!recps[rid].root_buf)
/* keep a private copy of the raw FW recipe buffer for later use */
453 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
454 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
455 recps[rid].recp_created = true;
456 if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
457 recps[rid].root_rid = rid;
464 * ice_get_recp_to_prof_map - updates recipe to profile mapping
465 * @hw: pointer to hardware structure
467 * This function is used to populate recipe_to_profile matrix where index to
468 * this array is the recipe ID and the element is the mapping of which profiles
469 * is this recipe mapped to.
/* Rebuild the file-scope recipe_to_profile bitmap cache by querying FW for
 * each profile's recipe association and inverting the mapping
 * (profile->recipes becomes recipe->profiles).
 * NOTE(review): return type line and some braces are elided in this view.
 */
472 ice_get_recp_to_prof_map(struct ice_hw *hw)
474 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
477 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
480 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* skip this profile on FW query failure (non-zero return) */
481 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
/* invert: profile i is associated with every recipe j set in r_bitmap */
484 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
485 if (ice_is_bit_set(r_bitmap, j))
486 ice_set_bit(i, recipe_to_profile[j]);
491 * ice_init_def_sw_recp - initialize the recipe book keeping tables
492 * @hw: pointer to the HW struct
494 * Allocate memory for the entire recipe table and initialize the structures/
495 * entries corresponding to basic recipes.
497 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
499 struct ice_sw_recipe *recps;
/* allocate the full recipe table up front (zeroed by ice_calloc) */
502 recps = (struct ice_sw_recipe *)
503 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
505 return ICE_ERR_NO_MEMORY;
/* basic recipes are their own root; initialize per-recipe lists/locks */
507 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
508 recps[i].root_rid = i;
509 INIT_LIST_HEAD(&recps[i].filt_rules);
510 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
511 INIT_LIST_HEAD(&recps[i].rg_list);
512 ice_init_lock(&recps[i].filt_rule_lock);
/* ownership of 'recps' transfers to hw->switch_info */
515 hw->switch_info->recp_list = recps;
521 * ice_aq_get_sw_cfg - get switch configuration
522 * @hw: pointer to the hardware structure
523 * @buf: pointer to the result buffer
524 * @buf_size: length of the buffer available for response
525 * @req_desc: pointer to requested descriptor
526 * @num_elems: pointer to number of elements
527 * @cd: pointer to command details structure or NULL
529 * Get switch configuration (0x0200) to be placed in 'buff'.
530 * This admin command returns information such as initial VSI/port number
531 * and switch ID it belongs to.
533 * NOTE: *req_desc is both an input/output parameter.
534 * The caller of this function first calls this function with *request_desc set
535 * to 0. If the response from f/w has *req_desc set to 0, all the switch
536 * configuration information has been returned; if non-zero (meaning not all
537 * the information was returned), the caller should call this function again
538 * with *req_desc set to the previous value returned by f/w to get the
539 * next block of switch configuration information.
541 * *num_elems is output only parameter. This reflects the number of elements
542 * in response buffer. The caller of this function to use *num_elems while
543 * parsing the response buffer.
545 static enum ice_status
546 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
547 u16 buf_size, u16 *req_desc, u16 *num_elems,
548 struct ice_sq_cd *cd)
550 struct ice_aqc_get_sw_cfg *cmd;
551 enum ice_status status;
552 struct ice_aq_desc desc;
/* build the 0x0200 descriptor; *req_desc is the continuation cookie
 * supplied by the caller (0 on the first call)
 */
554 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
555 cmd = &desc.params.get_sw_conf;
556 cmd->element = CPU_TO_LE16(*req_desc);
558 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* on success FW writes the next cookie and element count back into the
 * descriptor; NOTE(review): the status check guarding these lines is
 * elided in this view — confirm against the full source
 */
560 *req_desc = LE16_TO_CPU(cmd->element);
561 *num_elems = LE16_TO_CPU(cmd->num_elems);
569 * ice_alloc_sw - allocate resources specific to switch
570 * @hw: pointer to the HW struct
571 * @ena_stats: true to turn on VEB stats
572 * @shared_res: true for shared resource, false for dedicated resource
573 * @sw_id: switch ID returned
574 * @counter_id: VEB counter ID returned
576 * allocates switch resources (SWID and VEB counter) (0x0208)
/* Allocate a SWID and (optionally, per ena_stats) a VEB counter via the
 * alloc-resource AQ command (0x0208); outputs via *sw_id / *counter_id.
 * NOTE(review): return-type line, NULL checks and some braces are elided
 * in this view.
 */
579 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
582 struct ice_aqc_alloc_free_res_elem *sw_buf;
583 struct ice_aqc_res_elem *sw_ele;
584 enum ice_status status;
587 buf_len = sizeof(*sw_buf);
588 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
589 ice_malloc(hw, buf_len);
591 return ICE_ERR_NO_MEMORY;
593 /* Prepare buffer for switch ID.
594 * The number of resource entries in buffer is passed as 1 since only a
595 * single switch/VEB instance is allocated, and hence a single sw_id
598 sw_buf->num_elems = CPU_TO_LE16(1);
600 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
601 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
602 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
604 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
605 ice_aqc_opc_alloc_res, NULL);
608 goto ice_alloc_sw_exit;
/* FW returns the allocated SWID in the first response element */
610 sw_ele = &sw_buf->elem[0];
611 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
614 /* Prepare buffer for VEB Counter */
615 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
616 struct ice_aqc_alloc_free_res_elem *counter_buf;
617 struct ice_aqc_res_elem *counter_ele;
619 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
620 ice_malloc(hw, buf_len);
622 status = ICE_ERR_NO_MEMORY;
623 goto ice_alloc_sw_exit;
626 /* The number of resource entries in buffer is passed as 1 since
627 * only a single switch/VEB instance is allocated, and hence a
628 * single VEB counter is requested.
630 counter_buf->num_elems = CPU_TO_LE16(1);
631 counter_buf->res_type =
632 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
633 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
634 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* free the counter buffer on both error and success paths below */
638 ice_free(hw, counter_buf);
639 goto ice_alloc_sw_exit;
641 counter_ele = &counter_buf->elem[0];
642 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
643 ice_free(hw, counter_buf);
/* goto-based cleanup: sw_buf is always released here */
647 ice_free(hw, sw_buf);
652 * ice_free_sw - free resources specific to switch
653 * @hw: pointer to the HW struct
654 * @sw_id: switch ID returned
655 * @counter_id: VEB counter ID returned
657 * free switch resources (SWID and VEB counter) (0x0209)
659 * NOTE: This function frees multiple resources. It continues
660 * releasing other resources even after it encounters error.
661 * The error code returned is the last error it encountered.
663 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
665 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
666 enum ice_status status, ret_status;
669 buf_len = sizeof(*sw_buf);
670 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
671 ice_malloc(hw, buf_len);
673 return ICE_ERR_NO_MEMORY;
675 /* Prepare buffer to free for switch ID res.
676 * The number of resource entries in buffer is passed as 1 since only a
677 * single switch/VEB instance is freed, and hence a single sw_id
680 sw_buf->num_elems = CPU_TO_LE16(1);
681 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
682 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* ret_status records the SWID-free result; per the function contract we
 * keep going and free the VEB counter even if this step failed
 */
684 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
685 ice_aqc_opc_free_res, NULL);
688 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
690 /* Prepare buffer to free for VEB Counter resource */
691 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
692 ice_malloc(hw, buf_len);
694 ice_free(hw, sw_buf);
695 return ICE_ERR_NO_MEMORY;
698 /* The number of resource entries in buffer is passed as 1 since only a
699 * single switch/VEB instance is freed, and hence a single VEB counter
702 counter_buf->num_elems = CPU_TO_LE16(1);
703 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
704 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
706 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
707 ice_aqc_opc_free_res, NULL);
709 ice_debug(hw, ICE_DBG_SW,
710 "VEB counter resource could not be freed\n");
/* release both request buffers; the last error encountered is returned */
714 ice_free(hw, counter_buf);
715 ice_free(hw, sw_buf);
721 * @hw: pointer to the HW struct
722 * @vsi_ctx: pointer to a VSI context struct
723 * @cd: pointer to command details structure or NULL
725 * Add a VSI context to the hardware (0x0210)
/* Issue the Add VSI AQ command (0x0210) and, on success, copy the FW-assigned
 * VSI number and allocation counters back into @vsi_ctx.
 * NOTE(review): return-type line, braces and the status check guarding the
 * response parsing are elided in this view.
 */
728 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
729 struct ice_sq_cd *cd)
731 struct ice_aqc_add_update_free_vsi_resp *res;
732 struct ice_aqc_add_get_update_free_vsi *cmd;
733 struct ice_aq_desc desc;
734 enum ice_status status;
736 cmd = &desc.params.vsi_cmd;
737 res = &desc.params.add_update_free_vsi_res;
739 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* when not allocating from the shared pool, pass the caller-chosen VSI
 * number with the "is valid" flag set
 */
741 if (!vsi_ctx->alloc_from_pool)
742 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
743 ICE_AQ_VSI_IS_VALID);
745 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* ICE_AQ_FLAG_RD: the buffer carries data FW must read (the VSI info) */
747 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
749 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
750 sizeof(vsi_ctx->info), cd);
753 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
754 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
755 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
763 * @hw: pointer to the HW struct
764 * @vsi_ctx: pointer to a VSI context struct
765 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
766 * @cd: pointer to command details structure or NULL
768 * Free VSI context info from hardware (0x0213)
/* Issue the Free VSI AQ command (0x0213); on success the allocation counters
 * in @vsi_ctx are refreshed from the response.
 * NOTE(review): return-type line and the keep_vsi_alloc conditional guarding
 * the cmd_flags assignment are elided in this view.
 */
771 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
772 bool keep_vsi_alloc, struct ice_sq_cd *cd)
774 struct ice_aqc_add_update_free_vsi_resp *resp;
775 struct ice_aqc_add_get_update_free_vsi *cmd;
776 struct ice_aq_desc desc;
777 enum ice_status status;
779 cmd = &desc.params.vsi_cmd;
780 resp = &desc.params.add_update_free_vsi_res;
782 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
784 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
786 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
788 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
790 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
791 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
799 * @hw: pointer to the HW struct
800 * @vsi_ctx: pointer to a VSI context struct
801 * @cd: pointer to command details structure or NULL
803 * Update VSI context in the hardware (0x0211)
/* Issue the Update VSI AQ command (0x0211), sending vsi_ctx->info as the
 * command buffer; on success the allocation counters are refreshed.
 * NOTE(review): return-type line and the status check guarding the response
 * parsing are elided in this view.
 */
806 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
807 struct ice_sq_cd *cd)
809 struct ice_aqc_add_update_free_vsi_resp *resp;
810 struct ice_aqc_add_get_update_free_vsi *cmd;
811 struct ice_aq_desc desc;
812 enum ice_status status;
814 cmd = &desc.params.vsi_cmd;
815 resp = &desc.params.add_update_free_vsi_res;
817 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
819 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* ICE_AQ_FLAG_RD: the buffer carries data FW must read (the VSI info) */
821 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
823 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
824 sizeof(vsi_ctx->info), cd);
827 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
828 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
835 * ice_is_vsi_valid - check whether the VSI is valid or not
836 * @hw: pointer to the HW struct
837 * @vsi_handle: VSI handle
839 * check whether the VSI is valid or not
841 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
843 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
847 * ice_get_hw_vsi_num - return the HW VSI number
848 * @hw: pointer to the HW struct
849 * @vsi_handle: VSI handle
851 * return the HW VSI number
852 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
854 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
856 return hw->vsi_ctx[vsi_handle]->vsi_num;
860 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
861 * @hw: pointer to the HW struct
862 * @vsi_handle: VSI handle
864 * return the VSI context entry for a given VSI handle
866 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
868 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
872 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
873 * @hw: pointer to the HW struct
874 * @vsi_handle: VSI handle
875 * @vsi: VSI context pointer
877 * save the VSI context entry for a given VSI handle
/* Store @vsi in the per-handle context table; ownership of the pointer is
 * taken by hw->vsi_ctx. NOTE(review): the return-type line and braces are
 * elided in this view; caller is expected to pass an in-range handle.
 */
880 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
882 hw->vsi_ctx[vsi_handle] = vsi;
886 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
887 * @hw: pointer to the HW struct
888 * @vsi_handle: VSI handle
890 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
892 struct ice_vsi_ctx *vsi;
/* NOTE(review): the NULL check on 'vsi' after this lookup is elided in
 * this view — verify against the full source
 */
895 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* free the per-TC LAN queue context arrays and NULL the slots so a later
 * clear is a no-op
 */
898 ice_for_each_traffic_class(i) {
899 if (vsi->lan_q_ctx[i]) {
900 ice_free(hw, vsi->lan_q_ctx[i]);
901 vsi->lan_q_ctx[i] = NULL;
907 * ice_clear_vsi_ctx - clear the VSI context entry
908 * @hw: pointer to the HW struct
909 * @vsi_handle: VSI handle
911 * clear the VSI context entry
913 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
915 struct ice_vsi_ctx *vsi;
917 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* release the queue contexts first, then drop the table entry itself;
 * NOTE(review): the guard around this cleanup (and the free of 'vsi') is
 * elided in this view — verify against the full source
 */
919 ice_clear_vsi_q_ctx(hw, vsi_handle);
921 hw->vsi_ctx[vsi_handle] = NULL;
926 * ice_clear_all_vsi_ctx - clear all the VSI context entries
927 * @hw: pointer to the HW struct
929 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* clear every possible handle; ice_clear_vsi_ctx tolerates empty slots */
933 for (i = 0; i < ICE_MAX_VSI; i++)
934 ice_clear_vsi_ctx(hw, i);
938 * ice_add_vsi - add VSI context to the hardware and VSI handle list
939 * @hw: pointer to the HW struct
940 * @vsi_handle: unique VSI handle provided by drivers
941 * @vsi_ctx: pointer to a VSI context struct
942 * @cd: pointer to command details structure or NULL
944 * Add a VSI context to the hardware also add it into the VSI handle list.
945 * If this function gets called after reset for existing VSIs then update
946 * with the new HW VSI number in the corresponding VSI handle list entry.
/* Add a VSI to HW (via ice_aq_add_vsi) and record it in the handle table;
 * after a reset, an existing handle entry is refreshed with the new HW VSI
 * number instead of being recreated.
 * NOTE(review): return-type line, braces and the status / NULL checks between
 * some statements are elided in this view.
 */
949 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
950 struct ice_sq_cd *cd)
952 struct ice_vsi_ctx *tmp_vsi_ctx;
953 enum ice_status status;
955 if (vsi_handle >= ICE_MAX_VSI)
956 return ICE_ERR_PARAM;
957 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
960 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
962 /* Create a new VSI context */
963 tmp_vsi_ctx = (struct ice_vsi_ctx *)
964 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* allocation failed: undo the HW-side add so HW and SW stay in sync */
966 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
967 return ICE_ERR_NO_MEMORY;
969 *tmp_vsi_ctx = *vsi_ctx;
971 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
973 /* update with new HW VSI num */
974 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
975 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
982 * ice_free_vsi- free VSI context from hardware and VSI handle list
983 * @hw: pointer to the HW struct
984 * @vsi_handle: unique VSI handle
985 * @vsi_ctx: pointer to a VSI context struct
986 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
987 * @cd: pointer to command details structure or NULL
989 * Free VSI context info from hardware as well as from VSI handle list
/* Free a VSI in HW and, on success, remove it from the handle table.
 * NOTE(review): return-type line, braces and the status check guarding the
 * context clear are elided in this view.
 */
992 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
993 bool keep_vsi_alloc, struct ice_sq_cd *cd)
995 enum ice_status status;
997 if (!ice_is_vsi_valid(hw, vsi_handle))
998 return ICE_ERR_PARAM;
/* translate the driver handle into the HW VSI number FW expects */
999 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1000 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1002 ice_clear_vsi_ctx(hw, vsi_handle);
1008 * @hw: pointer to the HW struct
1009 * @vsi_handle: unique VSI handle
1010 * @vsi_ctx: pointer to a VSI context struct
1011 * @cd: pointer to command details structure or NULL
1013 * Update VSI context in the hardware
/* Validate the handle, resolve it to the HW VSI number, and forward the
 * update to ice_aq_update_vsi (0x0211).
 * NOTE(review): the return-type line and braces are elided in this view.
 */
1016 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1017 struct ice_sq_cd *cd)
1019 if (!ice_is_vsi_valid(hw, vsi_handle))
1020 return ICE_ERR_PARAM;
1021 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1022 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1026 * ice_aq_get_vsi_params
1027 * @hw: pointer to the HW struct
1028 * @vsi_ctx: pointer to a VSI context struct
1029 * @cd: pointer to command details structure or NULL
1031 * Get VSI context info from hardware (0x0212)
/* Issue the Get VSI Parameters AQ command (0x0212); on success, vsi_ctx->info
 * is filled by FW and the VSI number / allocation counters are parsed from
 * the response descriptor.
 * NOTE(review): return-type line and the status check guarding the response
 * parsing are elided in this view.
 */
1034 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1035 struct ice_sq_cd *cd)
1037 struct ice_aqc_add_get_update_free_vsi *cmd;
1038 struct ice_aqc_get_vsi_resp *resp;
1039 struct ice_aq_desc desc;
1040 enum ice_status status;
1042 cmd = &desc.params.vsi_cmd;
1043 resp = &desc.params.get_vsi_resp;
1045 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1047 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1049 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1050 sizeof(vsi_ctx->info), cd);
1052 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1054 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1055 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1062 * ice_aq_add_update_mir_rule - add/update a mirror rule
1063 * @hw: pointer to the HW struct
1064 * @rule_type: Rule Type
1065 * @dest_vsi: VSI number to which packets will be mirrored
1066 * @count: length of the list
1067 * @mr_buf: buffer for list of mirrored VSI numbers
1068 * @cd: pointer to command details structure or NULL
1071 * Add/Update Mirror Rule (0x260).
/* Add or update a mirror rule (AQ 0x260). For VPORT rule types a list of
 * mirrored VSI numbers is marshalled into a little-endian buffer; PPORT rule
 * types take no list. *rule_id is in/out: pass ICE_INVAL_MIRROR_RULE_ID to
 * add a new rule, otherwise the rule to update; on success the FW-assigned
 * rule ID is written back.
 * NOTE(review): return-type line, braces, some NULL checks and the 'add'
 * branch condition around line 1138 are elided in this view.
 */
1074 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1075 u16 count, struct ice_mir_rule_buf *mr_buf,
1076 struct ice_sq_cd *cd, u16 *rule_id)
1078 struct ice_aqc_add_update_mir_rule *cmd;
1079 struct ice_aq_desc desc;
1080 enum ice_status status;
1081 __le16 *mr_list = NULL;
1084 switch (rule_type) {
1085 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1086 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1087 /* Make sure count and mr_buf are set for these rule_types */
1088 if (!(count && mr_buf))
1089 return ICE_ERR_PARAM;
1091 buf_size = count * sizeof(__le16);
1092 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1094 return ICE_ERR_NO_MEMORY;
1096 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1097 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1098 /* Make sure count and mr_buf are not set for these
1101 if (count || mr_buf)
1102 return ICE_ERR_PARAM;
/* default case: any other rule type is rejected */
1105 ice_debug(hw, ICE_DBG_SW,
1106 "Error due to unsupported rule_type %u\n", rule_type);
1107 return ICE_ERR_OUT_OF_RANGE;
1110 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1112 /* Pre-process 'mr_buf' items for add/update of virtual port
1113 * ingress/egress mirroring (but not physical port ingress/egress
1119 for (i = 0; i < count; i++) {
1122 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1124 /* Validate specified VSI number, make sure it is less
1125 * than ICE_MAX_VSI, if not return with error.
1127 if (id >= ICE_MAX_VSI) {
1128 ice_debug(hw, ICE_DBG_SW,
1129 "Error VSI index (%u) out-of-range\n",
1131 ice_free(hw, mr_list);
1132 return ICE_ERR_OUT_OF_RANGE;
1135 /* add VSI to mirror rule */
1138 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1139 else /* remove VSI from mirror rule */
1140 mr_list[i] = CPU_TO_LE16(id);
/* fill the command descriptor; the rule-ID-valid flag marks an update */
1144 cmd = &desc.params.add_update_rule;
1145 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1146 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1147 ICE_AQC_RULE_ID_VALID_M);
1148 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1149 cmd->num_entries = CPU_TO_LE16(count);
1150 cmd->dest = CPU_TO_LE16(dest_vsi);
1152 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1154 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1156 ice_free(hw, mr_list);
1162 * ice_aq_delete_mir_rule - delete a mirror rule
1163 * @hw: pointer to the HW struct
1164 * @rule_id: Mirror rule ID (to be deleted)
1165 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1166 * otherwise it is returned to the shared pool
1167 * @cd: pointer to command details structure or NULL
1169 * Delete Mirror Rule (0x261).
/* Delete a mirror rule (AQ 0x261). NOTE(review): the return-type line and
 * the 'if (keep_allocd)' conditional guarding the flags assignment are
 * elided in this view — verify against the full source.
 */
1172 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1173 struct ice_sq_cd *cd)
1175 struct ice_aqc_delete_mir_rule *cmd;
1176 struct ice_aq_desc desc;
1178 /* rule_id should be in the range 0...63 */
1179 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1180 return ICE_ERR_OUT_OF_RANGE;
1182 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1184 cmd = &desc.params.del_rule;
/* FW requires the valid bit set alongside the rule ID being deleted */
1185 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1186 cmd->rule_id = CPU_TO_LE16(rule_id);
1189 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1191 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1195 * ice_aq_alloc_free_vsi_list
1196 * @hw: pointer to the HW struct
1197 * @vsi_list_id: VSI list ID returned or used for lookup
1198 * @lkup_type: switch rule filter lookup type
1199 * @opc: switch rules population command type - pass in the command opcode
1201 * allocates or free a VSI list resource
1203 static enum ice_status
1204 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1205 enum ice_sw_lkup_type lkup_type,
1206 enum ice_adminq_opc opc)
1208 struct ice_aqc_alloc_free_res_elem *sw_buf;
1209 struct ice_aqc_res_elem *vsi_ele;
1210 enum ice_status status;
/* single-element alloc/free request buffer */
1213 buf_len = sizeof(*sw_buf);
1214 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1215 ice_malloc(hw, buf_len);
1217 return ICE_ERR_NO_MEMORY;
1218 sw_buf->num_elems = CPU_TO_LE16(1);
/* Resource type depends on the lookup: most lookups use a VSI-list
 * replication resource; VLAN lookups use a prune-list resource.
 */
1220 if (lkup_type == ICE_SW_LKUP_MAC ||
1221 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1222 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1223 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1224 lkup_type == ICE_SW_LKUP_PROMISC ||
1225 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1226 lkup_type == ICE_SW_LKUP_LAST) {
1227 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1228 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1230 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1232 status = ICE_ERR_PARAM;
1233 goto ice_aq_alloc_free_vsi_list_exit;
/* On free, tell FW which list ID to release */
1236 if (opc == ice_aqc_opc_free_res)
1237 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1239 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1241 goto ice_aq_alloc_free_vsi_list_exit;
/* On alloc, FW returns the newly assigned list ID in sw_resp */
1243 if (opc == ice_aqc_opc_alloc_res) {
1244 vsi_ele = &sw_buf->elem[0];
1245 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1248 ice_aq_alloc_free_vsi_list_exit:
1249 ice_free(hw, sw_buf);
1254 * ice_aq_set_storm_ctrl - Sets storm control configuration
1255 * @hw: pointer to the HW struct
1256 * @bcast_thresh: represents the upper threshold for broadcast storm control
1257 * @mcast_thresh: represents the upper threshold for multicast storm control
1258 * @ctl_bitmask: storm control control knobs
1260 * Sets the storm control configuration (0x0280)
1263 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1266 struct ice_aqc_storm_cfg *cmd;
1267 struct ice_aq_desc desc;
1269 cmd = &desc.params.storm_conf;
1271 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the HW-supported width before sending */
1273 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1274 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1275 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1277 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1281 * ice_aq_get_storm_ctrl - gets storm control configuration
1282 * @hw: pointer to the HW struct
1283 * @bcast_thresh: represents the upper threshold for broadcast storm control
1284 * @mcast_thresh: represents the upper threshold for multicast storm control
1285 * @ctl_bitmask: storm control control knobs
1287 * Gets the storm control configuration (0x0281)
1290 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1293 enum ice_status status;
1294 struct ice_aq_desc desc;
1296 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1298 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* NOTE(review): the success check and per-pointer NULL guards appear
 * elided in this listing -- the out-params are presumably filled only
 * on success and only when non-NULL; confirm upstream.
 */
1300 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1303 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1306 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1309 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1316 * ice_aq_sw_rules - add/update/remove switch rules
1317 * @hw: pointer to the HW struct
1318 * @rule_list: pointer to switch rule population list
1319 * @rule_list_sz: total size of the rule list in bytes
1320 * @num_rules: number of switch rules in the rule_list
1321 * @opc: switch rules population command type - pass in the command opcode
1322 * @cd: pointer to command details structure or NULL
1324 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1326 static enum ice_status
1327 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1328 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1330 struct ice_aq_desc desc;
1332 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
/* Only the three switch-rule opcodes are valid for this helper */
1334 if (opc != ice_aqc_opc_add_sw_rules &&
1335 opc != ice_aqc_opc_update_sw_rules &&
1336 opc != ice_aqc_opc_remove_sw_rules)
1337 return ICE_ERR_PARAM;
1339 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: the buffer carries data from driver to firmware */
1341 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1342 desc.params.sw_rules.num_rules_fltr_entry_index =
1343 CPU_TO_LE16(num_rules);
1344 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1348 * ice_aq_add_recipe - add switch recipe
1349 * @hw: pointer to the HW struct
1350 * @s_recipe_list: pointer to switch rule population list
1351 * @num_recipes: number of switch recipes in the list
1352 * @cd: pointer to command details structure or NULL
1357 ice_aq_add_recipe(struct ice_hw *hw,
1358 struct ice_aqc_recipe_data_elem *s_recipe_list,
1359 u16 num_recipes, struct ice_sq_cd *cd)
1361 struct ice_aqc_add_get_recipe *cmd;
1362 struct ice_aq_desc desc;
1365 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1366 cmd = &desc.params.add_get_recipe;
1367 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1369 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: recipe data flows from driver to firmware */
1370 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1372 buf_size = num_recipes * sizeof(*s_recipe_list);
1374 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1378 * ice_aq_get_recipe - get switch recipe
1379 * @hw: pointer to the HW struct
1380 * @s_recipe_list: pointer to switch rule population list
1381 * @num_recipes: pointer to the number of recipes (input and output)
1382 * @recipe_root: root recipe number of recipe(s) to retrieve
1383 * @cd: pointer to command details structure or NULL
1387 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1388 * On output, *num_recipes will equal the number of entries returned in
1391 * The caller must supply enough space in s_recipe_list to hold all possible
1392 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1395 ice_aq_get_recipe(struct ice_hw *hw,
1396 struct ice_aqc_recipe_data_elem *s_recipe_list,
1397 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1399 struct ice_aqc_add_get_recipe *cmd;
1400 struct ice_aq_desc desc;
1401 enum ice_status status;
/* caller must size the buffer for the worst case (see header comment) */
1404 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1405 return ICE_ERR_PARAM;
1407 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1408 cmd = &desc.params.add_get_recipe;
1409 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1411 cmd->return_index = CPU_TO_LE16(recipe_root);
1412 cmd->num_sub_recipes = 0;
1414 buf_size = *num_recipes * sizeof(*s_recipe_list);
1416 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1417 /* cppcheck-suppress constArgument */
/* FW writes the actual count back into the descriptor */
1418 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1424 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1425 * @hw: pointer to the HW struct
1426 * @profile_id: package profile ID to associate the recipe with
1427 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1428 * @cd: pointer to command details structure or NULL
1429 * Recipe to profile association (0x0291)
1432 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1433 struct ice_sq_cd *cd)
1435 struct ice_aqc_recipe_to_profile *cmd;
1436 struct ice_aq_desc desc;
1438 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1439 cmd = &desc.params.recipe_to_profile;
1440 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
/* profile_id is a u32 parameter but the AQ field is 16-bit; the upper
 * bits are dropped by CPU_TO_LE16 -- callers pass small IDs.
 */
1441 cmd->profile_id = CPU_TO_LE16(profile_id);
1442 /* Set the recipe ID bit in the bitmask to let the device know which
1443 * profile we are associating the recipe to
1445 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1446 ICE_NONDMA_TO_NONDMA);
1448 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1452 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1453 * @hw: pointer to the HW struct
1454 * @profile_id: package profile ID to associate the recipe with
1455 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1456 * @cd: pointer to command details structure or NULL
1457 * Associate profile ID with given recipe (0x0293)
1460 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1461 struct ice_sq_cd *cd)
1463 struct ice_aqc_recipe_to_profile *cmd;
1464 struct ice_aq_desc desc;
1465 enum ice_status status;
1467 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1468 cmd = &desc.params.recipe_to_profile;
1469 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1470 cmd->profile_id = CPU_TO_LE16(profile_id);
1472 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* NOTE(review): a "if (!status)" guard before the copy-out appears
 * elided in this listing -- confirm upstream before relying on r_bitmap
 * contents after a failed command.
 */
1474 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1475 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1481 * ice_alloc_recipe - add recipe resource
1482 * @hw: pointer to the hardware structure
1483 * @rid: recipe ID returned as response to AQ call
1485 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1487 struct ice_aqc_alloc_free_res_elem *sw_buf;
1488 enum ice_status status;
1491 buf_len = sizeof(*sw_buf);
1492 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1494 return ICE_ERR_NO_MEMORY;
1496 sw_buf->num_elems = CPU_TO_LE16(1);
/* Request a shared recipe resource from the global pool */
1497 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1498 ICE_AQC_RES_TYPE_S) |
1499 ICE_AQC_RES_TYPE_FLAG_SHARED);
1500 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1501 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated recipe ID in the element's sw_resp field */
1503 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1504 ice_free(hw, sw_buf);
1509 /* ice_init_port_info - Initialize port_info with switch configuration data
1510 * @pi: pointer to port_info
1511 * @vsi_port_num: VSI number or port number
1512 * @type: Type of switch element (port or VSI)
1513 * @swid: switch ID of the switch the element is attached to
1514 * @pf_vf_num: PF or VF number
1515 * @is_vf: true if the element is a VF, false otherwise
1518 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1519 u16 swid, u16 pf_vf_num, bool is_vf)
/* switch on the element type reported by get-switch-config */
1522 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1523 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1525 pi->pf_vf_num = pf_vf_num;
/* no default VSIs assigned yet for this port */
1527 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1528 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
/* any other element type is unexpected here */
1531 ice_debug(pi->hw, ICE_DBG_SW,
1532 "incorrect VSI/port type received\n");
1537 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1538 * @hw: pointer to the hardware structure
1540 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1542 struct ice_aqc_get_sw_cfg_resp *rbuf;
1543 enum ice_status status;
1544 u16 num_total_ports;
/* this driver instance expects exactly one physical port */
1550 num_total_ports = 1;
1552 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1553 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1556 return ICE_ERR_NO_MEMORY;
1558 /* Multiple calls to ice_aq_get_sw_cfg may be required
1559 * to get all the switch configuration information. The need
1560 * for additional calls is indicated by ice_aq_get_sw_cfg
1561 * writing a non-zero value in req_desc
1564 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1565 &req_desc, &num_elems, NULL);
/* walk every element returned in this response page */
1570 for (i = 0; i < num_elems; i++) {
1571 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1572 u16 pf_vf_num, swid, vsi_port_num;
1576 ele = rbuf[i].elements;
/* decode the packed VSI/port number, function number and SWID */
1577 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1578 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1580 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1581 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1583 swid = LE16_TO_CPU(ele->swid);
1585 if (LE16_TO_CPU(ele->pf_vf_num) &
1586 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
/* element type lives in the upper bits of vsi_port_num */
1589 type = LE16_TO_CPU(ele->vsi_port_num) >>
1590 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1593 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1594 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1595 if (j == num_total_ports) {
1596 ice_debug(hw, ICE_DBG_SW,
1597 "more ports than expected\n");
1598 status = ICE_ERR_CFG;
1601 ice_init_port_info(hw->port_info,
1602 vsi_port_num, type, swid,
/* keep paging until FW reports no further descriptors pending */
1610 } while (req_desc && !status);
1614 ice_free(hw, (void *)rbuf);
1620 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1621 * @hw: pointer to the hardware structure
1622 * @fi: filter info structure to fill/update
1624 * This helper function populates the lb_en and lan_en elements of the provided
1625 * ice_fltr_info struct using the switch's type and characteristics of the
1626 * switch rule being configured.
1628 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Only Tx-direction forwarding rules need lb_en/lan_en adjustment */
1632 if ((fi->flag & ICE_FLTR_TX) &&
1633 (fi->fltr_act == ICE_FWD_TO_VSI ||
1634 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1635 fi->fltr_act == ICE_FWD_TO_Q ||
1636 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1637 /* Setting LB for prune actions will result in replicated
1638 * packets to the internal switch that will be dropped.
1640 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1643 /* Set lan_en to TRUE if
1644 * 1. The switch is a VEB AND
1646 * 2.1 The lookup is a directional lookup like ethertype,
1647 * promiscuous, ethertype-MAC, promiscuous-VLAN
1648 * and default-port OR
1649 * 2.2 The lookup is VLAN, OR
1650 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1651 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1655 * The switch is a VEPA.
1657 * In all other cases, the LAN enable has to be set to false.
1660 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1661 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1662 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1663 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1664 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1665 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1666 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1667 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1668 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1669 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1678 * ice_ilog2 - Calculates integer log base 2 of a number
1679 * @n: number on which to perform operation
/* Returns the index of the highest set bit, i.e. floor(log2(n));
 * scans from bit 63 downward. NOTE(review): the return statements are
 * elided in this listing -- presumably returns -1 when n == 0.
 */
1681 static int ice_ilog2(u64 n)
1685 for (i = 63; i >= 0; i--)
1686 if (((u64)1 << i) & n)
1693 * ice_fill_sw_rule - Helper function to fill switch rule structure
1694 * @hw: pointer to the hardware structure
1695 * @f_info: entry containing packet forwarding information
1696 * @s_rule: switch rule structure to be filled in based on mac_entry
1697 * @opc: switch rules population command type - pass in the command opcode
1700 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1701 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
1703 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* For removals only the rule index matters; no header/action needed */
1711 if (opc == ice_aqc_opc_remove_sw_rules) {
1712 s_rule->pdata.lkup_tx_rx.act = 0;
1713 s_rule->pdata.lkup_tx_rx.index =
1714 CPU_TO_LE16(f_info->fltr_rule_id);
1715 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1719 eth_hdr_sz = sizeof(dummy_eth_header);
1720 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1722 /* initialize the ether header with a dummy header */
1723 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1724 ice_fill_sw_info(hw, f_info);
/* Build the 32-bit action word according to the forwarding action */
1726 switch (f_info->fltr_act) {
1727 case ICE_FWD_TO_VSI:
1728 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1729 ICE_SINGLE_ACT_VSI_ID_M;
1730 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1731 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1732 ICE_SINGLE_ACT_VALID_BIT;
1734 case ICE_FWD_TO_VSI_LIST:
1735 act |= ICE_SINGLE_ACT_VSI_LIST;
1736 act |= (f_info->fwd_id.vsi_list_id <<
1737 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1738 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1739 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1740 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1741 ICE_SINGLE_ACT_VALID_BIT;
1744 act |= ICE_SINGLE_ACT_TO_Q;
1745 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1746 ICE_SINGLE_ACT_Q_INDEX_M;
1748 case ICE_DROP_PACKET:
1749 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1750 ICE_SINGLE_ACT_VALID_BIT;
1752 case ICE_FWD_TO_QGRP:
/* queue region size is encoded as a power of two in the action */
1753 q_rgn = f_info->qgrp_size > 0 ?
1754 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1755 act |= ICE_SINGLE_ACT_TO_Q;
1756 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1757 ICE_SINGLE_ACT_Q_INDEX_M;
1758 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1759 ICE_SINGLE_ACT_Q_REGION_M;
/* lb_en/lan_en were decided by ice_fill_sw_info() above */
1766 act |= ICE_SINGLE_ACT_LB_ENABLE;
1768 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick the match fields (DA/VLAN/ethertype) based on the lookup type */
1770 switch (f_info->lkup_type) {
1771 case ICE_SW_LKUP_MAC:
1772 daddr = f_info->l_data.mac.mac_addr;
1774 case ICE_SW_LKUP_VLAN:
1775 vlan_id = f_info->l_data.vlan.vlan_id;
1776 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1777 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1778 act |= ICE_SINGLE_ACT_PRUNE;
1779 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1782 case ICE_SW_LKUP_ETHERTYPE_MAC:
1783 daddr = f_info->l_data.ethertype_mac.mac_addr;
1785 case ICE_SW_LKUP_ETHERTYPE:
1786 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1787 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1789 case ICE_SW_LKUP_MAC_VLAN:
1790 daddr = f_info->l_data.mac_vlan.mac_addr;
1791 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1793 case ICE_SW_LKUP_PROMISC_VLAN:
1794 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1796 case ICE_SW_LKUP_PROMISC:
1797 daddr = f_info->l_data.mac_vlan.mac_addr;
1803 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1804 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1805 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1807 /* Recipe set depending on lookup type */
1808 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1809 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1810 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* NOTE(review): a "if (daddr)" guard before this copy appears elided
 * in this listing -- confirm upstream.
 */
1813 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1814 ICE_NONDMA_TO_NONDMA);
/* Program the VLAN TCI only if a valid VLAN ID was selected above */
1816 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1817 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1818 *off = CPU_TO_BE16(vlan_id);
1821 /* Create the switch rule with the final dummy Ethernet header */
1822 if (opc != ice_aqc_opc_update_sw_rules)
1823 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1827 * ice_add_marker_act
1828 * @hw: pointer to the hardware structure
1829 * @m_ent: the management entry for which sw marker needs to be added
1830 * @sw_marker: sw marker to tag the Rx descriptor with
1831 * @l_id: large action resource ID
1833 * Create a large action to hold software marker and update the switch rule
1834 * entry pointed by m_ent with newly created large action
1836 static enum ice_status
1837 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1838 u16 sw_marker, u16 l_id)
1840 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1841 /* For software marker we need 3 large actions
1842 * 1. FWD action: FWD TO VSI or VSI LIST
1843 * 2. GENERIC VALUE action to hold the profile ID
1844 * 3. GENERIC VALUE action to hold the software marker ID
1846 const u16 num_lg_acts = 3;
1847 enum ice_status status;
/* markers are only supported on MAC lookup rules */
1853 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1854 return ICE_ERR_PARAM;
1856 /* Create two back-to-back switch rules and submit them to the HW using
1857 * one memory buffer:
1861 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1862 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1863 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
1865 return ICE_ERR_NO_MEMORY;
/* second rule lives immediately after the large action in the buffer */
1867 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1869 /* Fill in the first switch rule i.e. large action */
1870 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1871 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1872 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
1874 /* First action VSI forwarding or VSI list forwarding depending on how
1877 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1878 m_ent->fltr_info.fwd_id.hw_vsi_id;
1880 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1881 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1882 ICE_LG_ACT_VSI_LIST_ID_M;
1883 if (m_ent->vsi_count > 1)
1884 act |= ICE_LG_ACT_VSI_LIST;
1885 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1887 /* Second action descriptor type */
1888 act = ICE_LG_ACT_GENERIC;
1890 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1891 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* marker goes into the Rx descriptor profile index slot */
1893 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1894 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1896 /* Third action Marker value */
1897 act |= ICE_LG_ACT_GENERIC;
1898 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1899 ICE_LG_ACT_GENERIC_VALUE_M;
1901 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
1903 /* call the fill switch rule to fill the lookup Tx Rx structure */
1904 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1905 ice_aqc_opc_update_sw_rules);
1907 /* Update the action to point to the large action ID */
1908 rx_tx->pdata.lkup_tx_rx.act =
1909 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
1910 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1911 ICE_SINGLE_ACT_PTR_VAL_M));
1913 /* Use the filter rule ID of the previously created rule with single
1914 * act. Once the update happens, hardware will treat this as large
1917 rx_tx->pdata.lkup_tx_rx.index =
1918 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
1920 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1921 ice_aqc_opc_update_sw_rules, NULL);
/* record the large action and marker in the management entry */
1923 m_ent->lg_act_idx = l_id;
1924 m_ent->sw_marker_id = sw_marker;
1927 ice_free(hw, lg_act);
1932 * ice_add_counter_act - add/update filter rule with counter action
1933 * @hw: pointer to the hardware structure
1934 * @m_ent: the management entry for which counter needs to be added
1935 * @counter_id: VLAN counter ID returned as part of allocate resource
1936 * @l_id: large action resource ID
1938 static enum ice_status
1939 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1940 u16 counter_id, u16 l_id)
1942 struct ice_aqc_sw_rules_elem *lg_act;
1943 struct ice_aqc_sw_rules_elem *rx_tx;
1944 enum ice_status status;
1945 /* 2 actions will be added while adding a large action counter */
1946 const int num_acts = 2;
/* counters are only supported on MAC lookup rules */
1953 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1954 return ICE_ERR_PARAM;
1956 /* Create two back-to-back switch rules and submit them to the HW using
1957 * one memory buffer:
1961 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
1962 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1963 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
1966 return ICE_ERR_NO_MEMORY;
/* second rule lives immediately after the large action in the buffer */
1968 rx_tx = (struct ice_aqc_sw_rules_elem *)
1969 ((u8 *)lg_act + lg_act_size);
1971 /* Fill in the first switch rule i.e. large action */
1972 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1973 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1974 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
1976 /* First action VSI forwarding or VSI list forwarding depending on how
1979 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1980 m_ent->fltr_info.fwd_id.hw_vsi_id;
1982 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1983 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1984 ICE_LG_ACT_VSI_LIST_ID_M;
1985 if (m_ent->vsi_count > 1)
1986 act |= ICE_LG_ACT_VSI_LIST;
1987 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1989 /* Second action counter ID */
1990 act = ICE_LG_ACT_STAT_COUNT;
1991 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
1992 ICE_LG_ACT_STAT_COUNT_M;
1993 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1995 /* call the fill switch rule to fill the lookup Tx Rx structure */
1996 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1997 ice_aqc_opc_update_sw_rules);
/* point the lookup rule's single action at the large action */
1999 act = ICE_SINGLE_ACT_PTR;
2000 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2001 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2003 /* Use the filter rule ID of the previously created rule with single
2004 * act. Once the update happens, hardware will treat this as large
2007 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2008 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2010 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2011 ice_aqc_opc_update_sw_rules, NULL);
/* record the large action and counter in the management entry */
2013 m_ent->lg_act_idx = l_id;
2014 m_ent->counter_index = counter_id;
2017 ice_free(hw, lg_act);
2022 * ice_create_vsi_list_map
2023 * @hw: pointer to the hardware structure
2024 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2025 * @num_vsi: number of VSI handles in the array
2026 * @vsi_list_id: VSI list ID generated as part of allocate resource
2028 * Helper function to create a new entry of VSI list ID to VSI mapping
2029 * using the given VSI list ID
2031 static struct ice_vsi_list_map_info *
2032 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2035 struct ice_switch_info *sw = hw->switch_info;
2036 struct ice_vsi_list_map_info *v_map;
2039 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2044 v_map->vsi_list_id = vsi_list_id;
/* record each member VSI handle in the map's bitmap */
2046 for (i = 0; i < num_vsi; i++)
2047 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* track the new map on the switch-wide list of VSI list mappings */
2049 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2054 * ice_update_vsi_list_rule
2055 * @hw: pointer to the hardware structure
2056 * @vsi_handle_arr: array of VSI handles to form a VSI list
2057 * @num_vsi: number of VSI handles in the array
2058 * @vsi_list_id: VSI list ID generated as part of allocate resource
2059 * @remove: Boolean value to indicate if this is a remove action
2060 * @opc: switch rules population command type - pass in the command opcode
2061 * @lkup_type: lookup type of the filter
2063 * Call AQ command to add a new switch rule or update existing switch rule
2064 * using the given VSI list ID
2066 static enum ice_status
2067 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2068 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2069 enum ice_sw_lkup_type lkup_type)
2071 struct ice_aqc_sw_rules_elem *s_rule;
2072 enum ice_status status;
2078 return ICE_ERR_PARAM;
/* Rule type mirrors the resource type chosen at list allocation:
 * set/clear for replication lists, prune set/clear for VLAN lists.
 */
2080 if (lkup_type == ICE_SW_LKUP_MAC ||
2081 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2082 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2083 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2084 lkup_type == ICE_SW_LKUP_PROMISC ||
2085 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2086 lkup_type == ICE_SW_LKUP_LAST)
2087 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2088 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2089 else if (lkup_type == ICE_SW_LKUP_VLAN)
2090 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2091 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2093 return ICE_ERR_PARAM;
2095 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2096 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2098 return ICE_ERR_NO_MEMORY;
2099 for (i = 0; i < num_vsi; i++) {
2100 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2101 status = ICE_ERR_PARAM;
2104 /* AQ call requires hw_vsi_id(s) */
2105 s_rule->pdata.vsi_list.vsi[i] =
2106 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2109 s_rule->type = CPU_TO_LE16(type);
2110 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2111 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2113 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2116 ice_free(hw, s_rule);
2121 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2122 * @hw: pointer to the HW struct
2123 * @vsi_handle_arr: array of VSI handles to form a VSI list
2124 * @num_vsi: number of VSI handles in the array
2125 * @vsi_list_id: stores the ID of the VSI list to be created
2126 * @lkup_type: switch rule filter's lookup type
2128 static enum ice_status
2129 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2130 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2132 enum ice_status status;
/* First reserve a VSI list resource; FW returns its ID in vsi_list_id */
2134 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2135 ice_aqc_opc_alloc_res);
2139 /* Update the newly created VSI list to include the specified VSIs */
2140 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2141 *vsi_list_id, false,
2142 ice_aqc_opc_add_sw_rules, lkup_type);
2146 * ice_create_pkt_fwd_rule
2147 * @hw: pointer to the hardware structure
2148 * @f_entry: entry containing packet forwarding information
2150 * Create switch rule with given filter information and add an entry
2151 * to the corresponding filter management list to track this switch rule
2154 static enum ice_status
2155 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2156 struct ice_fltr_list_entry *f_entry)
2158 struct ice_fltr_mgmt_list_entry *fm_entry;
2159 struct ice_aqc_sw_rules_elem *s_rule;
2160 enum ice_sw_lkup_type l_type;
2161 struct ice_sw_recipe *recp;
2162 enum ice_status status;
2164 s_rule = (struct ice_aqc_sw_rules_elem *)
2165 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2167 return ICE_ERR_NO_MEMORY;
2168 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2169 ice_malloc(hw, sizeof(*fm_entry));
2171 status = ICE_ERR_NO_MEMORY;
2172 goto ice_create_pkt_fwd_rule_exit;
/* book-keeping entry mirrors the caller's filter info */
2175 fm_entry->fltr_info = f_entry->fltr_info;
2177 /* Initialize all the fields for the management entry */
2178 fm_entry->vsi_count = 1;
2179 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2180 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2181 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2183 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2184 ice_aqc_opc_add_sw_rules);
2186 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2187 ice_aqc_opc_add_sw_rules, NULL);
/* on AQ failure, drop the management entry before exiting */
2189 ice_free(hw, fm_entry);
2190 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the assigned rule ID in the rule's index field; record it
 * in both the caller's entry and the management entry
 */
2193 f_entry->fltr_info.fltr_rule_id =
2194 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2195 fm_entry->fltr_info.fltr_rule_id =
2196 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2198 /* The book keeping entries will get removed when base driver
2199 * calls remove filter AQ command
2201 l_type = fm_entry->fltr_info.lkup_type;
2202 recp = &hw->switch_info->recp_list[l_type];
2203 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2205 ice_create_pkt_fwd_rule_exit:
2206 ice_free(hw, s_rule);
2211 * ice_update_pkt_fwd_rule
2212 * @hw: pointer to the hardware structure
2213 * @f_info: filter information for switch rule
2215 * Call AQ command to update a previously created switch rule with a
2218 static enum ice_status
2219 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2221 struct ice_aqc_sw_rules_elem *s_rule;
2222 enum ice_status status;
2224 s_rule = (struct ice_aqc_sw_rules_elem *)
2225 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2227 return ICE_ERR_NO_MEMORY;
2229 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* updates address an existing rule by its FW-assigned rule ID */
2231 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2233 /* Update switch rule with new rule set to forward VSI list */
2234 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2235 ice_aqc_opc_update_sw_rules, NULL);
2237 ice_free(hw, s_rule);
2242 * ice_update_sw_rule_bridge_mode
2243 * @hw: pointer to the HW struct
2245 * Updates unicast switch filter rules based on VEB/VEPA mode
2247 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2249 struct ice_switch_info *sw = hw->switch_info;
2250 struct ice_fltr_mgmt_list_entry *fm_entry;
2251 enum ice_status status = ICE_SUCCESS;
2252 struct LIST_HEAD_TYPE *rule_head;
2253 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* only MAC lookup rules are affected by the bridge mode change */
2255 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2256 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2258 ice_acquire_lock(rule_lock);
2259 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2261 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2262 u8 *addr = fi->l_data.mac.mac_addr;
2264 /* Update unicast Tx rules to reflect the selected
2267 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2268 (fi->fltr_act == ICE_FWD_TO_VSI ||
2269 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2270 fi->fltr_act == ICE_FWD_TO_Q ||
2271 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* re-send the rule so lb_en/lan_en get recomputed for the new mode */
2272 status = ice_update_pkt_fwd_rule(hw, fi);
2278 ice_release_lock(rule_lock);
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;

	/* Queue/queue-group destinations cannot be aggregated in a VSI list */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	/* Mixing queue destinations with VSI/VSI-list destinations is
	 * likewise not implemented.
	 */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  new_fltr->lkup_type);
		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* Keep the book keeping entry in sync with the HW rule */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
				ice_add_marker_act(hw, m_entry,
						   m_entry->sw_marker_id,
						   m_entry->lg_act_idx);
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	m_entry->vsi_count++;
/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct LIST_HEAD_TYPE *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
		/* Match on the lookup data and the Rx/Tx flag; forwarding
		 * destination is deliberately not part of the key here.
		 */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct LIST_HEAD_TYPE *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	/* Advanced recipes store a different list-entry type, so the walk
	 * is duplicated for each entry layout.
	 */
	if (sw->recp_list[recp_id].adv_rule) {
		struct ice_adv_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
			if (list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
					*vsi_list_id = map_info->vsi_list_id;
		struct ice_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_fltr_mgmt_list_entry,
			/* only consider single-VSI lists, per the contract
			 * described in the function header
			 */
			if (list_itr->vsi_count == 1 &&
			    list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
					*vsi_list_id = map_info->vsi_list_id;
/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
		f_entry->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Rx rules are sourced from the port, Tx rules from the VSI */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	/* No existing rule for this lookup: create one; otherwise fold the
	 * new VSI into the existing rule's VSI list.
	 */
	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
		status = ice_create_pkt_fwd_rule(hw, f_entry);
		goto exit_add_rule_internal;

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);

exit_add_rule_internal:
	ice_release_lock(rule_lock);
/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	/* VSI_LIST_SIZE(0): no VSI entries follow the header for a clear */
	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
		return ICE_ERR_NO_MEMORY;

	s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	ice_free(hw, s_rule);
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* remove (true) the VSI from the existing VSI list rule */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* Non-VLAN rules collapse back to a plain FWD_TO_VSI rule once a
	 * single VSI remains in the list.
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);

		/* book keeping entry now reflects the FWD_TO_VSI form */
		fm_list->fltr_info = tmp_fltr_info;

	/* VLAN lists are removed when empty; others when one VSI remains
	 * (it was already converted to FWD_TO_VSI above).
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 *
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	ice_acquire_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
		status = ICE_ERR_DOES_NOT_EXIST;

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)

		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
			status = ICE_ERR_NO_MEMORY;

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		ice_free(hw, s_rule);

		LIST_DEL(&list_elem->list_entry);
		ice_free(hw, list_elem);
	ice_release_lock(rule_lock);
/**
 * ice_aq_get_res_alloc - get allocated resources
 * @hw: pointer to the HW struct
 * @num_entries: pointer to u16 to store the number of resource entries returned
 * @buf: pointer to user-supplied buffer
 * @buf_size: size of buff
 * @cd: pointer to command details structure or NULL
 *
 * The user-supplied buffer must be large enough to store the resource
 * information for all resource types. Each resource type is an
 * ice_aqc_get_res_resp_data_elem structure.
 */
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
		     u16 buf_size, struct ice_sq_cd *cd)
	struct ice_aqc_get_res_alloc *resp;
	enum ice_status status;
	struct ice_aq_desc desc;

		return ICE_ERR_BAD_PTR;

	if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
		return ICE_ERR_INVAL_SIZE;

	resp = &desc.params.get_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	/* num_entries is optional — only written on success when provided */
	if (!status && num_entries)
		*num_entries = LE16_TO_CPU(resp->resp_elem_num);
/**
 * ice_aq_get_res_descs - get allocated resource descriptors
 * @hw: pointer to the hardware structure
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @res_type: resource type
 * @res_shared: is resource shared
 * @desc_id: input - first desc ID to start; output - next desc ID
 * @cd: pointer to command details structure or NULL
 */
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
		     struct ice_aqc_get_allocd_res_desc_resp *buf,
		     u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
		     struct ice_sq_cd *cd)
	struct ice_aqc_get_allocd_res_desc *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");

	cmd = &desc.params.get_res_desc;

		return ICE_ERR_PARAM;

	/* buffer must hold exactly num_entries response elements */
	if (buf_size != (num_entries * sizeof(*buf)))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);

	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
					ICE_AQC_RES_TYPE_M) | (res_shared ?
					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
	cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
		/* pass back the next descriptor ID for continuation calls */
		*desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct LIST_HEAD_TYPE *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u16 num_unicast = 0;

		return ICE_ERR_PARAM;

	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* First pass: validate each entry; multicast (and shared-unicast)
	 * addresses are added one at a time via ice_add_rule_internal,
	 * exclusive unicast addresses are counted for a later bulk add.
	 */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    IS_ZERO_ETHER_ADDR(add))
			return ICE_ERR_PARAM;
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			ice_acquire_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			ice_release_lock(rule_lock);
		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
			   (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
			if (m_list_itr->status)
				return m_list_itr->status;

	ice_acquire_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
		status = ICE_SUCCESS;
		goto ice_add_mac_exit;

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_calloc(hw, num_unicast, s_rule_size);
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;

	/* Second pass: fill one rule element per exclusive unicast address */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);

	/* Call AQ bulk switch rule update for all unicast addresses */
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* cap each AQ call at ICE_AQ_MAX_BUF_LEN worth of rules */
		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));

	/* Fill up rule ID based on the value returned from FW */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			f_info->fltr_rule_id =
				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = (struct ice_fltr_mgmt_list_entry *)
				ice_malloc(hw, sizeof(*fm_entry));
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */
			LIST_ADD(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);

	ice_release_lock(rule_lock);
		ice_free(hw, s_rule);
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
				status = ice_create_vsi_list_rule(hw,
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;

		status = ice_create_pkt_fwd_rule(hw, f_entry);
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
				status = ICE_ERR_DOES_NOT_EXIST;
			/* reuse VSI list for new rule and increment ref_cnt */
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;

			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		v_list_itr->vsi_count++;

	ice_release_lock(rule_lock);
/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
	struct ice_fltr_list_entry *v_list_itr;

		return ICE_ERR_PARAM;

	LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		/* per-entry result is stored on the entry itself */
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
/**
 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
 * @hw: pointer to the hardware structure
 * @mv_list: list of MAC and VLAN filters
 *
 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
 * pruning bits enabled, then it is the responsibility of the caller to make
 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
 * VLAN won't be received on that VSI otherwise.
 */
ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
	struct ice_fltr_list_entry *mv_list_itr;

	if (!mv_list || !hw)
		return ICE_ERR_PARAM;

	LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type =
			mv_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC_VLAN)
			return ICE_ERR_PARAM;
		mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
		/* per-entry result is stored on the entry itself */
		mv_list_itr->status =
			ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
		if (mv_list_itr->status)
			return mv_list_itr->status;
/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ether type MAC filter, MAC is optional
 *
 * This function requires the caller to populate the entries in
 * the filter list with the necessary fields (including flags to
 * indicate Tx or Rx rules).
 */
ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
	struct ice_fltr_list_entry *em_list_itr;

	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		/* Entries must be ethertype or ethertype+MAC lookups */
		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		em_list_itr->status = ice_add_rule_internal(hw, l_type,
		if (em_list_itr->status)
			return em_list_itr->status;
/**
 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 */
ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	/* _SAFE variant: entries may be unlinked while iterating */
	LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
		if (em_list_itr->status)
			return em_list_itr->status;
/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 */
ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
	if (!LIST_EMPTY(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		/* unlink and free every book keeping entry */
		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
					 ice_fltr_mgmt_list_entry, list_entry) {
			LIST_DEL(&entry->list_entry);
			ice_free(hw, entry);
/**
 * ice_rem_adv_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 */
ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (LIST_EMPTY(rule_head))

	LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
				 ice_adv_fltr_mgmt_list_entry, list_entry) {
		LIST_DEL(&lst_itr->list_entry);
		/* advanced entries own a separately allocated lkups array */
		ice_free(hw, lst_itr->lkups);
		ice_free(hw, lst_itr);
/**
 * ice_rem_all_sw_rules_info
 * @hw: pointer to the hardware structure
 */
void ice_rem_all_sw_rules_info(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct LIST_HEAD_TYPE *rule_head;

		rule_head = &sw->recp_list[i].filt_rules;
		/* basic and advanced recipes use different entry layouts */
		if (!sw->recp_list[i].adv_rule)
			ice_rem_sw_rule_info(hw, rule_head);
			ice_rem_adv_rule_info(hw, rule_head);
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* Add requires the dummy Ethernet header; remove does not */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
		return ICE_ERR_NO_MEMORY;

	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
			/* existing rule ID is needed for removal */
			f_info.fltr_rule_id =
				pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
			f_info.fltr_rule_id =
				pi->dflt_tx_vsi_rule_id;

		opcode = ice_aqc_opc_add_sw_rules;
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))

	/* Record the per-direction default-VSI state on the port */
		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = hw_vsi_id;
			pi->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = hw_vsi_id;
			pi->dflt_rx_vsi_rule_id = index;
		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;

	ice_free(hw, s_rule);
/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a unicast rule entry - this is to be used
 * to remove unicast MAC filter that is not shared with other VSIs on the
 * switch.
 *
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
			  struct ice_fltr_info *f_info)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct LIST_HEAD_TYPE *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
		/* match on lookup data, destination HW VSI and Rx/Tx flag */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->fwd_id.hw_vsi_id ==
		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
		    f_info->flag == list_itr->fltr_info.flag)
/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];

		/* only MAC lookup-type entries are accepted in m_list */
		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		list_itr->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared with this VSI.
			 */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			ice_release_lock(rule_lock);
		list_itr->status = ice_remove_rule_internal(hw,

		if (list_itr->status)
			return list_itr->status;
3510 * ice_remove_vlan - Remove VLAN based filter rule
3511 * @hw: pointer to the hardware structure
3512 * @v_list: list of VLAN entries and forwarding information
3515 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3517 struct ice_fltr_list_entry *v_list_itr, *tmp;
3520 return ICE_ERR_PARAM;
3522 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3524 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3526 if (l_type != ICE_SW_LKUP_VLAN)
3527 return ICE_ERR_PARAM;
3528 v_list_itr->status = ice_remove_rule_internal(hw,
3531 if (v_list_itr->status)
3532 return v_list_itr->status;
3538 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3539 * @hw: pointer to the hardware structure
3540 * @v_list: list of MAC VLAN entries and forwarding information
3543 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3545 struct ice_fltr_list_entry *v_list_itr, *tmp;
3548 return ICE_ERR_PARAM;
3550 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3552 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3554 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3555 return ICE_ERR_PARAM;
3556 v_list_itr->status =
3557 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3559 if (v_list_itr->status)
3560 return v_list_itr->status;
3566 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3567 * @fm_entry: filter entry to inspect
3568 * @vsi_handle: VSI handle to compare with filter info
3571 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3573 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3574 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3575 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3576 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 *
 * Returns ICE_ERR_NO_MEMORY when the copy cannot be allocated.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct LIST_HEAD_TYPE *vsi_list_head,
			       struct ice_fltr_info *fi)
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * forwarding information.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_ADD(&tmp->list_entry, vsi_list_head);
/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 *
 * Returns ICE_ERR_PARAM for an invalid VSI handle, or the first copy
 * failure reported by ice_add_entry_to_vsi_fltr_list.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct LIST_HEAD_TYPE *lkup_list_head,
			 struct LIST_HEAD_TYPE *vsi_list_head)
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = ICE_SUCCESS;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fi;

		fi = &fm_entry->fltr_info;
		/* copy only filters this VSI actually uses */
		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/**
 * ice_determine_promisc_mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to given filter info.
 *
 * Returns a mask with one of the UCAST/MCAST/BCAST Rx or Tx bits set,
 * chosen from the filter's destination MAC class and direction, plus the
 * VLAN Rx/Tx bit for VLAN-tagged filters.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)

	if (IS_BROADCAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (IS_MULTICAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (IS_UNICAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	/* NOTE(review): VLAN bit presumably gated on vid != 0 — confirm */
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
/**
 * ice_get_vsi_promisc - get promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI
 *
 * Returns ICE_ERR_PARAM for an invalid VSI handle; otherwise ORs into
 * *promisc_mask the mask of every promisc filter this VSI uses, walking
 * the PROMISC recipe's rule list under its lock.
 */
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	ice_release_lock(rule_lock);
/**
 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI
 *
 * Same as ice_get_vsi_promisc but walks the PROMISC_VLAN recipe's rules.
 * Returns ICE_ERR_PARAM for an invalid VSI handle.
 */
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	ice_release_lock(rule_lock);
3779 * ice_remove_promisc - Remove promisc based filter rules
3780 * @hw: pointer to the hardware structure
3781 * @recp_id: recipe ID for which the rule needs to removed
3782 * @v_list: list of promisc entries
3784 static enum ice_status
3785 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3786 struct LIST_HEAD_TYPE *v_list)
3788 struct ice_fltr_list_entry *v_list_itr, *tmp;
3790 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3792 v_list_itr->status =
3793 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3794 if (v_list_itr->status)
3795 return v_list_itr->status;
/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Returns ICE_ERR_PARAM for an invalid VSI handle, otherwise the status of
 * building the remove list or of the remove operation itself.
 */
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Under the rule lock, copy every matching filter into the local
	 * remove list; the actual removal runs after the lock is dropped.
	 */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))

		fltr_promisc_mask |=
			ice_determine_promisc_mask(&itr->fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
			ice_release_lock(rule_lock);
			goto free_fltr_list;

	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

	/* free the COPIED filter entries regardless of outcome */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Returns ICE_ERR_PARAM for an invalid VSI handle, otherwise the status of
 * the last internal rule add (one rule per direction/packet-type bit).
 */
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		/* VLAN promisc uses the PROMISC_VLAN recipe keyed on @vid */
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */

		/* Need to reset this to zero for all iterations */
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Returns the status of building the VLAN list or of a failing per-VLAN
 * set/clear call.
 */
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;

	/* Snapshot (copy) this VSI's VLAN filters under the lock, then
	 * apply the promisc change per VLAN without holding it.
	 */
	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
	ice_release_lock(vlan_lock);
		goto free_fltr_list;

	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);

	/* free the COPIED filter entries made for this pass */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Collects copies of all @lkup filters used by @vsi_handle under the
 * recipe's lock, dispatches the matching remove helper, then frees the
 * copies.
 */
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
	ice_release_lock(rule_lock);

	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
	case ICE_SW_LKUP_MAC_VLAN:
		ice_remove_mac_vlan(hw, &remove_list_head);
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW,
			  "Remove filters for this lookup type hasn't been implemented yet\n");
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");

	/* free the COPIES created for the remove pass */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
4101 * ice_remove_vsi_fltr - Remove all filters for a VSI
4102 * @hw: pointer to the hardware structure
4103 * @vsi_handle: VSI handle to remove filters from
4105 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4107 ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
4109 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4110 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4111 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4112 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4113 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4114 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4115 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4116 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 *
 * Returns ICE_ERR_NO_MEMORY if the AQ buffer cannot be allocated,
 * otherwise the status of the allocate-resource admin queue command;
 * on success *counter_id holds the firmware-assigned index.
 */
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;

	/* Allocate resource */
	buf_len = sizeof(*buf);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	buf->num_elems = CPU_TO_LE16(num_items);
	/* resource type lives in the masked type field; the shared flag is
	 * ORed in alongside it
	 */
	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);

	*counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 *
 * Returns ICE_ERR_NO_MEMORY if the AQ buffer cannot be allocated,
 * otherwise the status of the free-resource admin queue command.
 */
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;

	buf_len = sizeof(*buf);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	buf->num_elems = CPU_TO_LE16(num_items);
	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	/* identify which counter to release */
	buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "counter resource could not be freed\n");
/**
 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: returns counter index
 *
 * Thin wrapper: allocates a single dedicated VLAN counter.
 */
enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
/**
 * ice_free_vlan_res_counter - Free counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: counter index to be freed
 *
 * Thin wrapper: frees one dedicated VLAN counter.
 */
enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 *
 * Returns ICE_ERR_PARAM when num_acts is 0 or exceeds ICE_MAX_LG_ACT,
 * ICE_ERR_NO_MEMORY on buffer allocation failure, otherwise the status of
 * the allocate-resource admin queue call; on success *l_id is filled in.
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;

	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;

	/* Allocate resource for large action */
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = CPU_TO_LE16(1);

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed 4. This was ensured at the
	 * beginning of the function.
	 */
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

	ice_free(hw, sw_buf);
/**
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds (or reuses) a MAC forward-to-VSI filter and attaches a 3-action
 * large action that stamps @sw_marker on matching packets.  On failure
 * the MAC filter is removed again only if this call created it.
 */
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status ret;

	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	entry_exists = false;
	ret = ice_add_mac(hw, &l_head);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);

	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	 */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;

	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;

	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);

	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)

	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
		ice_release_lock(rule_lock);

	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
		ret = ice_remove_mac(hw, &l_head);
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 * information
 *
 * Adds (or reuses) a MAC forward-to-VSI filter, allocates a VLAN counter
 * and attaches a 2-action large action that counts matching packets.  On
 * failure the MAC filter is removed again only if this call created it.
 */
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status ret;

	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	entry_exist = false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);

	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	ret = ice_add_mac(hw, &l_head);
	if (ret == ICE_ERR_ALREADY_EXISTS)

	ice_acquire_lock(rule_lock);
	m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
		ret = ICE_ERR_BAD_PTR;

	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;

	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;

	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);

	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);

	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)

	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
		ice_release_lock(rule_lock);

	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
		ret = ice_remove_mac(hw, &l_head);
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
	{ ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL, { 0 } },
	{ ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
			 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
			   26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL, { 0, 2 } },
	{ ICE_UDP_OF, { 0, 2 } },
	{ ICE_UDP_ILOS, { 0, 2 } },
	{ ICE_SCTP_IL, { 0, 2 } },
	/* tunnel headers: offsets 8+ skip the encapsulating UDP header */
	{ ICE_VXLAN, { 8, 10, 12, 14 } },
	{ ICE_GENEVE, { 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE, { 0, 2, 4 } },
	{ ICE_NVGRE, { 0, 2, 4, 6 } },
	{ ICE_PROTOCOL_LAST, { 0 } }
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * given grouping.
 */
static const struct ice_pref_recipe_group ice_recipe_pack[] = {
	{3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
	      { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
	{4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
	      { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
	      { 0xffff, 0xffff, 0xffff, 0xffff } },
	{2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
	      { 0xffff, 0xffff, 0xffff, 0xffff } },
	{2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
	      { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Maps the SW ice_protocol_type enum to the HW protocol ID programmed in
 * field vectors.  Note several tunnel types (VXLAN/GENEVE/VXLAN_GPE) all
 * map to ICE_UDP_OF_HW -- presumably because they are recognized via the
 * outer UDP header; TODO confirm.
 * ICE_PROTOCOL_LAST terminates the table (see ice_prot_type_to_id()).
 * NOTE(review): the table's closing brace is elided from this listing.
 */
4506 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4507 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4508 { ICE_MAC_IL, ICE_MAC_IL_HW },
4509 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4510 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4511 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4512 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4513 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4514 { ICE_TCP_IL, ICE_TCP_IL_HW },
4515 { ICE_UDP_OF, ICE_UDP_OF_HW },
4516 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4517 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4518 { ICE_VXLAN, ICE_UDP_OF_HW },
4519 { ICE_GENEVE, ICE_UDP_OF_HW },
4520 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4521 { ICE_NVGRE, ICE_GRE_OF_HW },
4522 { ICE_PROTOCOL_LAST, 0 }
/* NOTE(review): non-contiguous embedded numbering -- declarations, braces
 * and comment terminators are elided from this listing.  Comment delimiters
 * reconstructed; code untouched.
 */
/*
4526 * ice_find_recp - find a recipe
4527 * @hw: pointer to the hardware structure
4528 * @lkup_exts: extension sequence to match
4530 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
4532 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4534 bool refresh_required = true;
4535 struct ice_sw_recipe *recp;
4538 /* Initialize available_result_ids which tracks available result idx */
/* Sets bits ICE_CHAIN_FV_INDEX_START down to 0; available_result_ids is
 * presumably a file-scope bitmap -- its declaration is not visible here.
 */
4539 for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
4540 ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
4541 available_result_ids);
4543 /* Walk through existing recipes to find a match */
4544 recp = hw->switch_info->recp_list;
4545 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4546 /* If recipe was not created for this ID, in SW bookkeeping,
4547 * check if FW has an entry for this recipe. If the FW has an
4548 * entry update it in our SW bookkeeping and continue with the
 */
4551 if (!recp[i].recp_created)
4552 if (ice_get_recp_frm_fw(hw,
4553 hw->switch_info->recp_list, i,
4557 /* if number of words we are looking for match */
4558 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4559 struct ice_fv_word *a = lkup_exts->fv_words;
4560 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* O(p*q) scan: for every requested word "p", look for a recipe
 * word "q" with the same (offset, protocol ID) pair.
 */
4564 for (p = 0; p < lkup_exts->n_val_words; p++) {
4565 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4567 if (a[p].off == b[q].off &&
4568 a[p].prot_id == b[q].prot_id)
4569 /* Found the "p"th word in the
 */
4574 /* After walking through all the words in the
4575 * "i"th recipe if "p"th word was not found then
4576 * this recipe is not what we are looking for.
4577 * So break out from this loop and try the next
 */
4580 if (q >= recp[i].lkup_exts.n_val_words) {
4585 /* If for "i"th recipe the found was never set to false
4586 * then it means we found our match
 */
4589 return i; /* Return the recipe ID */
4592 return ICE_MAX_NUM_RECIPES;
/*
4596 * ice_prot_type_to_id - get protocol ID from protocol type
4597 * @type: protocol type
4598 * @id: pointer to variable that will receive the ID
4600 * Returns true if found, false otherwise
 *
 * Linear scan of ice_prot_id_tbl, terminated by ICE_PROTOCOL_LAST.
 * NOTE(review): the "return true"/"return false" lines and braces are
 * elided from this listing (non-contiguous numbering).
 */
4602 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4606 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4607 if (ice_prot_id_tbl[i].type == type) {
4608 *id = ice_prot_id_tbl[i].protocol_id;
/*
4615 * ice_fill_valid_words - count valid words
4616 * @rule: advanced rule with lookup information
4617 * @lkup_exts: byte offset extractions of the words that are valid
4619 * calculate valid words in a lookup rule using mask value
 *
 * Appends one fv_word entry per non-zero 16-bit mask word of the rule and
 * returns the number of words added (ret_val).
 * NOTE(review): the header above originally said "ice_find_valid_words",
 * which does not match the function name; corrected here.
 * NOTE(review): prot_id is looked up via ice_prot_type_to_id() but line
 * 4643 below indexes ice_prot_id_tbl[rule->type] directly, which is only
 * correct if the table order matches the enum order -- verify, or prefer
 * the already-resolved prot_id.
 */
4622 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4623 struct ice_prot_lkup_ext *lkup_exts)
4629 if (!ice_prot_type_to_id(rule->type, &prot_id))
4632 word = lkup_exts->n_val_words;
/* Walk the mask union one u16 at a time; each set word is a lookup word. */
4634 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4635 if (((u16 *)&rule->m_u)[j] &&
4636 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4637 /* No more space to accommodate */
4638 if (word >= ICE_MAX_CHAIN_WORDS)
4640 lkup_exts->fv_words[word].off =
4641 ice_prot_ext[rule->type].offs[j];
4642 lkup_exts->fv_words[word].prot_id =
4643 ice_prot_id_tbl[rule->type].protocol_id;
4644 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4648 ret_val = word - lkup_exts->n_val_words;
4649 lkup_exts->n_val_words = word;
/*
4655 * ice_find_prot_off_ind - check for specific ID and offset in rule
4656 * @lkup_exts: an array of protocol header extractions
4657 * @prot_type: protocol type to check
4658 * @off: expected offset of the extraction
4660 * Check if the prot_ext has given protocol ID and offset
 *
 * Returns the matching word index, or ICE_MAX_CHAIN_WORDS when not found.
 * NOTE(review): the "return j;" on a hit is among the lines elided from
 * this listing.
 */
4663 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4668 for (j = 0; j < lkup_exts->n_val_words; j++)
4669 if (lkup_exts->fv_words[j].off == off &&
4670 lkup_exts->fv_words[j].prot_id == prot_type)
4673 return ICE_MAX_CHAIN_WORDS;
/*
4677 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4678 * @lkup_exts: an array of protocol header extractions
4679 * @r_policy: preferred recipe grouping policy
4681 * Helper function to check if given recipe group is subset we need to check if
4682 * all the words described by the given recipe group exist in the advanced rule
4683 * look up information
 *
 * NOTE(review): lines (declarations of i/count, braces, return) are elided
 * from this listing; comment delimiters reconstructed.
 */
4686 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4687 const struct ice_pref_recipe_group *r_policy)
4689 u8 ind[ICE_NUM_WORDS_RECIPE];
4693 /* check if everything in the r_policy is part of the entire rule */
4694 for (i = 0; i < r_policy->n_val_pairs; i++) {
4697 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4698 r_policy->pairs[i].off);
4699 if (j >= ICE_MAX_CHAIN_WORDS)
4702 /* store the indexes temporarily found by the find function
4703 * this will be used to mark the words as 'done'
 */
4708 /* If the entire policy recipe was a true match, then mark the fields
4709 * that are covered by the recipe as 'done' meaning that these words
4710 * will be clumped together in one recipe.
4711 * "Done" here means in our searching if certain recipe group
4712 * matches or is subset of the given rule, then we mark all
4713 * the corresponding offsets as found. So the remaining recipes should
4714 * be created with whatever words that were left.
 */
4716 for (i = 0; i < count; i++) {
4719 ice_set_bit(in, lkup_exts->done);
/*
4725 * ice_create_first_fit_recp_def - Create a recipe grouping
4726 * @hw: pointer to the hardware structure
4727 * @lkup_exts: an array of protocol header extractions
4728 * @rg_list: pointer to a list that stores new recipe groups
4729 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4731 * Using first fit algorithm, take all the words that are still not done
4732 * and start grouping them in 4-word groups. Each group makes up one
 *
 * NOTE(review): lines are elided from this listing (the recp_cnt parameter
 * line, loop-body braces, the n_val_pairs increment, final return, etc.);
 * comment delimiters reconstructed, code untouched.
 */
4735 static enum ice_status
4736 ice_create_first_fit_recp_def(struct ice_hw *hw,
4737 struct ice_prot_lkup_ext *lkup_exts,
4738 struct LIST_HEAD_TYPE *rg_list,
4741 struct ice_pref_recipe_group *grp = NULL;
4746 /* Walk through every word in the rule to check if it is not done. If so
4747 * then this word needs to be part of a new recipe.
 */
4749 for (j = 0; j < lkup_exts->n_val_words; j++)
4750 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one is full. */
4752 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4753 struct ice_recp_grp_entry *entry;
4755 entry = (struct ice_recp_grp_entry *)
4756 ice_malloc(hw, sizeof(*entry));
4758 return ICE_ERR_NO_MEMORY;
4759 LIST_ADD(&entry->l_entry, rg_list);
4760 grp = &entry->r_group;
/* Append the (prot_id, off, mask) triple to the current group. */
4764 grp->pairs[grp->n_val_pairs].prot_id =
4765 lkup_exts->fv_words[j].prot_id;
4766 grp->pairs[grp->n_val_pairs].off =
4767 lkup_exts->fv_words[j].off;
4768 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
/*
4776 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4777 * @hw: pointer to the hardware structure
4778 * @fv_list: field vector with the extraction sequence information
4779 * @rg_list: recipe groupings with protocol-offset pairs
4781 * Helper function to fill in the field vector indices for protocol-offset
4782 * pairs. These indexes are then ultimately programmed into a recipe.
 *
 * Only the first field vector in fv_list is consulted.
 * NOTE(review): lines are elided from this listing (return type, i/j/mask
 * declarations, fv_idx store, braces); delimiters reconstructed.
 */
4785 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4786 struct LIST_HEAD_TYPE *rg_list)
4788 struct ice_sw_fv_list_entry *fv;
4789 struct ice_recp_grp_entry *rg;
4790 struct ice_fv_word *fv_ext;
4792 if (LIST_EMPTY(fv_list))
4795 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4796 fv_ext = fv->fv_ptr->ew;
4798 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4801 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4802 struct ice_fv_word *pr;
4806 pr = &rg->r_group.pairs[i];
4807 mask = rg->r_group.mask[i];
/* Scan the extraction words of the field vector for this pair. */
4809 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4810 if (fv_ext[j].prot_id == pr->prot_id &&
4811 fv_ext[j].off == pr->off) {
4812 /* Store index of field vector */
4814 /* Mask is given by caller as big
4815 * endian, but sent to FW as little
 */
/* 16-bit byte swap of the mask. */
4818 rg->fv_mask[i] = mask << 8 | mask >> 8;
/*
4826 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4827 * @hw: pointer to hardware structure
4828 * @rm: recipe management list entry
4829 * @match_tun: if field vector index for tunnel needs to be programmed
 *
 * Allocates recipe resources, builds the AQ recipe buffer (one element per
 * recipe group plus an optional chaining/root recipe when n_grp_count > 1),
 * sends it via ice_aq_add_recipe() under the change lock, and records the
 * created recipes in SW bookkeeping.
 * NOTE(review): many lines (error-goto labels, braces, free paths, loop
 * bodies) are elided from this listing; comment delimiters reconstructed,
 * code untouched.
 */
4831 static enum ice_status
4832 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4835 struct ice_aqc_recipe_data_elem *tmp;
4836 struct ice_aqc_recipe_data_elem *buf;
4837 struct ice_recp_grp_entry *entry;
4838 enum ice_status status;
4843 /* When more than one recipe are required, another recipe is needed to
4844 * chain them together. Matching a tunnel metadata ID takes up one of
4845 * the match fields in the chaining recipe reducing the number of
4846 * chained recipes by one.
 */
4848 if (rm->n_grp_count > 1)
4850 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
4851 (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
4852 return ICE_ERR_MAX_LIMIT;
/* Scratch buffer large enough for a full get-recipe response. */
4854 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
4855 ICE_MAX_NUM_RECIPES,
4858 return ICE_ERR_NO_MEMORY;
4860 buf = (struct ice_aqc_recipe_data_elem *)
4861 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
4863 status = ICE_ERR_NO_MEMORY;
4867 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4868 recipe_count = ICE_MAX_NUM_RECIPES;
/* Read existing recipes from FW so new ones can be built from a template. */
4869 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4871 if (status || recipe_count == 0)
4874 /* Allocate the recipe resources, and configure them according to the
4875 * match fields from protocol headers and extracted field vectors.
 */
/* chain_idx counts down from ICE_CHAIN_FV_INDEX_START through the
 * available_result_ids bitmap (free result-index tracker).
 */
4877 chain_idx = ICE_CHAIN_FV_INDEX_START -
4878 ice_find_first_bit(available_result_ids,
4879 ICE_CHAIN_FV_INDEX_START + 1);
4880 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4883 status = ice_alloc_recipe(hw, &entry->rid);
4887 /* Clear the result index of the located recipe, as this will be
4888 * updated, if needed, later in the recipe creation process.
 */
4890 tmp[0].content.result_indx = 0;
4892 buf[recps] = tmp[0];
4893 buf[recps].recipe_indx = (u8)entry->rid;
4894 /* if the recipe is a non-root recipe RID should be programmed
4895 * as 0 for the rules to be applied correctly.
 */
4897 buf[recps].content.rid = 0;
4898 ice_memset(&buf[recps].content.lkup_indx, 0,
4899 sizeof(buf[recps].content.lkup_indx),
4902 /* All recipes use look-up index 0 to match switch ID. */
4903 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4904 buf[recps].content.mask[0] =
4905 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4906 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
 */
4909 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4910 buf[recps].content.lkup_indx[i] = 0x80;
4911 buf[recps].content.mask[i] = 0;
/* Program the real lookup indices/masks extracted for this group. */
4914 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4915 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4916 buf[recps].content.mask[i + 1] =
4917 CPU_TO_LE16(entry->fv_mask[i]);
/* Chained case: each sub-recipe writes its result to chain_idx, and the
 * consumed result index is removed from the free pool.
 */
4920 if (rm->n_grp_count > 1) {
4921 entry->chain_idx = chain_idx;
4922 buf[recps].content.result_indx =
4923 ICE_AQ_RECIPE_RESULT_EN |
4924 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4925 ICE_AQ_RECIPE_RESULT_DATA_M);
4926 ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
4927 available_result_ids);
4928 chain_idx = ICE_CHAIN_FV_INDEX_START -
4929 ice_find_first_bit(available_result_ids,
4930 ICE_CHAIN_FV_INDEX_START +
4934 /* fill recipe dependencies */
4935 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
4936 ICE_MAX_NUM_RECIPES);
4937 ice_set_bit(buf[recps].recipe_indx,
4938 (ice_bitmap_t *)buf[recps].recipe_bitmap);
4939 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is itself the root recipe. */
4943 if (rm->n_grp_count == 1) {
4944 rm->root_rid = buf[0].recipe_indx;
4945 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
4946 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4947 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4948 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4949 sizeof(buf[0].recipe_bitmap),
4950 ICE_NONDMA_TO_NONDMA);
4952 status = ICE_ERR_BAD_PTR;
4955 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
4956 * the recipe which is getting created if specified
4957 * by user. Usually any advanced switch filter, which results
4958 * into new extraction sequence, ended up creating a new recipe
4959 * of type ROOT and usually recipes are associated with profiles
4960 * Switch rule referreing newly created recipe, needs to have
4961 * either/or 'fwd' or 'join' priority, otherwise switch rule
4962 * evaluation will not happen correctly. In other words, if
4963 * switch rule to be evaluated on priority basis, then recipe
4964 * needs to have priority, otherwise it will be evaluated last.
 */
4966 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case (else branch; the "} else {" line is elided):
 * build one extra root recipe that chains the sub-recipe results.
 */
4968 struct ice_recp_grp_entry *last_chain_entry;
4971 /* Allocate the last recipe that will chain the outcomes of the
4972 * other recipes together
 */
4974 status = ice_alloc_recipe(hw, &rid);
4978 buf[recps].recipe_indx = (u8)rid;
4979 buf[recps].content.rid = (u8)rid;
4980 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4981 /* the new entry created should also be part of rg_list to
4982 * make sure we have complete recipe
 */
4984 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
4985 sizeof(*last_chain_entry));
4986 if (!last_chain_entry) {
4987 status = ICE_ERR_NO_MEMORY;
4990 last_chain_entry->rid = rid;
4991 ice_memset(&buf[recps].content.lkup_indx, 0,
4992 sizeof(buf[recps].content.lkup_indx),
4994 /* All recipes use look-up index 0 to match switch ID. */
4995 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4996 buf[recps].content.mask[0] =
4997 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4998 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4999 buf[recps].content.lkup_indx[i] =
5000 ICE_AQ_RECIPE_LKUP_IGNORE;
5001 buf[recps].content.mask[i] = 0;
5005 /* update r_bitmap with the recp that is used for chaining */
5006 ice_set_bit(rid, rm->r_bitmap);
5007 /* this is the recipe that chains all the other recipes so it
5008 * should not have a chaining ID to indicate the same
 */
5010 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The root recipe looks up each sub-recipe's result index. */
5011 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5013 last_chain_entry->fv_idx[i] = entry->chain_idx;
5014 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5015 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5016 ice_set_bit(entry->rid, rm->r_bitmap);
5018 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5019 if (sizeof(buf[recps].recipe_bitmap) >=
5020 sizeof(rm->r_bitmap)) {
5021 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5022 sizeof(buf[recps].recipe_bitmap),
5023 ICE_NONDMA_TO_NONDMA);
5025 status = ICE_ERR_BAD_PTR;
5028 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5030 /* To differentiate among different UDP tunnels, a meta data ID
 */
5034 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5035 buf[recps].content.mask[i] =
5036 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5040 rm->root_rid = (u8)rid;
/* AQ writes to the recipe table require the global change lock. */
5042 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5046 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5047 ice_release_change_lock(hw);
5051 /* Every recipe that just got created add it to the recipe
 */
5054 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5055 struct ice_switch_info *sw = hw->switch_info;
5056 struct ice_sw_recipe *recp;
5058 recp = &sw->recp_list[entry->rid];
5059 recp->root_rid = entry->rid;
5060 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5061 entry->r_group.n_val_pairs *
5062 sizeof(struct ice_fv_word),
5063 ICE_NONDMA_TO_NONDMA);
5065 recp->n_ext_words = entry->r_group.n_val_pairs;
5066 recp->chain_idx = entry->chain_idx;
5067 recp->recp_created = true;
5068 recp->big_recp = false;
/*
5082 * ice_create_recipe_group - creates recipe group
5083 * @hw: pointer to hardware structure
5084 * @rm: recipe management list entry
5085 * @lkup_exts: lookup elements
 *
 * Tries the preferred grouping policies (ice_recipe_pack) first, then packs
 * the remaining "not done" words first-fit via
 * ice_create_first_fit_recp_def(); populates rm's extraction words/masks.
 * NOTE(review): lines are elided from this listing (declarations of
 * i/groups/recp_count, error labels, braces); delimiters reconstructed.
 */
5087 static enum ice_status
5088 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5089 struct ice_prot_lkup_ext *lkup_exts)
5091 struct ice_recp_grp_entry *entry;
5092 struct ice_recp_grp_entry *tmp;
5093 enum ice_status status;
5097 rm->n_grp_count = 0;
5099 /* Each switch recipe can match up to 5 words or metadata. One word in
5100 * each recipe is used to match the switch ID. Four words are left for
5101 * matching other values. If the new advanced recipe requires more than
5102 * 4 words, it needs to be split into multiple recipes which are chained
5103 * together using the intermediate result that each produces as input to
5104 * the other recipes in the sequence.
 */
5106 groups = ARRAY_SIZE(ice_recipe_pack);
5108 /* Check if any of the preferred recipes from the grouping policy
 */
5111 for (i = 0; i < groups; i++)
5112 /* Check if the recipe from the preferred grouping matches
5113 * or is a subset of the fields that needs to be looked up.
 */
5115 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
5116 /* This recipe can be used by itself or grouped with
 */
5119 entry = (struct ice_recp_grp_entry *)
5120 ice_malloc(hw, sizeof(*entry));
5122 status = ICE_ERR_NO_MEMORY;
5125 entry->r_group = ice_recipe_pack[i];
5126 LIST_ADD(&entry->l_entry, &rm->rg_list);
5130 /* Create recipes for words that are marked not done by packing them
 */
5133 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5134 &rm->rg_list, &recp_count);
5136 rm->n_grp_count += recp_count;
5137 rm->n_ext_words = lkup_exts->n_val_words;
5138 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5139 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5140 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5141 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path (label elided from listing): free all queued group entries. */
5146 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5148 LIST_DEL(&entry->l_entry);
5149 ice_free(hw, entry);
/*
5157 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5158 * @hw: pointer to hardware structure
5159 * @lkups: lookup elements or match criteria for the advanced recipe, one
5160 * structure per protocol header
5161 * @lkups_cnt: number of protocols
5162 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup's protocol type to a HW protocol ID, then queries
 * the field vectors covering all of them.  prot_ids is a temporary buffer
 * freed before return.
 * NOTE(review): lines are elided from this listing (u16 *prot_ids and
 * loop-index declarations, NULL check, error label, final return).
 */
5164 static enum ice_status
5165 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5166 struct LIST_HEAD_TYPE *fv_list)
5168 enum ice_status status;
5172 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5174 return ICE_ERR_NO_MEMORY;
5176 for (i = 0; i < lkups_cnt; i++)
5177 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5178 status = ICE_ERR_CFG;
5182 /* Find field vectors that include all specified protocol types */
5183 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
5186 ice_free(hw, prot_ids);
/*
5191 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5192 * @hw: pointer to hardware structure
5193 * @lkups: lookup elements or match criteria for the advanced recipe, one
5194 * structure per protocol header
5195 * @lkups_cnt: number of protocols
5196 * @rinfo: other information regarding the rule e.g. priority and action info
5197 * @rid: return the recipe ID of the recipe created
 *
 * Pipeline: validate/collect lookup words -> reuse an existing recipe if
 * one already matches -> otherwise fetch field vectors, group words into
 * recipes, program them via ice_add_sw_recipe(), and associate the new
 * recipes with all matching profiles.
 * NOTE(review): many lines (param checks, error labels, braces, free
 * paths) are elided from this listing; delimiters reconstructed.
 */
5199 static enum ice_status
5200 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5201 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5203 struct ice_prot_lkup_ext *lkup_exts;
5204 struct ice_recp_grp_entry *r_entry;
5205 struct ice_sw_fv_list_entry *fvit;
5206 struct ice_recp_grp_entry *r_tmp;
5207 struct ice_sw_fv_list_entry *tmp;
5208 enum ice_status status = ICE_SUCCESS;
5209 struct ice_sw_recipe *rm;
5210 bool match_tun = false;
5214 return ICE_ERR_PARAM;
5216 lkup_exts = (struct ice_prot_lkup_ext *)
5217 ice_malloc(hw, sizeof(*lkup_exts));
5219 return ICE_ERR_NO_MEMORY;
5221 /* Determine the number of words to be matched and if it exceeds a
5222 * recipe's restrictions
 */
5224 for (i = 0; i < lkups_cnt; i++) {
5227 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5228 status = ICE_ERR_CFG;
5229 goto err_free_lkup_exts;
5232 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5234 status = ICE_ERR_CFG;
5235 goto err_free_lkup_exts;
/* Reuse path: an identical recipe may already exist. */
5239 *rid = ice_find_recp(hw, lkup_exts);
5240 if (*rid < ICE_MAX_NUM_RECIPES)
5241 /* Success if found a recipe that match the existing criteria */
5242 goto err_free_lkup_exts;
5244 /* Recipe we need does not exist, add a recipe */
5246 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5248 status = ICE_ERR_NO_MEMORY;
5249 goto err_free_lkup_exts;
5252 /* Get field vectors that contain fields extracted from all the protocol
5253 * headers being programmed.
 */
5255 INIT_LIST_HEAD(&rm->fv_list);
5256 INIT_LIST_HEAD(&rm->rg_list);
5258 status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
5262 /* Group match words into recipes using preferred recipe grouping
 */
5265 status = ice_create_recipe_group(hw, rm, lkup_exts);
5269 /* There is only profile for UDP tunnels. So, it is necessary to use a
5270 * metadata ID flag to differentiate different tunnel types. A separate
5271 * recipe needs to be used for the metadata.
 */
5273 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5274 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5275 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5278 /* set the recipe priority if specified */
5279 rm->priority = rinfo->priority ? rinfo->priority : 0;
5281 /* Find offsets from the field vector. Pick the first one for all the
 */
5284 ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5285 status = ice_add_sw_recipe(hw, rm, match_tun);
5289 /* Associate all the recipes created with all the profiles in the
5290 * common field vector.
 */
5292 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5294 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge existing profile->recipe associations with the new recipes
 * before writing the mapping back under the change lock.
 */
5296 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5297 (u8 *)r_bitmap, NULL);
5301 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5302 ICE_MAX_NUM_RECIPES);
5303 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5307 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5310 ice_release_change_lock(hw);
5316 *rid = rm->root_rid;
5317 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5318 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup (labels elided): free recipe-group entries, field-vector list
 * entries, the root buffer, rm itself, and lkup_exts.
 */
5320 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5321 ice_recp_grp_entry, l_entry) {
5322 LIST_DEL(&r_entry->l_entry);
5323 ice_free(hw, r_entry);
5326 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5328 LIST_DEL(&fvit->list_entry);
5333 ice_free(hw, rm->root_buf);
5338 ice_free(hw, lkup_exts);
/*
5344 * ice_find_dummy_packet - find dummy packet by tunnel type
5346 * @lkups: lookup elements or match criteria for the advanced recipe, one
5347 * structure per protocol header
5348 * @lkups_cnt: number of protocols
5349 * @tun_type: tunnel type from the match criteria
5350 * @pkt: dummy packet to fill according to filter match criteria
5351 * @pkt_len: packet length of dummy packet
5352 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Selects a canned packet template (and its protocol-offset table) based on
 * tunnel type and on whether the lookups reference UDP/TCP/IPv6.
 * NOTE(review): lines are elided from this listing (flag assignments in
 * the scan loop, returns, else branches, braces).
 */
5355 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5356 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5358 const struct ice_dummy_pkt_offsets **offsets)
5360 bool tcp = false, udp = false, ipv6 = false;
/* Scan lookups to learn which L3/L4 headers the caller matches on. */
5363 for (i = 0; i < lkups_cnt; i++) {
5364 if (lkups[i].type == ICE_UDP_ILOS)
5366 else if (lkups[i].type == ICE_TCP_IL)
5368 else if (lkups[i].type == ICE_IPV6_OFOS)
5372 if (tun_type == ICE_SW_TUN_NVGRE || tun_type == ICE_ALL_TUNNELS) {
5373 *pkt = dummy_gre_packet;
5374 *pkt_len = sizeof(dummy_gre_packet);
5375 *offsets = dummy_gre_packet_offsets;
5379 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5380 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5382 *pkt = dummy_udp_tun_tcp_packet;
5383 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5384 *offsets = dummy_udp_tun_tcp_packet_offsets;
5388 *pkt = dummy_udp_tun_udp_packet;
5389 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5390 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunnel templates, chosen by the udp/tcp/ipv6 flags. */
5395 *pkt = dummy_udp_packet;
5396 *pkt_len = sizeof(dummy_udp_packet);
5397 *offsets = dummy_udp_packet_offsets;
5399 } else if (udp && ipv6) {
5400 *pkt = dummy_udp_ipv6_packet;
5401 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5402 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" reduces to just "ipv6" -- the
 * tcp term is redundant (any ipv6 case not caught above lands here).
 */
5404 } else if ((tcp && ipv6) || ipv6) {
5405 *pkt = dummy_tcp_ipv6_packet;
5406 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5407 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default/fallback template (preceding "} else {" line elided). */
5411 *pkt = dummy_tcp_packet;
5412 *pkt_len = sizeof(dummy_tcp_packet);
5413 *offsets = dummy_tcp_packet_offsets;
/*
5417 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5419 * @lkups: lookup elements or match criteria for the advanced recipe, one
5420 * structure per protocol header
5421 * @lkups_cnt: number of protocols
5422 * @s_rule: stores rule information from the match criteria
5423 * @dummy_pkt: dummy packet to fill according to filter match criteria
5424 * @pkt_len: packet length of dummy packet
5425 * @offsets: offset info for the dummy packet
 *
 * Copies the template packet into the rule buffer, then overlays the
 * caller's header values word-by-word wherever the mask has bits set.
 * NOTE(review): lines are elided (u8 *pkt / u16 i declarations, the found
 * flag, switch case labels, braces, final return).
 */
5427 static enum ice_status
5428 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5429 struct ice_aqc_sw_rules_elem *s_rule,
5430 const u8 *dummy_pkt, u16 pkt_len,
5431 const struct ice_dummy_pkt_offsets *offsets)
5436 /* Start with a packet with a pre-defined/dummy content. Then, fill
5437 * in the header values to be looked up or matched.
 */
5439 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5441 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5443 for (i = 0; i < lkups_cnt; i++) {
5444 enum ice_protocol_type type;
5445 u16 offset = 0, len = 0, j;
5448 /* find the start of this layer; it should be found since this
5449 * was already checked when search for the dummy packet
 */
5451 type = lkups[i].type;
5452 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5453 if (type == offsets[j].type) {
5454 offset = offsets[j].offset;
5459 /* this should never happen in a correct calling sequence */
5461 return ICE_ERR_PARAM;
/* Per-protocol header length (case labels elided from this listing). */
5463 switch (lkups[i].type) {
5466 len = sizeof(struct ice_ether_hdr);
5469 len = sizeof(struct ice_ethtype_hdr);
5473 len = sizeof(struct ice_ipv4_hdr);
5477 len = sizeof(struct ice_ipv6_hdr);
5482 len = sizeof(struct ice_l4_hdr);
5485 len = sizeof(struct ice_sctp_hdr);
5488 len = sizeof(struct ice_nvgre);
5493 len = sizeof(struct ice_udp_tnl_hdr);
5496 return ICE_ERR_PARAM;
5499 /* the length should be a word multiple */
5500 if (len % ICE_BYTES_PER_WORD)
5503 /* We have the offset to the header start, the length, the
5504 * caller's header values and mask. Use this information to
5505 * copy the data into the dummy packet appropriately based on
5506 * the mask. Note that we need to only write the bits as
5507 * indicated by the mask to make sure we don't improperly write
5508 * over any significant packet data.
 */
/* NOTE(review): (u16 *)(pkt + offset) may be a misaligned u16 access
 * for odd offsets -- confirm offsets tables only contain even values.
 */
5510 for (j = 0; j < len / sizeof(u16); j++)
5511 if (((u16 *)&lkups[i].m_u)[j])
5512 ((u16 *)(pkt + offset))[j] =
5513 (((u16 *)(pkt + offset))[j] &
5514 ~((u16 *)&lkups[i].m_u)[j]) |
5515 (((u16 *)&lkups[i].h_u)[j] &
5516 ((u16 *)&lkups[i].m_u)[j]);
5519 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/*
5525 * ice_find_adv_rule_entry - Search a rule entry
5526 * @hw: pointer to the hardware structure
5527 * @lkups: lookup elements or match criteria for the advanced recipe, one
5528 * structure per protocol header
5529 * @lkups_cnt: number of protocols
5530 * @recp_id: recipe ID for which we are finding the rule
5531 * @rinfo: other information regarding the rule e.g. priority and action info
5533 * Helper function to search for a given advance rule entry
5534 * Returns pointer to entry storing the rule if found
 *
 * Compares lookups with memcmp (exact byte match) and also requires the
 * stored rule's sw_act.flag and tun_type to match rinfo's.
 * NOTE(review): lines are elided (u16 i declaration, memcmp size argument,
 * continue/return statements, braces).
 */
5536 static struct ice_adv_fltr_mgmt_list_entry *
5537 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5538 u16 lkups_cnt, u8 recp_id,
5539 struct ice_adv_rule_info *rinfo)
5541 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5542 struct ice_switch_info *sw = hw->switch_info;
5545 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5546 ice_adv_fltr_mgmt_list_entry, list_entry) {
5547 bool lkups_matched = true;
5549 if (lkups_cnt != list_itr->lkups_cnt)
5551 for (i = 0; i < list_itr->lkups_cnt; i++)
5552 if (memcmp(&list_itr->lkups[i], &lkups[i],
5554 lkups_matched = false;
5557 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5558 rinfo->tun_type == list_itr->rule_info.tun_type &&
/*
5566 * ice_adv_add_update_vsi_list
5567 * @hw: pointer to the hardware structure
5568 * @m_entry: pointer to current adv filter management list entry
5569 * @cur_fltr: filter information from the book keeping entry
5570 * @new_fltr: filter information with the new VSI to be added
5572 * Call AQ command to add or update previously created VSI list with new VSI.
5574 * Helper function to do book keeping associated with adding filter information
5575 * The algorithm to do the booking keeping is described below :
5576 * When a VSI needs to subscribe to a given advanced filter
5577 * if only one VSI has been added till now
5578 * Allocate a new VSI list and add two VSIs
5579 * to this list using switch rule command
5580 * Update the previously created switch rule with the
5581 * newly created VSI list ID
5582 * if a VSI list was previously created
5583 * Add the new VSI to the previously created VSI list set
5584 * using the update switch rule command
 *
 * NOTE(review): lines are elided from this listing (error returns on
 * ice_create_vsi_list_rule/ice_update_pkt_fwd_rule failures, the "} else {"
 * before the existing-list branch, braces, final return).
 */
5586 static enum ice_status
5587 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5588 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5589 struct ice_adv_rule_info *cur_fltr,
5590 struct ice_adv_rule_info *new_fltr)
5592 enum ice_status status;
5593 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding cannot be combined into a VSI list. */
5595 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5596 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5597 return ICE_ERR_NOT_IMPL;
5599 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5600 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5601 return ICE_ERR_ALREADY_EXISTS;
5603 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5604 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5605 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5606 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5607 return ICE_ERR_NOT_IMPL;
5609 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5610 /* Only one entry existed in the mapping and it was not already
5611 * a part of a VSI list. So, create a VSI list with the old and
 */
/* NOTE(review): tmp_fltr is not zero-initialized before selective
 * field assignment below -- verify no uninitialized fields reach
 * ice_update_pkt_fwd_rule (an ice_memset would be safer).
 */
5614 struct ice_fltr_info tmp_fltr;
5615 u16 vsi_handle_arr[2];
5617 /* A rule already exists with the new VSI being added */
5618 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5619 new_fltr->sw_act.fwd_id.hw_vsi_id)
5620 return ICE_ERR_ALREADY_EXISTS;
5622 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5623 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5624 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5630 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5631 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5632 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5633 /* Update the previous switch rule of "forward to VSI" to
 */
5636 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book keeping: the filter now forwards to the new VSI list. */
5640 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5641 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5642 m_entry->vsi_list_info =
5643 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
/* Existing-VSI-list branch (the "} else {" line is elided). */
5646 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5648 if (!m_entry->vsi_list_info)
5651 /* A rule already exists with the new VSI being added */
5652 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5655 /* Update the previously created VSI list set with
5656 * the new VSI ID passed in
 */
5658 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5660 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5662 ice_aqc_opc_update_sw_rules,
5664 /* update VSI list mapping info with new VSI ID */
5666 ice_set_bit(vsi_handle,
5667 m_entry->vsi_list_info->vsi_map);
5670 m_entry->vsi_count++;
5675 * ice_add_adv_rule - helper function to create an advanced switch rule
5676 * @hw: pointer to the hardware structure
5677 * @lkups: information on the words that needs to be looked up. All words
5678 * together makes one recipe
5679 * @lkups_cnt: num of entries in the lkups array
5680 * @rinfo: other information related to the rule that needs to be programmed
5681 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5682 * ignored in case of error.
5684 * This function can program only 1 rule at a time. The lkups is used to
5685 * describe all the words that form the "lookup" portion of the recipe.
5686 * These words can span multiple protocols. Callers to this function need to
5687 * pass in a list of protocol headers with lookup information along and mask
5688 * that determines which words are valid from the given protocol header.
5689 * rinfo describes other information related to this rule such as forwarding
5690 * IDs, priority of this rule, etc.
5693 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5694 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5695 struct ice_rule_query_data *added_entry)
5697 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5698 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5699 const struct ice_dummy_pkt_offsets *pkt_offsets;
5700 struct ice_aqc_sw_rules_elem *s_rule = NULL;
5701 struct LIST_HEAD_TYPE *rule_head;
5702 struct ice_switch_info *sw;
5703 enum ice_status status;
5704 const u8 *pkt = NULL;
5710 return ICE_ERR_PARAM;
/* Reject any lookup element whose match-mask words are all zero; such
 * an entry would not constrain the match at all
 */
5712 for (i = 0; i < lkups_cnt; i++) {
5715 /* Validate match masks to make sure that there is something
5718 ptr = (u16 *)&lkups[i].m_u;
5719 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5726 return ICE_ERR_PARAM;
5728 /* make sure that we can locate a dummy packet */
5729 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5732 status = ICE_ERR_PARAM;
5733 goto err_ice_add_adv_rule;
/* Only VSI/queue/queue-group forwarding and drop actions are valid */
5736 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5737 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5738 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5739 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5742 vsi_handle = rinfo->sw_act.vsi_handle;
5743 if (!ice_is_vsi_valid(hw, vsi_handle))
5744 return ICE_ERR_PARAM;
/* Resolve the software VSI handle to the HW VSI number */
5746 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5747 rinfo->sw_act.fwd_id.hw_vsi_id =
5748 ice_get_hw_vsi_num(hw, vsi_handle);
/* For TX rules the source is the originating HW VSI */
5749 if (rinfo->sw_act.flag & ICE_FLTR_TX)
5750 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or locate) the recipe matching this set of lookups */
5752 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5755 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5757 /* we have to add VSI to VSI_LIST and increment vsi_count.
5758 * Also Update VSI list so that we can change forwarding rule
5759 * if the rule already exists, we will check if it exists with
5760 * same vsi_id, if not then add it to the VSI list if it already
5761 * exists if not then create a VSI list and add the existing VSI
5762 * ID and the new VSI ID to the list
5763 * We will add that VSI to the list
5765 status = ice_adv_add_update_vsi_list(hw, m_entry,
5766 &m_entry->rule_info,
/* Rule already existed; report its identifiers to the caller */
5769 added_entry->rid = rid;
5770 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5771 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No matching rule yet - build a new switch rule buffer sized to
 * carry the dummy packet header
 */
5775 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5776 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5778 return ICE_ERR_NO_MEMORY;
5779 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the requested action into the rule's 'act' word */
5780 switch (rinfo->sw_act.fltr_act) {
5781 case ICE_FWD_TO_VSI:
5782 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5783 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5784 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5787 act |= ICE_SINGLE_ACT_TO_Q;
5788 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5789 ICE_SINGLE_ACT_Q_INDEX_M;
5791 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size */
5792 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5793 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
5794 act |= ICE_SINGLE_ACT_TO_Q;
5795 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5796 ICE_SINGLE_ACT_Q_INDEX_M;
5797 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5798 ICE_SINGLE_ACT_Q_REGION_M;
5800 case ICE_DROP_PACKET:
5801 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5802 ICE_SINGLE_ACT_VALID_BIT;
5805 status = ICE_ERR_CFG;
5806 goto err_ice_add_adv_rule;
5809 /* set the rule LOOKUP type based on caller specified 'RX'
5810 * instead of hardcoding it to be either LOOKUP_TX/RX
5812 * for 'RX' set the source to be the port number
5813 * for 'TX' set the source to be the source HW VSI number (determined
5817 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5818 s_rule->pdata.lkup_tx_rx.src =
5819 CPU_TO_LE16(hw->port_info->lport);
5821 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5822 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5825 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5826 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet, patched with the caller's lookup values,
 * into the rule buffer
 */
5828 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
/* Program the rule in HW via the admin queue */
5831 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5832 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5835 goto err_ice_add_adv_rule;
/* Create the book-keeping entry; it owns a copy of the lookups */
5836 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5837 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5839 status = ICE_ERR_NO_MEMORY;
5840 goto err_ice_add_adv_rule;
5843 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5844 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5845 ICE_NONDMA_TO_NONDMA);
5846 if (!adv_fltr->lkups) {
5847 status = ICE_ERR_NO_MEMORY;
5848 goto err_ice_add_adv_rule;
5851 adv_fltr->lkups_cnt = lkups_cnt;
5852 adv_fltr->rule_info = *rinfo;
/* The rule ID assigned by FW comes back in the element's index field */
5853 adv_fltr->rule_info.fltr_rule_id =
5854 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5855 sw = hw->switch_info;
5856 sw->recp_list[rid].adv_rule = true;
5857 rule_head = &sw->recp_list[rid].filt_rules;
5859 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5860 struct ice_fltr_info tmp_fltr;
5862 tmp_fltr.fltr_rule_id =
5863 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5864 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5865 tmp_fltr.fwd_id.hw_vsi_id =
5866 ice_get_hw_vsi_num(hw, vsi_handle);
5867 tmp_fltr.vsi_handle = vsi_handle;
5868 /* Update the previous switch rule of "forward to VSI" to
5871 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5873 goto err_ice_add_adv_rule;
5874 adv_fltr->vsi_count = 1;
5877 /* Add rule entry to book keeping list */
5878 LIST_ADD(&adv_fltr->list_entry, rule_head);
5880 added_entry->rid = rid;
5881 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5882 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5884 err_ice_add_adv_rule:
/* On failure release the partially built book-keeping entry */
5885 if (status && adv_fltr) {
5886 ice_free(hw, adv_fltr->lkups);
5887 ice_free(hw, adv_fltr);
5890 ice_free(hw, s_rule);
5896 * ice_adv_rem_update_vsi_list
5897 * @hw: pointer to the hardware structure
5898 * @vsi_handle: VSI handle of the VSI to remove
5899 * @fm_list: filter management entry for which the VSI list management needs to
/* Remove one VSI from an advanced rule that forwards to a VSI list; when a
 * single VSI remains, the rule is demoted back to plain forward-to-VSI and
 * the now-unused VSI list is deleted.
 */
5902 static enum ice_status
5903 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5904 struct ice_adv_fltr_mgmt_list_entry *fm_list)
5906 struct ice_vsi_list_map_info *vsi_list_info;
5907 enum ice_sw_lkup_type lkup_type;
5908 enum ice_status status;
/* Only rules currently forwarding to a non-empty VSI list apply here */
5911 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5912 fm_list->vsi_count == 0)
5913 return ICE_ERR_PARAM;
5915 /* A rule with the VSI being removed does not exist */
5916 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5917 return ICE_ERR_DOES_NOT_EXIST;
5919 lkup_type = ICE_SW_LKUP_LAST;
5920 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Ask FW to drop this VSI from the shared VSI list */
5921 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5922 ice_aqc_opc_update_sw_rules,
5927 fm_list->vsi_count--;
5928 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5929 vsi_list_info = fm_list->vsi_list_info;
5930 if (fm_list->vsi_count == 1) {
/* Exactly one VSI remains - convert the rule back to a plain
 * "forward to VSI" rule targeting the remaining VSI
 */
5931 struct ice_fltr_info tmp_fltr;
5934 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
5936 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5937 return ICE_ERR_OUT_OF_RANGE;
5939 /* Make sure VSI list is empty before removing it below */
5940 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5942 ice_aqc_opc_update_sw_rules,
5946 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5947 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5948 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5949 tmp_fltr.fwd_id.hw_vsi_id =
5950 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5951 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5952 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5954 /* Update the previous switch rule of "MAC forward to VSI list"
5955 * to "MAC fwd to VSI"
5957 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5959 ice_debug(hw, ICE_DBG_SW,
5960 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5961 tmp_fltr.fwd_id.hw_vsi_id, status);
5966 if (fm_list->vsi_count == 1) {
5967 /* Remove the VSI list since it is no longer used */
5968 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5970 ice_debug(hw, ICE_DBG_SW,
5971 "Failed to remove VSI list %d, error %d\n",
5972 vsi_list_id, status);
/* Drop the now-unused VSI list map entry */
5976 LIST_DEL(&vsi_list_info->list_entry);
5977 ice_free(hw, vsi_list_info);
5978 fm_list->vsi_list_info = NULL;
5985 * ice_rem_adv_rule - removes existing advanced switch rule
5986 * @hw: pointer to the hardware structure
5987 * @lkups: information on the words that needs to be looked up. All words
5988 * together makes one recipe
5989 * @lkups_cnt: num of entries in the lkups array
5990 * @rinfo: Its the pointer to the rule information for the rule
5992 * This function can be used to remove 1 rule at a time. The lkups is
5993 * used to describe all the words that forms the "lookup" portion of the
5994 * rule. These words can span multiple protocols. Callers to this function
5995 * need to pass in a list of protocol headers with lookup information along
5996 * and mask that determines which words are valid from the given protocol
5997 * header. rinfo describes other information related to this rule such as
5998 * forwarding IDs, priority of this rule, etc.
6001 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6002 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6004 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6005 const struct ice_dummy_pkt_offsets *offsets;
6006 struct ice_prot_lkup_ext lkup_exts;
6007 u16 rule_buf_sz, pkt_len, i, rid;
6008 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6009 enum ice_status status = ICE_SUCCESS;
6010 bool remove_rule = false;
6011 const u8 *pkt = NULL;
/* Re-derive the recipe ID from the caller-supplied lookup words */
6014 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6015 for (i = 0; i < lkups_cnt; i++) {
6018 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6021 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6025 rid = ice_find_recp(hw, &lkup_exts);
6026 /* If did not find a recipe that match the existing criteria */
6027 if (rid == ICE_MAX_NUM_RECIPES)
6028 return ICE_ERR_PARAM;
6030 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6031 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6032 /* the rule is already removed */
6035 ice_acquire_lock(rule_lock);
6036 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6038 } else if (list_elem->vsi_count > 1) {
/* NOTE(review): ref_cnt is decremented without an underflow
 * check - confirm callers guarantee ref_cnt > 0 here
 */
6039 list_elem->vsi_list_info->ref_cnt--;
6040 remove_rule = false;
6041 vsi_handle = rinfo->sw_act.vsi_handle;
6042 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6044 vsi_handle = rinfo->sw_act.vsi_handle;
6045 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6047 ice_release_lock(rule_lock);
6050 if (list_elem->vsi_count == 0)
6053 ice_release_lock(rule_lock);
6055 struct ice_aqc_sw_rules_elem *s_rule;
/* Build a minimal rule element identifying the HW rule to delete */
6057 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
6058 &pkt_len, &offsets);
6059 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6061 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6064 return ICE_ERR_NO_MEMORY;
6065 s_rule->pdata.lkup_tx_rx.act = 0;
6066 s_rule->pdata.lkup_tx_rx.index =
6067 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6068 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6069 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6071 ice_aqc_opc_remove_sw_rules, NULL);
6072 if (status == ICE_SUCCESS) {
/* Rule is gone in HW; drop the book-keeping entry too */
6073 ice_acquire_lock(rule_lock);
6074 LIST_DEL(&list_elem->list_entry);
6075 ice_free(hw, list_elem->lkups);
6076 ice_free(hw, list_elem);
6077 ice_release_lock(rule_lock);
6079 ice_free(hw, s_rule);
6085 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6086 * @hw: pointer to the hardware structure
6087 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6089 * This function is used to remove 1 rule at a time. The removal is based on
6090 * the remove_entry parameter. This function will remove rule for a given
6091 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6094 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6095 struct ice_rule_query_data *remove_entry)
6097 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6098 struct LIST_HEAD_TYPE *list_head;
6099 struct ice_adv_rule_info rinfo;
6100 struct ice_switch_info *sw;
6102 sw = hw->switch_info;
/* The recipe must exist before any of its rules can be removed */
6103 if (!sw->recp_list[remove_entry->rid].recp_created)
6104 return ICE_ERR_PARAM;
6105 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Scan the recipe's rule list for the matching rule ID and remove it
 * for the requested VSI handle
 */
6106 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6108 if (list_itr->rule_info.fltr_rule_id ==
6109 remove_entry->rule_id) {
6110 rinfo = list_itr->rule_info;
6111 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6112 return ice_rem_adv_rule(hw, list_itr->lkups,
6113 list_itr->lkups_cnt, &rinfo);
/* No rule with the given ID exists on this recipe */
6116 return ICE_ERR_PARAM;
6120 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6122 * @hw: pointer to the hardware structure
6123 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6125 * This function is used to remove all the rules for a given VSI and as soon
6126 * as removing a rule fails, it will return immediately with the error code,
6127 * else it will return ICE_SUCCESS
6130 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6132 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6133 struct ice_vsi_list_map_info *map_info;
6134 struct LIST_HEAD_TYPE *list_head;
6135 struct ice_adv_rule_info rinfo;
6136 struct ice_switch_info *sw;
6137 enum ice_status status;
6138 u16 vsi_list_id = 0;
6141 sw = hw->switch_info;
/* Walk every created recipe that carries advanced rules */
6142 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6143 if (!sw->recp_list[rid].recp_created)
6145 if (!sw->recp_list[rid].adv_rule)
6147 list_head = &sw->recp_list[rid].filt_rules;
6149 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6150 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Check whether this rule's VSI list references vsi_handle */
6151 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
/* Remove the rule on behalf of the given VSI */
6155 rinfo = list_itr->rule_info;
6156 rinfo.sw_act.vsi_handle = vsi_handle;
6157 status = ice_rem_adv_rule(hw, list_itr->lkups,
6158 list_itr->lkups_cnt, &rinfo);
6168 * ice_replay_fltr - Replay all the filters stored by a specific list head
6169 * @hw: pointer to the hardware structure
6170 * @list_head: list for which filters needs to be replayed
6171 * @recp_id: Recipe ID for which rules need to be replayed
6173 static enum ice_status
6174 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6176 struct ice_fltr_mgmt_list_entry *itr;
6177 struct LIST_HEAD_TYPE l_head;
6178 enum ice_status status = ICE_SUCCESS;
6180 if (LIST_EMPTY(list_head))
6183 /* Move entries from the given list_head to a temporary l_head so that
6184 * they can be replayed. Otherwise when trying to re-add the same
6185 * filter, the function will return already exists
6187 LIST_REPLACE_INIT(list_head, &l_head);
6189 /* Mark the given list_head empty by reinitializing it so filters
6190 * could be added again by *handler
6192 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6194 struct ice_fltr_list_entry f_entry;
6196 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters are re-added directly */
6197 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6198 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6199 if (status != ICE_SUCCESS)
6204 /* Add a filter per VSI separately */
6209 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6211 if (!ice_is_vsi_valid(hw, vsi_handle))
6214 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6215 f_entry.fltr_info.vsi_handle = vsi_handle;
6216 f_entry.fltr_info.fwd_id.hw_vsi_id =
6217 ice_get_hw_vsi_num(hw, vsi_handle);
/* Replay as a plain forward-to-VSI rule for each member VSI */
6218 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6219 if (recp_id == ICE_SW_LKUP_VLAN)
6220 status = ice_add_vlan_internal(hw, &f_entry);
6222 status = ice_add_rule_internal(hw, recp_id,
6224 if (status != ICE_SUCCESS)
6229 /* Clear the filter management list */
6230 ice_rem_sw_rule_info(hw, &l_head);
6235 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6236 * @hw: pointer to the hardware structure
6238 * NOTE: This function does not clean up partially added filters on error.
6239 * It is up to caller of the function to issue a reset or fail early.
6241 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6243 struct ice_switch_info *sw = hw->switch_info;
6244 enum ice_status status = ICE_SUCCESS;
/* Replay the rule list of every recipe; stop on the first failure */
6247 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6248 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6250 status = ice_replay_fltr(hw, i, head);
6251 if (status != ICE_SUCCESS)
6258 * ice_replay_vsi_fltr - Replay filters for requested VSI
6259 * @hw: pointer to the hardware structure
6260 * @vsi_handle: driver VSI handle
6261 * @recp_id: Recipe ID for which rules need to be replayed
6262 * @list_head: list for which filters need to be replayed
6264 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6265 * It is required to pass valid VSI handle.
6267 static enum ice_status
6268 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6269 struct LIST_HEAD_TYPE *list_head)
6271 struct ice_fltr_mgmt_list_entry *itr;
6272 enum ice_status status = ICE_SUCCESS;
6275 if (LIST_EMPTY(list_head))
6277 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6279 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6281 struct ice_fltr_list_entry f_entry;
6283 f_entry.fltr_info = itr->fltr_info;
/* Filter was programmed for exactly this VSI - replay it as-is */
6284 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6285 itr->fltr_info.vsi_handle == vsi_handle) {
6286 /* update the src in case it is VSI num */
6287 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6288 f_entry.fltr_info.src = hw_vsi_id;
6289 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6290 if (status != ICE_SUCCESS)
/* Otherwise replay only when this VSI is a member of the rule's
 * VSI list
 */
6294 if (!itr->vsi_list_info ||
6295 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6297 /* Clearing it so that the logic can add it back */
6298 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6299 f_entry.fltr_info.vsi_handle = vsi_handle;
6300 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6301 /* update the src in case it is VSI num */
6302 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6303 f_entry.fltr_info.src = hw_vsi_id;
6304 if (recp_id == ICE_SW_LKUP_VLAN)
6305 status = ice_add_vlan_internal(hw, &f_entry);
6307 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6308 if (status != ICE_SUCCESS)
6316 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6317 * @hw: pointer to the hardware structure
6318 * @vsi_handle: driver VSI handle
6319 * @list_head: list for which filters need to be replayed
6321 * Replay the advanced rule for the given VSI.
6323 static enum ice_status
6324 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6325 struct LIST_HEAD_TYPE *list_head)
6327 struct ice_rule_query_data added_entry = { 0 };
6328 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6329 enum ice_status status = ICE_SUCCESS;
6331 if (LIST_EMPTY(list_head))
6333 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6335 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6336 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Replay only rules that were programmed for this VSI */
6338 if (vsi_handle != rinfo->sw_act.vsi_handle)
6340 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6349 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6350 * @hw: pointer to the hardware structure
6351 * @vsi_handle: driver VSI handle
6353 * Replays filters for requested VSI via vsi_handle.
6355 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6357 struct ice_switch_info *sw = hw->switch_info;
6358 enum ice_status status;
6361 /* Update the recipes that were created */
6362 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6363 struct LIST_HEAD_TYPE *head;
6365 head = &sw->recp_list[i].filt_replay_rules;
/* Advanced recipes replay through the advanced-rule path, all
 * others through the regular filter replay path
 */
6366 if (!sw->recp_list[i].adv_rule)
6367 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6369 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6370 if (status != ICE_SUCCESS)
6378 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6379 * @hw: pointer to the HW struct
6381 * Deletes the filter replay rules.
6383 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6385 struct ice_switch_info *sw = hw->switch_info;
6391 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6392 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6393 struct LIST_HEAD_TYPE *l_head;
6395 l_head = &sw->recp_list[i].filt_replay_rules;
6396 if (!sw->recp_list[i].adv_rule)
6397 ice_rem_sw_rule_info(hw, l_head);
6399 ice_rem_adv_rule_info(hw, l_head);