1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Byte sizes of the admin-queue switch-rule buffer for each rule layout.
 * Each macro is: sizeof the AQ element minus its flexible 'pdata' area,
 * plus the size of the actual rule payload. The trailing "- 1" terms
 * presumably compensate for a 1-byte placeholder member inside the
 * payload struct — TODO confirm against the ice_aqc_sw_rules_elem layout.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Same as above but with no dummy Ethernet header appended. */
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Large-action rule buffer sized for 'n' action entries. */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule buffer sized for 'n' VSI entries. */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
162 { ICE_PROTOCOL_LAST, 0 },
166 u8 dummy_udp_tun_tcp_packet[] = {
167 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
168 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
171 0x08, 0x00, /* ICE_ETYPE_OL 12 */
173 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
174 0x00, 0x01, 0x00, 0x00,
175 0x40, 0x11, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
180 0x00, 0x46, 0x00, 0x00,
182 0x04, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
186 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
190 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
191 0x00, 0x01, 0x00, 0x00,
192 0x40, 0x06, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
199 0x50, 0x02, 0x20, 0x00,
200 0x00, 0x00, 0x00, 0x00
204 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
206 { ICE_ETYPE_OL, 12 },
207 { ICE_IPV4_OFOS, 14 },
212 { ICE_UDP_ILOS, 84 },
213 { ICE_PROTOCOL_LAST, 0 },
217 u8 dummy_udp_tun_udp_packet[] = {
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_OL 12 */
224 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
225 0x00, 0x01, 0x00, 0x00,
226 0x00, 0x11, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
231 0x00, 0x3a, 0x00, 0x00,
233 0x0c, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
234 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
237 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00,
241 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
242 0x00, 0x01, 0x00, 0x00,
243 0x00, 0x11, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
248 0x00, 0x08, 0x00, 0x00,
252 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
254 { ICE_ETYPE_OL, 12 },
255 { ICE_IPV4_OFOS, 14 },
256 { ICE_UDP_ILOS, 34 },
257 { ICE_PROTOCOL_LAST, 0 },
261 dummy_udp_packet[] = {
262 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x08, 0x00, /* ICE_ETYPE_OL 12 */
268 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
269 0x00, 0x01, 0x00, 0x00,
270 0x00, 0x11, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
275 0x00, 0x08, 0x00, 0x00,
277 0x00, 0x00, /* 2 bytes for 4 byte alignment */
281 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
283 { ICE_ETYPE_OL, 12 },
284 { ICE_IPV4_OFOS, 14 },
286 { ICE_PROTOCOL_LAST, 0 },
290 dummy_tcp_packet[] = {
291 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
292 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00,
295 0x08, 0x00, /* ICE_ETYPE_OL 12 */
297 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
298 0x00, 0x01, 0x00, 0x00,
299 0x00, 0x06, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
306 0x50, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
313 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
315 { ICE_ETYPE_OL, 12 },
316 { ICE_IPV6_OFOS, 14 },
318 { ICE_PROTOCOL_LAST, 0 },
322 dummy_tcp_ipv6_packet[] = {
323 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
327 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
329 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
330 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
341 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00,
343 0x50, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, /* 2 bytes for 4 byte alignment */
350 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
352 { ICE_ETYPE_OL, 12 },
353 { ICE_IPV6_OFOS, 14 },
354 { ICE_UDP_ILOS, 54 },
355 { ICE_PROTOCOL_LAST, 0 },
359 dummy_udp_ipv6_packet[] = {
360 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
366 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
367 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
378 0x00, 0x08, 0x00, 0x00,
380 0x00, 0x00, /* 2 bytes for 4 byte alignment */
383 /* this is a recipe to profile bitmap association */
384 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
385 ICE_MAX_NUM_PROFILES);
/* Tracks chain field-vector result indexes; bits are cleared in
 * ice_get_recp_frm_fw() when FW reports a result index as in use.
 */
386 static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
/* Forward declaration: defined later in this file. */
388 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
391 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
392 * @hw: pointer to hardware structure
393 * @recps: struct that we need to populate
394 * @rid: recipe ID that we are populating
395 * @refresh_required: true if we should get recipe to profile mapping from FW
397 * This function is used to populate all the necessary entries into our
398 * bookkeeping so that we have a current list of all the recipes that are
399 * programmed in the firmware.
401 static enum ice_status
402 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
403 bool *refresh_required)
/* NOTE(review): this listing appears to have dropped several original
 * lines (the opening brace, some "if (...)" guards before early returns,
 * and closing braces); verify against upstream ice_switch.c before editing.
 */
405 u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
406 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
407 u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
408 struct ice_aqc_recipe_data_elem *tmp;
409 u16 num_recps = ICE_MAX_NUM_RECIPES;
410 struct ice_prot_lkup_ext *lkup_exts;
411 enum ice_status status;
413 /* we need a buffer big enough to accommodate all the recipes */
414 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
415 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
417 return ICE_ERR_NO_MEMORY;
/* Ask FW for the recipe; num_recps is updated with how many
 * sub-recipe entries were actually returned for this recipe ID.
 */
419 tmp[0].recipe_indx = rid;
420 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
421 /* non-zero status meaning recipe doesn't exist */
425 /* Get recipe to profile map so that we can get the fv from lkups that
426 * we read for a recipe from FW. Since we want to minimize the number of
427 * times we make this FW call, just make one call and cache the copy
428 * until a new recipe is added. This operation is only required the
429 * first time to get the changes from FW. Then to search existing
430 * entries we don't need to update the cache again until another recipe
433 if (*refresh_required) {
434 ice_get_recp_to_prof_map(hw);
435 *refresh_required = false;
437 lkup_exts = &recps[rid].lkup_exts;
438 /* start populating all the entries for recps[rid] based on lkups from
441 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
442 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
443 struct ice_recp_grp_entry *rg_entry;
444 u8 prof_id, prot = 0;
447 rg_entry = (struct ice_recp_grp_entry *)
448 ice_malloc(hw, sizeof(*rg_entry));
450 status = ICE_ERR_NO_MEMORY;
453 /* Avoid 8th bit since its result enable bit */
454 result_idxs[result_idx] = root_bufs.content.result_indx &
455 ~ICE_AQ_RECIPE_RESULT_EN;
456 /* Check if result enable bit is set */
457 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
458 ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
459 result_idxs[result_idx++],
460 available_result_ids);
462 recipe_to_profile[tmp[sub_recps].recipe_indx],
463 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
464 /* get the first profile that is associated with rid */
465 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped: entry 0 presumably holds the recipe's
 * own metadata rather than a lookup word — TODO confirm upstream.
 */
466 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
467 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
469 rg_entry->fv_idx[i] = lkup_indx;
470 rg_entry->fv_mask[i] =
471 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
473 /* If the recipe is a chained recipe then all its
474 * child recipe's result will have a result index.
475 * To fill fv_words we should not use those result
476 * index, we only need the protocol ids and offsets.
477 * We will skip all the fv_idx which stores result
478 * index in them. We also need to skip any fv_idx which
479 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
480 * valid offset value.
482 if (result_idxs[0] == rg_entry->fv_idx[i] ||
483 result_idxs[1] == rg_entry->fv_idx[i] ||
484 result_idxs[2] == rg_entry->fv_idx[i] ||
485 result_idxs[3] == rg_entry->fv_idx[i] ||
486 result_idxs[4] == rg_entry->fv_idx[i] ||
487 rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
488 rg_entry->fv_idx[i] == 0)
/* Translate the field-vector index into a protocol ID and
 * byte offset via the profile it belongs to.
 */
491 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
492 rg_entry->fv_idx[i], &prot, &off);
493 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
494 lkup_exts->fv_words[fv_word_idx].off = off;
497 /* populate rg_list with the data from the child entry of this
500 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
/* Record totals and cache the raw FW buffer for later use/replay. */
502 lkup_exts->n_val_words = fv_word_idx;
503 recps[rid].n_grp_count = num_recps;
504 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
505 ice_calloc(hw, recps[rid].n_grp_count,
506 sizeof(struct ice_aqc_recipe_data_elem));
507 if (!recps[rid].root_buf)
510 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
511 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
512 recps[rid].recp_created = true;
513 if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
514 recps[rid].root_rid = rid;
521 * ice_get_recp_to_prof_map - updates recipe to profile mapping
522 * @hw: pointer to hardware structure
524 * This function is used to populate recipe_to_profile matrix where index to
525 * this array is the recipe ID and the element is the mapping of which profiles
526 * is this recipe mapped to.
529 ice_get_recp_to_prof_map(struct ice_hw *hw)
531 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile; each reply is a bitmap of the recipes
 * that profile is associated with.
 */
534 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
537 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
538 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
/* Invert the FW's profile->recipes answer into the cached
 * recipe_to_profile[] matrix, which is indexed by recipe ID.
 */
541 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
542 if (ice_is_bit_set(r_bitmap, j))
543 ice_set_bit(i, recipe_to_profile[j]);
548 * ice_init_def_sw_recp - initialize the recipe book keeping tables
549 * @hw: pointer to the HW struct
551 * Allocate memory for the entire recipe table and initialize the structures/
552 * entries corresponding to basic recipes.
554 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
556 struct ice_sw_recipe *recps;
/* One zeroed bookkeeping entry per possible recipe ID. */
559 recps = (struct ice_sw_recipe *)
560 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
562 return ICE_ERR_NO_MEMORY;
/* Seed each entry with its own index as root and empty rule lists. */
564 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
565 recps[i].root_rid = i;
566 INIT_LIST_HEAD(&recps[i].filt_rules);
567 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
568 INIT_LIST_HEAD(&recps[i].rg_list);
569 ice_init_lock(&recps[i].filt_rule_lock);
572 hw->switch_info->recp_list = recps;
578 * ice_aq_get_sw_cfg - get switch configuration
579 * @hw: pointer to the hardware structure
580 * @buf: pointer to the result buffer
581 * @buf_size: length of the buffer available for response
582 * @req_desc: pointer to requested descriptor
583 * @num_elems: pointer to number of elements
584 * @cd: pointer to command details structure or NULL
586 * Get switch configuration (0x0200) to be placed in 'buff'.
587 * This admin command returns information such as initial VSI/port number
588 * and switch ID it belongs to.
590 * NOTE: *req_desc is both an input/output parameter.
591 * The caller of this function first calls this function with *request_desc set
592 * to 0. If the response from f/w has *req_desc set to 0, all the switch
593 * configuration information has been returned; if non-zero (meaning not all
594 * the information was returned), the caller should call this function again
595 * with *req_desc set to the previous value returned by f/w to get the
596 * next block of switch configuration information.
598 * *num_elems is output only parameter. This reflects the number of elements
599 * in response buffer. The caller of this function to use *num_elems while
600 * parsing the response buffer.
602 static enum ice_status
603 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
604 u16 buf_size, u16 *req_desc, u16 *num_elems,
605 struct ice_sq_cd *cd)
607 struct ice_aqc_get_sw_cfg *cmd;
608 enum ice_status status;
609 struct ice_aq_desc desc;
611 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
612 cmd = &desc.params.get_sw_conf;
/* *req_desc carries the continuation cookie in, and FW writes the
 * next cookie back into the same field (0 means no more data).
 */
613 cmd->element = CPU_TO_LE16(*req_desc);
615 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
617 *req_desc = LE16_TO_CPU(cmd->element);
618 *num_elems = LE16_TO_CPU(cmd->num_elems);
626 * ice_alloc_sw - allocate resources specific to switch
627 * @hw: pointer to the HW struct
628 * @ena_stats: true to turn on VEB stats
629 * @shared_res: true for shared resource, false for dedicated resource
630 * @sw_id: switch ID returned
631 * @counter_id: VEB counter ID returned
633 * allocates switch resources (SWID and VEB counter) (0x0208)
636 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
639 struct ice_aqc_alloc_free_res_elem *sw_buf;
640 struct ice_aqc_res_elem *sw_ele;
641 enum ice_status status;
644 buf_len = sizeof(*sw_buf);
645 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
646 ice_malloc(hw, buf_len);
648 return ICE_ERR_NO_MEMORY;
650 /* Prepare buffer for switch ID.
651 * The number of resource entries in buffer is passed as 1 since only a
652 * single switch/VEB instance is allocated, and hence a single sw_id
655 sw_buf->num_elems = CPU_TO_LE16(1);
657 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
658 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
659 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
661 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
662 ice_aqc_opc_alloc_res, NULL);
665 goto ice_alloc_sw_exit;
667 sw_ele = &sw_buf->elem[0];
668 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* NOTE(review): an "if (ena_stats)" guard around the VEB-counter
 * allocation below appears to be missing from this listing (the
 * ena_stats parameter is otherwise unused) — verify upstream.
 */
671 /* Prepare buffer for VEB Counter */
672 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
673 struct ice_aqc_alloc_free_res_elem *counter_buf;
674 struct ice_aqc_res_elem *counter_ele;
676 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
677 ice_malloc(hw, buf_len);
679 status = ICE_ERR_NO_MEMORY;
680 goto ice_alloc_sw_exit;
683 /* The number of resource entries in buffer is passed as 1 since
684 * only a single switch/VEB instance is allocated, and hence a
685 * single VEB counter is requested.
687 counter_buf->num_elems = CPU_TO_LE16(1);
688 counter_buf->res_type =
689 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
690 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
691 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
695 ice_free(hw, counter_buf);
696 goto ice_alloc_sw_exit;
698 counter_ele = &counter_buf->elem[0];
699 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
700 ice_free(hw, counter_buf);
704 ice_free(hw, sw_buf);
709 * ice_free_sw - free resources specific to switch
710 * @hw: pointer to the HW struct
711 * @sw_id: switch ID returned
712 * @counter_id: VEB counter ID returned
714 * free switch resources (SWID and VEB counter) (0x0209)
716 * NOTE: This function frees multiple resources. It continues
717 * releasing other resources even after it encounters error.
718 * The error code returned is the last error it encountered.
720 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
722 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
723 enum ice_status status, ret_status;
726 buf_len = sizeof(*sw_buf);
727 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
728 ice_malloc(hw, buf_len);
730 return ICE_ERR_NO_MEMORY;
732 /* Prepare buffer to free for switch ID res.
733 * The number of resource entries in buffer is passed as 1 since only a
734 * single switch/VEB instance is freed, and hence a single sw_id
737 sw_buf->num_elems = CPU_TO_LE16(1);
738 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
739 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
741 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
742 ice_aqc_opc_free_res, NULL);
745 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
747 /* Prepare buffer to free for VEB Counter resource */
748 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
749 ice_malloc(hw, buf_len);
751 ice_free(hw, sw_buf);
752 return ICE_ERR_NO_MEMORY;
/* Per the function contract above, the counter is freed even if the
 * SWID free failed; the last error encountered is what gets returned.
 */
755 /* The number of resource entries in buffer is passed as 1 since only a
756 * single switch/VEB instance is freed, and hence a single VEB counter
759 counter_buf->num_elems = CPU_TO_LE16(1);
760 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
761 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
763 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
764 ice_aqc_opc_free_res, NULL);
766 ice_debug(hw, ICE_DBG_SW,
767 "VEB counter resource could not be freed\n");
771 ice_free(hw, counter_buf);
772 ice_free(hw, sw_buf);
778 * @hw: pointer to the HW struct
779 * @vsi_ctx: pointer to a VSI context struct
780 * @cd: pointer to command details structure or NULL
782 * Add a VSI context to the hardware (0x0210)
785 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
786 struct ice_sq_cd *cd)
788 struct ice_aqc_add_update_free_vsi_resp *res;
789 struct ice_aqc_add_get_update_free_vsi *cmd;
790 struct ice_aq_desc desc;
791 enum ice_status status;
793 cmd = &desc.params.vsi_cmd;
794 res = &desc.params.add_update_free_vsi_res;
796 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* If the caller supplies a specific VSI number (not allocating from
 * the pool), pass it to FW with the valid bit set.
 */
798 if (!vsi_ctx->alloc_from_pool)
799 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
800 ICE_AQ_VSI_IS_VALID);
802 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* The command carries a read buffer (the VSI info section). */
804 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
806 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
807 sizeof(vsi_ctx->info), cd);
/* FW returns the allocated VSI number plus used/free pool counts
 * (a success guard appears to be elided in this listing).
 */
810 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
811 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
812 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
820 * @hw: pointer to the HW struct
821 * @vsi_ctx: pointer to a VSI context struct
822 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
823 * @cd: pointer to command details structure or NULL
825 * Free VSI context info from hardware (0x0213)
828 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
829 bool keep_vsi_alloc, struct ice_sq_cd *cd)
831 struct ice_aqc_add_update_free_vsi_resp *resp;
832 struct ice_aqc_add_get_update_free_vsi *cmd;
833 struct ice_aq_desc desc;
834 enum ice_status status;
836 cmd = &desc.params.vsi_cmd;
837 resp = &desc.params.add_update_free_vsi_res;
839 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
841 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): an "if (keep_vsi_alloc)" guard before the line below
 * appears to be missing from this listing — verify upstream.
 */
843 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
845 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
847 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
848 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
856 * @hw: pointer to the HW struct
857 * @vsi_ctx: pointer to a VSI context struct
858 * @cd: pointer to command details structure or NULL
860 * Update VSI context in the hardware (0x0211)
863 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
864 struct ice_sq_cd *cd)
866 struct ice_aqc_add_update_free_vsi_resp *resp;
867 struct ice_aqc_add_get_update_free_vsi *cmd;
868 struct ice_aq_desc desc;
869 enum ice_status status;
871 cmd = &desc.params.vsi_cmd;
872 resp = &desc.params.add_update_free_vsi_res;
874 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
876 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* The command carries a read buffer (the updated VSI info section). */
878 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
880 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
881 sizeof(vsi_ctx->info), cd);
884 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
885 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
892 * ice_is_vsi_valid - check whether the VSI is valid or not
893 * @hw: pointer to the HW struct
894 * @vsi_handle: VSI handle
896 * check whether the VSI is valid or not
898 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* Valid iff the handle is in range and its context slot is populated. */
900 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
904 * ice_get_hw_vsi_num - return the HW VSI number
905 * @hw: pointer to the HW struct
906 * @vsi_handle: VSI handle
908 * return the HW VSI number
909 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
911 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* No range/NULL check here: callers must validate via ice_is_vsi_valid(). */
913 return hw->vsi_ctx[vsi_handle]->vsi_num;
917 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
918 * @hw: pointer to the HW struct
919 * @vsi_handle: VSI handle
921 * return the VSI context entry for a given VSI handle
923 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* NULL for out-of-range handles; may also be NULL if never saved. */
925 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
929 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
930 * @hw: pointer to the HW struct
931 * @vsi_handle: VSI handle
932 * @vsi: VSI context pointer
934 * save the VSI context entry for a given VSI handle
937 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* Record the caller-provided context pointer for this handle. */
939 hw->vsi_ctx[vsi_handle] = vsi;
943 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
944 * @hw: pointer to the HW struct
945 * @vsi_handle: VSI handle
947 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
949 struct ice_vsi_ctx *vsi;
952 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free each traffic class's LAN queue context and clear the stale
 * pointer (a NULL check on 'vsi' appears elided in this listing).
 */
955 ice_for_each_traffic_class(i) {
956 if (vsi->lan_q_ctx[i]) {
957 ice_free(hw, vsi->lan_q_ctx[i]);
958 vsi->lan_q_ctx[i] = NULL;
964 * ice_clear_vsi_ctx - clear the VSI context entry
965 * @hw: pointer to the HW struct
966 * @vsi_handle: VSI handle
968 * clear the VSI context entry
970 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
972 struct ice_vsi_ctx *vsi;
974 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then drop the handle's slot.
 * NOTE(review): the free of 'vsi' itself (and the NULL guard) appears
 * to be elided in this listing — verify upstream for a potential leak.
 */
976 ice_clear_vsi_q_ctx(hw, vsi_handle);
978 hw->vsi_ctx[vsi_handle] = NULL;
983 * ice_clear_all_vsi_ctx - clear all the VSI context entries
984 * @hw: pointer to the HW struct
986 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* Clear every possible handle; ice_clear_vsi_ctx tolerates empty slots. */
990 for (i = 0; i < ICE_MAX_VSI; i++)
991 ice_clear_vsi_ctx(hw, i);
995 * ice_add_vsi - add VSI context to the hardware and VSI handle list
996 * @hw: pointer to the HW struct
997 * @vsi_handle: unique VSI handle provided by drivers
998 * @vsi_ctx: pointer to a VSI context struct
999 * @cd: pointer to command details structure or NULL
1001 * Add a VSI context to the hardware also add it into the VSI handle list.
1002 * If this function gets called after reset for existing VSIs then update
1003 * with the new HW VSI number in the corresponding VSI handle list entry.
1006 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1007 struct ice_sq_cd *cd)
1009 struct ice_vsi_ctx *tmp_vsi_ctx;
1010 enum ice_status status;
1012 if (vsi_handle >= ICE_MAX_VSI)
1013 return ICE_ERR_PARAM;
1014 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1017 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1019 /* Create a new VSI context */
1020 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1021 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Roll back the FW allocation if we cannot track it locally. */
1023 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1024 return ICE_ERR_NO_MEMORY;
1026 *tmp_vsi_ctx = *vsi_ctx;
1028 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* Handle already existed (e.g. after reset): just refresh its number. */
1030 /* update with new HW VSI num */
1031 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
1032 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1039 * ice_free_vsi- free VSI context from hardware and VSI handle list
1040 * @hw: pointer to the HW struct
1041 * @vsi_handle: unique VSI handle
1042 * @vsi_ctx: pointer to a VSI context struct
1043 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1044 * @cd: pointer to command details structure or NULL
1046 * Free VSI context info from hardware as well as from VSI handle list
1049 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1050 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1052 enum ice_status status;
1054 if (!ice_is_vsi_valid(hw, vsi_handle))
1055 return ICE_ERR_PARAM;
/* Resolve the handle to the HW VSI number before issuing the free. */
1056 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1057 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the local bookkeeping entry (a success guard appears elided
 * in this listing).
 */
1059 ice_clear_vsi_ctx(hw, vsi_handle);
1065 * @hw: pointer to the HW struct
1066 * @vsi_handle: unique VSI handle
1067 * @vsi_ctx: pointer to a VSI context struct
1068 * @cd: pointer to command details structure or NULL
1070 * Update VSI context in the hardware
1073 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1074 struct ice_sq_cd *cd)
1076 if (!ice_is_vsi_valid(hw, vsi_handle))
1077 return ICE_ERR_PARAM;
/* Resolve the handle to the HW VSI number, then forward to the AQ. */
1078 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1079 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1083 * ice_aq_get_vsi_params
1084 * @hw: pointer to the HW struct
1085 * @vsi_ctx: pointer to a VSI context struct
1086 * @cd: pointer to command details structure or NULL
1088 * Get VSI context info from hardware (0x0212)
1091 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1092 struct ice_sq_cd *cd)
1094 struct ice_aqc_add_get_update_free_vsi *cmd;
1095 struct ice_aqc_get_vsi_resp *resp;
1096 struct ice_aq_desc desc;
1097 enum ice_status status;
1099 cmd = &desc.params.vsi_cmd;
1100 resp = &desc.params.get_vsi_resp;
1102 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1104 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW fills vsi_ctx->info and the response returns the VSI number and
 * pool usage counts (a success guard appears elided in this listing).
 */
1106 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1107 sizeof(vsi_ctx->info), cd);
1109 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1111 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1112 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1119 * ice_aq_add_update_mir_rule - add/update a mirror rule
1120 * @hw: pointer to the HW struct
1121 * @rule_type: Rule Type
1122 * @dest_vsi: VSI number to which packets will be mirrored
1123 * @count: length of the list
1124 * @mr_buf: buffer for list of mirrored VSI numbers
1125 * @cd: pointer to command details structure or NULL
1128 * Add/Update Mirror Rule (0x260).
1131 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1132 u16 count, struct ice_mir_rule_buf *mr_buf,
1133 struct ice_sq_cd *cd, u16 *rule_id)
1135 struct ice_aqc_add_update_mir_rule *cmd;
1136 struct ice_aq_desc desc;
1137 enum ice_status status;
1138 __le16 *mr_list = NULL;
/* Validate argument combinations per rule type before touching FW. */
1141 switch (rule_type) {
1142 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1143 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1144 /* Make sure count and mr_buf are set for these rule_types */
1145 if (!(count && mr_buf))
1146 return ICE_ERR_PARAM;
1148 buf_size = count * sizeof(__le16);
1149 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1151 return ICE_ERR_NO_MEMORY;
1153 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1154 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1155 /* Make sure count and mr_buf are not set for these
1158 if (count || mr_buf)
1159 return ICE_ERR_PARAM;
1162 ice_debug(hw, ICE_DBG_SW,
1163 "Error due to unsupported rule_type %u\n", rule_type);
1164 return ICE_ERR_OUT_OF_RANGE;
1167 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1169 /* Pre-process 'mr_buf' items for add/update of virtual port
1170 * ingress/egress mirroring (but not physical port ingress/egress
1176 for (i = 0; i < count; i++) {
1179 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1181 /* Validate specified VSI number, make sure it is less
1182 * than ICE_MAX_VSI, if not return with error.
1184 if (id >= ICE_MAX_VSI) {
1185 ice_debug(hw, ICE_DBG_SW,
1186 "Error VSI index (%u) out-of-range\n",
1188 ice_free(hw, mr_list);
1189 return ICE_ERR_OUT_OF_RANGE;
1192 /* add VSI to mirror rule */
1195 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1196 else /* remove VSI from mirror rule */
1197 mr_list[i] = CPU_TO_LE16(id);
/* A valid incoming *rule_id means "update existing rule"; otherwise
 * FW allocates a new rule and returns its ID in the descriptor.
 */
1201 cmd = &desc.params.add_update_rule;
1202 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1203 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1204 ICE_AQC_RULE_ID_VALID_M);
1205 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1206 cmd->num_entries = CPU_TO_LE16(count);
1207 cmd->dest = CPU_TO_LE16(dest_vsi);
1209 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1211 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1213 ice_free(hw, mr_list);
1219  * ice_aq_delete_mir_rule - delete a mirror rule
1220  * @hw: pointer to the HW struct
1221  * @rule_id: Mirror rule ID (to be deleted)
1222  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1223  *		otherwise it is returned to the shared pool
1224  * @cd: pointer to command details structure or NULL
1226  * Delete Mirror Rule (0x261).
1229 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1230 		       struct ice_sq_cd *cd)
1232 	struct ice_aqc_delete_mir_rule *cmd;
1233 	struct ice_aq_desc desc;
1235 	/* rule_id should be in the range 0...63 */
1236 	if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1237 		return ICE_ERR_OUT_OF_RANGE;
1239 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1241 	cmd = &desc.params.del_rule;
	/* mark the rule-ID field as valid for firmware */
1242 	rule_id |= ICE_AQC_RULE_ID_VALID_M;
1243 	cmd->rule_id = CPU_TO_LE16(rule_id);
	/* flag set only on the keep_allocd path — the conditional guarding
	 * this assignment is on an elided line.
	 */
1246 		cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1248 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1252  * ice_aq_alloc_free_vsi_list
1253  * @hw: pointer to the HW struct
1254  * @vsi_list_id: VSI list ID returned or used for lookup
1255  * @lkup_type: switch rule filter lookup type
1256  * @opc: switch rules population command type - pass in the command opcode
1258  * allocates or free a VSI list resource
1260 static enum ice_status
1261 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1262 			   enum ice_sw_lkup_type lkup_type,
1263 			   enum ice_adminq_opc opc)
1265 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1266 	struct ice_aqc_res_elem *vsi_ele;
1267 	enum ice_status status;
1270 	buf_len = sizeof(*sw_buf);
1271 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1272 		ice_malloc(hw, buf_len);
	/* allocation-failure path (the NULL test is on an elided line) */
1274 		return ICE_ERR_NO_MEMORY;
1275 	sw_buf->num_elems = CPU_TO_LE16(1);
	/* Choose the resource type from the lookup type: all non-VLAN lookup
	 * types use the VSI-list-replication resource; VLAN lookups use the
	 * prune-list resource; anything else is a parameter error.
	 */
1277 	if (lkup_type == ICE_SW_LKUP_MAC ||
1278 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1279 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1280 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1281 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1282 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1283 	    lkup_type == ICE_SW_LKUP_LAST) {
1284 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1285 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
1287 			CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1289 		status = ICE_ERR_PARAM;
1290 		goto ice_aq_alloc_free_vsi_list_exit;
	/* Freeing requires telling firmware which list ID to release */
1293 	if (opc == ice_aqc_opc_free_res)
1294 		sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1296 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1298 		goto ice_aq_alloc_free_vsi_list_exit;
	/* On allocation, hand the FW-chosen list ID back to the caller */
1300 	if (opc == ice_aqc_opc_alloc_res) {
1301 		vsi_ele = &sw_buf->elem[0];
1302 		*vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1305 ice_aq_alloc_free_vsi_list_exit:
1306 	ice_free(hw, sw_buf);
1311  * ice_aq_set_storm_ctrl - Sets storm control configuration
1312  * @hw: pointer to the HW struct
1313  * @bcast_thresh: represents the upper threshold for broadcast storm control
1314  * @mcast_thresh: represents the upper threshold for multicast storm control
1315  * @ctl_bitmask: storm control control knobs
1317  * Sets the storm control configuration (0x0280)
1320 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1323 	struct ice_aqc_storm_cfg *cmd;
1324 	struct ice_aq_desc desc;
1326 	cmd = &desc.params.storm_conf;
1328 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
	/* thresholds are masked to the HW-supported field width before send */
1330 	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1331 	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1332 	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1334 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1338  * ice_aq_get_storm_ctrl - gets storm control configuration
1339  * @hw: pointer to the HW struct
1340  * @bcast_thresh: represents the upper threshold for broadcast storm control
1341  * @mcast_thresh: represents the upper threshold for multicast storm control
1342  * @ctl_bitmask: storm control control knobs
1344  * Gets the storm control configuration (0x0281)
1347 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1350 	enum ice_status status;
1351 	struct ice_aq_desc desc;
1353 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1355 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	/* On success, read the response out of the descriptor; the success
	 * test and the per-output-pointer NULL checks are on elided lines.
	 */
1357 		struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1360 			*bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1363 			*mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1366 			*ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1373  * ice_aq_sw_rules - add/update/remove switch rules
1374  * @hw: pointer to the HW struct
1375  * @rule_list: pointer to switch rule population list
1376  * @rule_list_sz: total size of the rule list in bytes
1377  * @num_rules: number of switch rules in the rule_list
1378  * @opc: switch rules population command type - pass in the command opcode
1379  * @cd: pointer to command details structure or NULL
1381  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1383 static enum ice_status
1384 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1385 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1387 	struct ice_aq_desc desc;
1389 	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
	/* only the three switch-rule opcodes are legal here */
1391 	if (opc != ice_aqc_opc_add_sw_rules &&
1392 	    opc != ice_aqc_opc_update_sw_rules &&
1393 	    opc != ice_aqc_opc_remove_sw_rules)
1394 		return ICE_ERR_PARAM;
1396 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	/* RD flag: the attached buffer carries data from driver to firmware */
1398 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1399 	desc.params.sw_rules.num_rules_fltr_entry_index =
1400 		CPU_TO_LE16(num_rules);
1401 	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1405  * ice_aq_add_recipe - add switch recipe
1406  * @hw: pointer to the HW struct
1407  * @s_recipe_list: pointer to switch rule population list
1408  * @num_recipes: number of switch recipes in the list
1409  * @cd: pointer to command details structure or NULL
1414 ice_aq_add_recipe(struct ice_hw *hw,
1415 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1416 		  u16 num_recipes, struct ice_sq_cd *cd)
1418 	struct ice_aqc_add_get_recipe *cmd;
1419 	struct ice_aq_desc desc;
1422 	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1423 	cmd = &desc.params.add_get_recipe;
1424 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1426 	cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
	/* RD flag: the recipe buffer is sent from driver to firmware */
1427 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	/* buffer holds num_recipes fixed-size recipe elements */
1429 	buf_size = num_recipes * sizeof(*s_recipe_list);
1431 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1435  * ice_aq_get_recipe - get switch recipe
1436  * @hw: pointer to the HW struct
1437  * @s_recipe_list: pointer to switch rule population list
1438  * @num_recipes: pointer to the number of recipes (input and output)
1439  * @recipe_root: root recipe number of recipe(s) to retrieve
1440  * @cd: pointer to command details structure or NULL
1444  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1445  * On output, *num_recipes will equal the number of entries returned in
1448  * The caller must supply enough space in s_recipe_list to hold all possible
1449  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1452 ice_aq_get_recipe(struct ice_hw *hw,
1453 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
1454 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1456 	struct ice_aqc_add_get_recipe *cmd;
1457 	struct ice_aq_desc desc;
1458 	enum ice_status status;
	/* enforce the documented contract: caller sizes for ALL recipes */
1461 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
1462 		return ICE_ERR_PARAM;
1464 	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1465 	cmd = &desc.params.add_get_recipe;
1466 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1468 	cmd->return_index = CPU_TO_LE16(recipe_root);
1469 	cmd->num_sub_recipes = 0;
1471 	buf_size = *num_recipes * sizeof(*s_recipe_list);
1473 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1474 	/* cppcheck-suppress constArgument */
	/* firmware writes back how many sub-recipes it actually returned */
1475 	*num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1481  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1482  * @hw: pointer to the HW struct
1483  * @profile_id: package profile ID to associate the recipe with
1484  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1485  * @cd: pointer to command details structure or NULL
1486  * Recipe to profile association (0x0291)
1489 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1490 			     struct ice_sq_cd *cd)
1492 	struct ice_aqc_recipe_to_profile *cmd;
1493 	struct ice_aq_desc desc;
1495 	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1496 	cmd = &desc.params.recipe_to_profile;
1497 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	/* note: the u32 profile_id is narrowed to 16 bits by this conversion */
1498 	cmd->profile_id = CPU_TO_LE16(profile_id);
1499 	/* Set the recipe ID bit in the bitmask to let the device know which
1500 	 * profile we are associating the recipe to
1502 	ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1503 		   ICE_NONDMA_TO_NONDMA);
1505 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1509  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1510  * @hw: pointer to the HW struct
1511  * @profile_id: package profile ID to associate the recipe with
1512  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1513  * @cd: pointer to command details structure or NULL
1514  * Associate profile ID with given recipe (0x0293)
1517 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1518 			     struct ice_sq_cd *cd)
1520 	struct ice_aqc_recipe_to_profile *cmd;
1521 	struct ice_aq_desc desc;
1522 	enum ice_status status;
1524 	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1525 	cmd = &desc.params.recipe_to_profile;
1526 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	/* note: the u32 profile_id is narrowed to 16 bits by this conversion */
1527 	cmd->profile_id = CPU_TO_LE16(profile_id);
1529 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* copy the recipe bitmap out of the response; the success test
	 * guarding this copy is on an elided line.
	 */
1531 		ice_memcpy(r_bitmap, cmd->recipe_assoc,
1532 			   sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1538  * ice_alloc_recipe - add recipe resource
1539  * @hw: pointer to the hardware structure
1540  * @rid: recipe ID returned as response to AQ call
1542 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1544 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1545 	enum ice_status status;
1548 	buf_len = sizeof(*sw_buf);
1549 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	/* allocation-failure path (the NULL test is on an elided line) */
1551 		return ICE_ERR_NO_MEMORY;
1553 	sw_buf->num_elems = CPU_TO_LE16(1);
	/* request one recipe resource, shared across functions */
1554 	sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1555 					ICE_AQC_RES_TYPE_S) |
1556 				       ICE_AQC_RES_TYPE_FLAG_SHARED);
1557 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1558 				       ice_aqc_opc_alloc_res, NULL);
	/* FW returns the allocated recipe ID; the success test guarding this
	 * store is on an elided line.
	 */
1560 		*rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1561 	ice_free(hw, sw_buf);
1566 /* ice_init_port_info - Initialize port_info with switch configuration data
1567  * @pi: pointer to port_info
1568  * @vsi_port_num: VSI number or port number
1569  * @type: Type of switch element (port or VSI)
1570  * @swid: switch ID of the switch the element is attached to
1571  * @pf_vf_num: PF or VF number
1572  * @is_vf: true if the element is a VF, false otherwise
1575 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1576 		   u16 swid, u16 pf_vf_num, bool is_vf)
	/* dispatch on element type; only the physical-port arm and the
	 * default (error) arm are visible in this excerpt
	 */
1579 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		/* low bits of vsi_port_num carry the logical port number */
1580 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1582 		pi->pf_vf_num = pf_vf_num;
		/* no default Tx/Rx VSI configured yet */
1584 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1585 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1588 		ice_debug(pi->hw, ICE_DBG_SW,
1589 			  "incorrect VSI/port type received\n");
1594 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1595  * @hw: pointer to the hardware structure
1597 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1599 	struct ice_aqc_get_sw_cfg_resp *rbuf;
1600 	enum ice_status status;
1601 	u16 num_total_ports;
	/* this driver expects exactly one physical port per function */
1607 	num_total_ports = 1;
1609 	rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1610 		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
	/* allocation-failure path (the NULL test is on an elided line) */
1613 		return ICE_ERR_NO_MEMORY;
1615 	/* Multiple calls to ice_aq_get_sw_cfg may be required
1616 	 * to get all the switch configuration information. The need
1617 	 * for additional calls is indicated by ice_aq_get_sw_cfg
1618 	 * writing a non-zero value in req_desc
1621 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1622 					   &req_desc, &num_elems, NULL);
1627 		for (i = 0; i < num_elems; i++) {
1628 			struct ice_aqc_get_sw_cfg_resp_elem *ele;
1629 			u16 pf_vf_num, swid, vsi_port_num;
1633 			ele = rbuf[i].elements;
			/* decode the packed response element fields */
1634 			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1635 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1637 			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1638 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1640 			swid = LE16_TO_CPU(ele->swid);
1642 			if (LE16_TO_CPU(ele->pf_vf_num) &
1643 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
			/* element type lives in the high bits of vsi_port_num */
1646 			type = LE16_TO_CPU(ele->vsi_port_num) >>
1647 				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1650 			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1651 			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1652 				if (j == num_total_ports) {
1653 					ice_debug(hw, ICE_DBG_SW,
1654 						  "more ports than expected\n");
1655 					status = ICE_ERR_CFG;
1658 				ice_init_port_info(hw->port_info,
1659 						   vsi_port_num, type, swid,
	/* keep fetching while FW indicates more descriptors and no error */
1667 	} while (req_desc && !status);
1671 	ice_free(hw, (void *)rbuf);
1677  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1678  * @hw: pointer to the hardware structure
1679  * @fi: filter info structure to fill/update
1681  * This helper function populates the lb_en and lan_en elements of the provided
1682  * ice_fltr_info struct using the switch's type and characteristics of the
1683  * switch rule being configured.
1685 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
	/* only Tx-direction forwarding filters get lb_en/lan_en adjusted */
1689 	if ((fi->flag & ICE_FLTR_TX) &&
1690 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
1691 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1692 	     fi->fltr_act == ICE_FWD_TO_Q ||
1693 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
1694 		/* Setting LB for prune actions will result in replicated
1695 		 * packets to the internal switch that will be dropped.
1697 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1700 		/* Set lan_en to TRUE if
1701 		 * 1. The switch is a VEB AND
1703 		 * 2.1 The lookup is a directional lookup like ethertype,
1704 		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
1705 		 *     and default-port OR
1706 		 * 2.2 The lookup is VLAN, OR
1707 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1708 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1712 		 * The switch is a VEPA.
1714 		 * In all other cases, the LAN enable has to be set to false.
		/* the VEB/VEPA test guarding this condition is on elided lines */
1717 		if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1718 		    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1719 		    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1720 		    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1721 		    fi->lkup_type == ICE_SW_LKUP_DFLT ||
1722 		    fi->lkup_type == ICE_SW_LKUP_VLAN ||
1723 		    (fi->lkup_type == ICE_SW_LKUP_MAC &&
1724 		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1725 		    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1726 		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1735 * ice_ilog2 - Calculates integer log base 2 of a number
1736 * @n: number on which to perform operation
1738 static int ice_ilog2(u64 n)
1742 for (i = 63; i >= 0; i--)
1743 if (((u64)1 << i) & n)
1750  * ice_fill_sw_rule - Helper function to fill switch rule structure
1751  * @hw: pointer to the hardware structure
1752  * @f_info: entry containing packet forwarding information
1753  * @s_rule: switch rule structure to be filled in based on mac_entry
1754  * @opc: switch rules population command type - pass in the command opcode
1757 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1758 		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
	/* sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
1760 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	/* removal only needs the rule index — no header or action rebuild */
1768 	if (opc == ice_aqc_opc_remove_sw_rules) {
1769 		s_rule->pdata.lkup_tx_rx.act = 0;
1770 		s_rule->pdata.lkup_tx_rx.index =
1771 			CPU_TO_LE16(f_info->fltr_rule_id);
1772 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1776 	eth_hdr_sz = sizeof(dummy_eth_header);
1777 	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1779 	/* initialize the ether header with a dummy header */
1780 	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1781 	ice_fill_sw_info(hw, f_info);
	/* build the 32-bit action word from the requested forwarding action */
1783 	switch (f_info->fltr_act) {
1784 	case ICE_FWD_TO_VSI:
1785 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1786 			ICE_SINGLE_ACT_VSI_ID_M;
1787 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1788 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1789 				ICE_SINGLE_ACT_VALID_BIT;
1791 	case ICE_FWD_TO_VSI_LIST:
1792 		act |= ICE_SINGLE_ACT_VSI_LIST;
1793 		act |= (f_info->fwd_id.vsi_list_id <<
1794 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1795 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
1796 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1797 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1798 				ICE_SINGLE_ACT_VALID_BIT;
1801 		act |= ICE_SINGLE_ACT_TO_Q;
1802 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1803 			ICE_SINGLE_ACT_Q_INDEX_M;
1805 	case ICE_DROP_PACKET:
1806 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1807 			ICE_SINGLE_ACT_VALID_BIT;
1809 	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
1810 		q_rgn = f_info->qgrp_size > 0 ?
1811 			(u8)ice_ilog2(f_info->qgrp_size) : 0;
1812 		act |= ICE_SINGLE_ACT_TO_Q;
1813 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1814 			ICE_SINGLE_ACT_Q_INDEX_M;
1815 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1816 			ICE_SINGLE_ACT_Q_REGION_M;
	/* lb_en/lan_en were decided by ice_fill_sw_info(); the conditionals
	 * guarding these two ORs are on elided lines
	 */
1823 		act |= ICE_SINGLE_ACT_LB_ENABLE;
1825 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* pick the match fields (DA / VLAN / ethertype) per lookup type */
1827 	switch (f_info->lkup_type) {
1828 	case ICE_SW_LKUP_MAC:
1829 		daddr = f_info->l_data.mac.mac_addr;
1831 	case ICE_SW_LKUP_VLAN:
1832 		vlan_id = f_info->l_data.vlan.vlan_id;
1833 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1834 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1835 			act |= ICE_SINGLE_ACT_PRUNE;
1836 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1839 	case ICE_SW_LKUP_ETHERTYPE_MAC:
1840 		daddr = f_info->l_data.ethertype_mac.mac_addr;
1842 	case ICE_SW_LKUP_ETHERTYPE:
1843 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1844 		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1846 	case ICE_SW_LKUP_MAC_VLAN:
1847 		daddr = f_info->l_data.mac_vlan.mac_addr;
1848 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
1850 	case ICE_SW_LKUP_PROMISC_VLAN:
1851 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
1853 	case ICE_SW_LKUP_PROMISC:
1854 		daddr = f_info->l_data.mac_vlan.mac_addr;
1860 	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1861 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1862 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1864 	/* Recipe set depending on lookup type */
1865 	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1866 	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1867 	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	/* patch the destination MAC into the dummy header when one was chosen;
	 * the NULL test guarding this copy is on an elided line
	 */
1870 		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1871 			   ICE_NONDMA_TO_NONDMA);
	/* program the VLAN TCI only when a valid VLAN ID was selected above */
1873 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1874 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1875 		*off = CPU_TO_BE16(vlan_id);
1878 	/* Create the switch rule with the final dummy Ethernet header */
1879 	if (opc != ice_aqc_opc_update_sw_rules)
1880 		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1884  * ice_add_marker_act
1885  * @hw: pointer to the hardware structure
1886  * @m_ent: the management entry for which sw marker needs to be added
1887  * @sw_marker: sw marker to tag the Rx descriptor with
1888  * @l_id: large action resource ID
1890  * Create a large action to hold software marker and update the switch rule
1891  * entry pointed by m_ent with newly created large action
1893 static enum ice_status
1894 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1895 		   u16 sw_marker, u16 l_id)
1897 	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1898 	/* For software marker we need 3 large actions
1899 	 * 1. FWD action: FWD TO VSI or VSI LIST
1900 	 * 2. GENERIC VALUE action to hold the profile ID
1901 	 * 3. GENERIC VALUE action to hold the software marker ID
1903 	const u16 num_lg_acts = 3;
1904 	enum ice_status status;
	/* markers are only supported on MAC lookup rules */
1910 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1911 		return ICE_ERR_PARAM;
1913 	/* Create two back-to-back switch rules and submit them to the HW using
1914 	 * one memory buffer:
1918 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1919 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1920 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	/* allocation-failure path (the NULL test is on an elided line) */
1922 		return ICE_ERR_NO_MEMORY;
	/* the lookup rule lives directly after the large action in the buffer */
1924 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1926 	/* Fill in the first switch rule i.e. large action */
1927 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1928 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1929 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
1931 	/* First action VSI forwarding or VSI list forwarding depending on how
1934 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1935 		m_ent->fltr_info.fwd_id.hw_vsi_id;
1937 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1938 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1939 		ICE_LG_ACT_VSI_LIST_ID_M;
1940 	if (m_ent->vsi_count > 1)
1941 		act |= ICE_LG_ACT_VSI_LIST;
1942 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1944 	/* Second action descriptor type */
1945 	act = ICE_LG_ACT_GENERIC;
1947 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1948 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
	/* offset selects the Rx-descriptor profile-index field */
1950 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1951 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1953 	/* Third action Marker value */
1954 	act |= ICE_LG_ACT_GENERIC;
1955 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1956 		ICE_LG_ACT_GENERIC_VALUE_M;
1958 	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
1960 	/* call the fill switch rule to fill the lookup Tx Rx structure */
1961 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1962 			 ice_aqc_opc_update_sw_rules);
1964 	/* Update the action to point to the large action ID */
1965 	rx_tx->pdata.lkup_tx_rx.act =
1966 		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
1967 			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1968 			     ICE_SINGLE_ACT_PTR_VAL_M));
1970 	/* Use the filter rule ID of the previously created rule with single
1971 	 * act. Once the update happens, hardware will treat this as large
1974 	rx_tx->pdata.lkup_tx_rx.index =
1975 		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
1977 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1978 				 ice_aqc_opc_update_sw_rules, NULL);
	/* remember the large-action binding on success; the success test
	 * guarding these stores is on an elided line
	 */
1980 		m_ent->lg_act_idx = l_id;
1981 		m_ent->sw_marker_id = sw_marker;
1984 	ice_free(hw, lg_act);
1989  * ice_add_counter_act - add/update filter rule with counter action
1990  * @hw: pointer to the hardware structure
1991  * @m_ent: the management entry for which counter needs to be added
1992  * @counter_id: VLAN counter ID returned as part of allocate resource
1993  * @l_id: large action resource ID
1995 static enum ice_status
1996 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1997 		    u16 counter_id, u16 l_id)
1999 	struct ice_aqc_sw_rules_elem *lg_act;
2000 	struct ice_aqc_sw_rules_elem *rx_tx;
2001 	enum ice_status status;
2002 	/* 2 actions will be added while adding a large action counter */
2003 	const int num_acts = 2;
	/* counters are only supported on MAC lookup rules */
2010 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2011 		return ICE_ERR_PARAM;
2013 	/* Create two back-to-back switch rules and submit them to the HW using
2014 	 * one memory buffer:
2018 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2019 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2020 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
	/* allocation-failure path (the NULL test is on an elided line) */
2023 		return ICE_ERR_NO_MEMORY;
	/* the lookup rule lives directly after the large action in the buffer */
2025 	rx_tx = (struct ice_aqc_sw_rules_elem *)
2026 		((u8 *)lg_act + lg_act_size);
2028 	/* Fill in the first switch rule i.e. large action */
2029 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2030 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2031 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2033 	/* First action VSI forwarding or VSI list forwarding depending on how
2036 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2037 		m_ent->fltr_info.fwd_id.hw_vsi_id;
2039 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2040 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2041 		ICE_LG_ACT_VSI_LIST_ID_M;
2042 	if (m_ent->vsi_count > 1)
2043 		act |= ICE_LG_ACT_VSI_LIST;
2044 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2046 	/* Second action counter ID */
2047 	act = ICE_LG_ACT_STAT_COUNT;
2048 	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2049 		ICE_LG_ACT_STAT_COUNT_M;
2050 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2052 	/* call the fill switch rule to fill the lookup Tx Rx structure */
2053 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2054 			 ice_aqc_opc_update_sw_rules);
	/* rewrite the lookup rule's action to point at the large action */
2056 	act = ICE_SINGLE_ACT_PTR;
2057 	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2058 	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2060 	/* Use the filter rule ID of the previously created rule with single
2061 	 * act. Once the update happens, hardware will treat this as large
2064 	f_rule_id = m_ent->fltr_info.fltr_rule_id;
2065 	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2067 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2068 				 ice_aqc_opc_update_sw_rules, NULL);
	/* record the counter binding on success; the success test guarding
	 * these stores is on an elided line
	 */
2070 		m_ent->lg_act_idx = l_id;
2071 		m_ent->counter_index = counter_id;
2074 	ice_free(hw, lg_act);
2079  * ice_create_vsi_list_map
2080  * @hw: pointer to the hardware structure
2081  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2082  * @num_vsi: number of VSI handles in the array
2083  * @vsi_list_id: VSI list ID generated as part of allocate resource
2085  * Helper function to create a new entry of VSI list ID to VSI mapping
2086  * using the given VSI list ID
2088 static struct ice_vsi_list_map_info *
2089 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2092 	struct ice_switch_info *sw = hw->switch_info;
2093 	struct ice_vsi_list_map_info *v_map;
2096 	v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2101 	v_map->vsi_list_id = vsi_list_id;
	/* record each handle in the map's VSI bitmap */
2103 	for (i = 0; i < num_vsi; i++)
2104 		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
	/* track the mapping on the switch-wide list of VSI-list maps */
2106 	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2111  * ice_update_vsi_list_rule
2112  * @hw: pointer to the hardware structure
2113  * @vsi_handle_arr: array of VSI handles to form a VSI list
2114  * @num_vsi: number of VSI handles in the array
2115  * @vsi_list_id: VSI list ID generated as part of allocate resource
2116  * @remove: Boolean value to indicate if this is a remove action
2117  * @opc: switch rules population command type - pass in the command opcode
2118  * @lkup_type: lookup type of the filter
2120  * Call AQ command to add a new switch rule or update existing switch rule
2121  * using the given VSI list ID
2123 static enum ice_status
2124 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2125 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2126 			 enum ice_sw_lkup_type lkup_type)
2128 	struct ice_aqc_sw_rules_elem *s_rule;
2129 	enum ice_status status;
	/* parameter-validation failure path (the test is on an elided line) */
2135 		return ICE_ERR_PARAM;
	/* Select the rule type: non-VLAN lookups use the VSI-list set/clear
	 * rules; VLAN lookups use the prune-list set/clear rules.
	 */
2137 	if (lkup_type == ICE_SW_LKUP_MAC ||
2138 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2139 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2140 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2141 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2142 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2143 	    lkup_type == ICE_SW_LKUP_LAST)
2144 		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2145 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2146 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2147 		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2148 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2150 		return ICE_ERR_PARAM;
2152 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2153 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	/* allocation-failure path (the NULL test is on an elided line) */
2155 		return ICE_ERR_NO_MEMORY;
2156 	for (i = 0; i < num_vsi; i++) {
2157 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2158 			status = ICE_ERR_PARAM;
2161 		/* AQ call requires hw_vsi_id(s) */
2162 		s_rule->pdata.vsi_list.vsi[i] =
2163 			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2166 	s_rule->type = CPU_TO_LE16(type);
2167 	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2168 	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2170 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2173 	ice_free(hw, s_rule);
2178  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2179  * @hw: pointer to the HW struct
2180  * @vsi_handle_arr: array of VSI handles to form a VSI list
2181  * @num_vsi: number of VSI handles in the array
2182  * @vsi_list_id: stores the ID of the VSI list to be created
2183  * @lkup_type: switch rule filter's lookup type
2185 static enum ice_status
2186 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2187 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2189 	enum ice_status status;
	/* first allocate the VSI list resource from firmware */
2191 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2192 					    ice_aqc_opc_alloc_res);
2196 	/* Update the newly created VSI list to include the specified VSIs */
2197 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2198 					*vsi_list_id, false,
2199 					ice_aqc_opc_add_sw_rules, lkup_type);
2203  * ice_create_pkt_fwd_rule
2204  * @hw: pointer to the hardware structure
2205  * @f_entry: entry containing packet forwarding information
2207  * Create switch rule with given filter information and add an entry
2208  * to the corresponding filter management list to track this switch rule
2211 static enum ice_status
2212 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2213 			struct ice_fltr_list_entry *f_entry)
2215 	struct ice_fltr_mgmt_list_entry *fm_entry;
2216 	struct ice_aqc_sw_rules_elem *s_rule;
2217 	enum ice_sw_lkup_type l_type;
2218 	struct ice_sw_recipe *recp;
2219 	enum ice_status status;
2221 	s_rule = (struct ice_aqc_sw_rules_elem *)
2222 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
	/* allocation-failure path (the NULL test is on an elided line) */
2224 		return ICE_ERR_NO_MEMORY;
2225 	fm_entry = (struct ice_fltr_mgmt_list_entry *)
2226 		ice_malloc(hw, sizeof(*fm_entry));
2228 		status = ICE_ERR_NO_MEMORY;
2229 		goto ice_create_pkt_fwd_rule_exit;
2232 	fm_entry->fltr_info = f_entry->fltr_info;
2234 	/* Initialize all the fields for the management entry */
2235 	fm_entry->vsi_count = 1;
2236 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2237 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2238 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2240 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2241 			 ice_aqc_opc_add_sw_rules);
2243 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2244 				 ice_aqc_opc_add_sw_rules, NULL);
	/* on AQ failure discard the management entry and bail out */
2246 		ice_free(hw, fm_entry);
2247 		goto ice_create_pkt_fwd_rule_exit;
	/* propagate the FW-assigned rule ID to caller and bookkeeping copy */
2250 	f_entry->fltr_info.fltr_rule_id =
2251 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2252 	fm_entry->fltr_info.fltr_rule_id =
2253 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2255 	/* The book keeping entries will get removed when base driver
2256 	 * calls remove filter AQ command
2258 	l_type = fm_entry->fltr_info.lkup_type;
2259 	recp = &hw->switch_info->recp_list[l_type];
2260 	LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2262 ice_create_pkt_fwd_rule_exit:
2263 	ice_free(hw, s_rule);
2268  * ice_update_pkt_fwd_rule
2269  * @hw: pointer to the hardware structure
2270  * @f_info: filter information for switch rule
2272  * Call AQ command to update a previously created switch rule with a
2275 static enum ice_status
2276 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2278 	struct ice_aqc_sw_rules_elem *s_rule;
2279 	enum ice_status status;
2281 	s_rule = (struct ice_aqc_sw_rules_elem *)
2282 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
	/* allocation-failure path (the NULL test is on an elided line) */
2284 		return ICE_ERR_NO_MEMORY;
2286 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
	/* target the existing rule by its FW-assigned ID */
2288 	s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2290 	/* Update switch rule with new rule set to forward VSI list */
2291 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2292 				 ice_aqc_opc_update_sw_rules, NULL);
2294 	ice_free(hw, s_rule);
2299 * ice_update_sw_rule_bridge_mode
2300 * @hw: pointer to the HW struct
2302 * Updates unicast switch filter rules based on VEB/VEPA mode
2304 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2306 struct ice_switch_info *sw = hw->switch_info;
2307 struct ice_fltr_mgmt_list_entry *fm_entry;
2308 enum ice_status status = ICE_SUCCESS;
2309 struct LIST_HEAD_TYPE *rule_head;
2310 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC-lookup recipe's rule list is walked here */
2312 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2313 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2315 ice_acquire_lock(rule_lock);
2316 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2318 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2319 u8 *addr = fi->l_data.mac.mac_addr;
2321 /* Update unicast Tx rules to reflect the selected
/* Re-push only Tx unicast rules whose action is a forwarding action */
2324 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2325 (fi->fltr_act == ICE_FWD_TO_VSI ||
2326 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2327 fi->fltr_act == ICE_FWD_TO_Q ||
2328 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2329 status = ice_update_pkt_fwd_rule(hw, fi);
2335 ice_release_lock(rule_lock);
2341 * ice_add_update_vsi_list
2342 * @hw: pointer to the hardware structure
2343 * @m_entry: pointer to current filter management list entry
2344 * @cur_fltr: filter information from the book keeping entry
2345 * @new_fltr: filter information with the new VSI to be added
2347 * Call AQ command to add or update previously created VSI list with new VSI.
2349 * Helper function to do book keeping associated with adding filter information
2350 * The algorithm to do the book keeping is described below :
2351 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2352 * if only one VSI has been added till now
2353 * Allocate a new VSI list and add two VSIs
2354 * to this list using switch rule command
2355 * Update the previously created switch rule with the
2356 * newly created VSI list ID
2357 * if a VSI list was previously created
2358 * Add the new VSI to the previously created VSI list set
2359 * using the update switch rule command
2361 static enum ice_status
2362 ice_add_update_vsi_list(struct ice_hw *hw,
2363 struct ice_fltr_mgmt_list_entry *m_entry,
2364 struct ice_fltr_info *cur_fltr,
2365 struct ice_fltr_info *new_fltr)
2367 enum ice_status status = ICE_SUCCESS;
2368 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be merged into a VSI list */
2370 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2371 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2372 return ICE_ERR_NOT_IMPL;
2374 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2375 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2376 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2377 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2378 return ICE_ERR_NOT_IMPL;
/* NOTE(review): numbered listing drops several lines in this function
 * (e.g. 2383-2384, 2395, 2397-2399) -- error checks after the AQ calls
 * are not fully visible; verify against the complete file.
 */
2380 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2381 /* Only one entry existed in the mapping and it was not already
2382 * a part of a VSI list. So, create a VSI list with the old and
2385 struct ice_fltr_info tmp_fltr;
2386 u16 vsi_handle_arr[2];
2388 /* A rule already exists with the new VSI being added */
2389 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2390 return ICE_ERR_ALREADY_EXISTS;
2392 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2393 vsi_handle_arr[1] = new_fltr->vsi_handle;
2394 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2396 new_fltr->lkup_type);
2400 tmp_fltr = *new_fltr;
2401 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2402 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2403 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2404 /* Update the previous switch rule of "MAC forward to VSI" to
2405 * "MAC fwd to VSI list"
2407 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep bookkeeping in sync with the rule we just rewrote in HW */
2411 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2412 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2413 m_entry->vsi_list_info =
2414 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2417 /* If this entry was large action then the large action needs
2418 * to be updated to point to FWD to VSI list
2420 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2422 ice_add_marker_act(hw, m_entry,
2423 m_entry->sw_marker_id,
2424 m_entry->lg_act_idx);
2426 u16 vsi_handle = new_fltr->vsi_handle;
2427 enum ice_adminq_opc opcode;
2429 if (!m_entry->vsi_list_info)
2432 /* A rule already exists with the new VSI being added */
2433 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2436 /* Update the previously created VSI list set with
2437 * the new VSI ID passed in
2439 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2440 opcode = ice_aqc_opc_update_sw_rules;
2442 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2443 vsi_list_id, false, opcode,
2444 new_fltr->lkup_type);
2445 /* update VSI list mapping info with new VSI ID */
2447 ice_set_bit(vsi_handle,
2448 m_entry->vsi_list_info->vsi_map);
2451 m_entry->vsi_count++;
2456 * ice_find_rule_entry - Search a rule entry
2457 * @hw: pointer to the hardware structure
2458 * @recp_id: lookup type for which the specified rule needs to be searched
2459 * @f_info: rule information
2461 * Helper function to search for a given rule entry
2462 * Returns pointer to entry storing the rule if found
2464 static struct ice_fltr_mgmt_list_entry *
2465 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2467 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2468 struct ice_switch_info *sw = hw->switch_info;
2469 struct LIST_HEAD_TYPE *list_head;
/* Match on the lookup data (l_data) plus the Rx/Tx flag; callers hold the
 * recipe's filt_rule_lock (see ice_add_mac / ice_add_rule_internal).
 */
2471 list_head = &sw->recp_list[recp_id].filt_rules;
2472 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2474 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2475 sizeof(f_info->l_data)) &&
2476 f_info->flag == list_itr->fltr_info.flag) {
2485 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2486 * @hw: pointer to the hardware structure
2487 * @recp_id: lookup type for which VSI lists needs to be searched
2488 * @vsi_handle: VSI handle to be found in VSI list
2489 * @vsi_list_id: VSI list ID found containing vsi_handle
2491 * Helper function to search a VSI list with single entry containing given VSI
2492 * handle element. This can be extended further to search VSI list with more
2493 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2495 static struct ice_vsi_list_map_info *
2496 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2499 struct ice_vsi_list_map_info *map_info = NULL;
2500 struct ice_switch_info *sw = hw->switch_info;
2501 struct LIST_HEAD_TYPE *list_head;
2503 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced recipes store a different entry type in filt_rules, so the
 * walk is duplicated for the two entry layouts.
 */
2504 if (sw->recp_list[recp_id].adv_rule) {
2505 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2507 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2508 ice_adv_fltr_mgmt_list_entry,
2510 if (list_itr->vsi_list_info) {
2511 map_info = list_itr->vsi_list_info;
2512 if (ice_is_bit_set(map_info->vsi_map,
2514 *vsi_list_id = map_info->vsi_list_id;
2520 struct ice_fltr_mgmt_list_entry *list_itr;
2522 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2523 ice_fltr_mgmt_list_entry,
/* Legacy path additionally requires vsi_count == 1, per the header */
2525 if (list_itr->vsi_count == 1 &&
2526 list_itr->vsi_list_info) {
2527 map_info = list_itr->vsi_list_info;
2528 if (ice_is_bit_set(map_info->vsi_map,
2530 *vsi_list_id = map_info->vsi_list_id;
2540 * ice_add_rule_internal - add rule for a given lookup type
2541 * @hw: pointer to the hardware structure
2542 * @recp_id: lookup type (recipe ID) for which rule has to be added
2543 * @f_entry: structure containing MAC forwarding information
2545 * Adds or updates the rule lists for a given recipe
2547 static enum ice_status
2548 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2549 struct ice_fltr_list_entry *f_entry)
2551 struct ice_switch_info *sw = hw->switch_info;
2552 struct ice_fltr_info *new_fltr, *cur_fltr;
2553 struct ice_fltr_mgmt_list_entry *m_entry;
2554 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2555 enum ice_status status = ICE_SUCCESS;
2557 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2558 return ICE_ERR_PARAM;
2560 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2561 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2562 f_entry->fltr_info.fwd_id.hw_vsi_id =
2563 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2565 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2567 ice_acquire_lock(rule_lock);
2568 new_fltr = &f_entry->fltr_info;
/* Source is the logical port for Rx rules, the HW VSI number for Tx */
2569 if (new_fltr->flag & ICE_FLTR_RX)
2570 new_fltr->src = hw->port_info->lport;
2571 else if (new_fltr->flag & ICE_FLTR_TX)
2573 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* First subscriber creates the rule; later ones join a VSI list */
2575 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2577 status = ice_create_pkt_fwd_rule(hw, f_entry);
2578 goto exit_add_rule_internal;
2581 cur_fltr = &m_entry->fltr_info;
2582 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2584 exit_add_rule_internal:
2585 ice_release_lock(rule_lock);
2590 * ice_remove_vsi_list_rule
2591 * @hw: pointer to the hardware structure
2592 * @vsi_list_id: VSI list ID generated as part of allocate resource
2593 * @lkup_type: switch rule filter lookup type
2595 * The VSI list should be emptied before this function is called to remove the
2598 static enum ice_status
2599 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2600 enum ice_sw_lkup_type lkup_type)
2602 struct ice_aqc_sw_rules_elem *s_rule;
2603 enum ice_status status;
/* NOTE(review): listing gaps (e.g. 2608) hide the NULL check after
 * ice_malloc(); also, s_rule is prepared but only the free-resource AQ
 * call is visible below -- verify against the complete file.
 */
2606 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2607 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2609 return ICE_ERR_NO_MEMORY;
2611 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2612 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2614 /* Free the vsi_list resource that we allocated. It is assumed that the
2615 * list is empty at this point.
2617 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2618 ice_aqc_opc_free_res);
2620 ice_free(hw, s_rule);
2625 * ice_rem_update_vsi_list
2626 * @hw: pointer to the hardware structure
2627 * @vsi_handle: VSI handle of the VSI to remove
2628 * @fm_list: filter management entry for which the VSI list management needs to
2631 static enum ice_status
2632 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2633 struct ice_fltr_mgmt_list_entry *fm_list)
2635 enum ice_sw_lkup_type lkup_type;
2636 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for rules currently forwarding to a VSI list */
2639 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2640 fm_list->vsi_count == 0)
2641 return ICE_ERR_PARAM;
2643 /* A rule with the VSI being removed does not exist */
2644 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2645 return ICE_ERR_DOES_NOT_EXIST;
2647 lkup_type = fm_list->fltr_info.lkup_type;
2648 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Remove the VSI from the HW VSI list (remove flag = true) */
2649 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2650 ice_aqc_opc_update_sw_rules,
2655 fm_list->vsi_count--;
2656 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* When one VSI is left (non-VLAN), collapse the list back into a plain
 * FWD_TO_VSI rule pointing at the remaining VSI.
 */
2658 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2659 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2660 struct ice_vsi_list_map_info *vsi_list_info =
2661 fm_list->vsi_list_info;
2664 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2666 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2667 return ICE_ERR_OUT_OF_RANGE;
2669 /* Make sure VSI list is empty before removing it below */
2670 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2672 ice_aqc_opc_update_sw_rules,
2677 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2678 tmp_fltr_info.fwd_id.hw_vsi_id =
2679 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2680 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2681 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2683 ice_debug(hw, ICE_DBG_SW,
2684 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2685 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2689 fm_list->fltr_info = tmp_fltr_info;
/* Tear down the now-unused VSI list (VLAN lists live until empty) */
2692 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2693 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2694 struct ice_vsi_list_map_info *vsi_list_info =
2695 fm_list->vsi_list_info;
2697 /* Remove the VSI list since it is no longer used */
2698 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2700 ice_debug(hw, ICE_DBG_SW,
2701 "Failed to remove VSI list %d, error %d\n",
2702 vsi_list_id, status);
2706 LIST_DEL(&vsi_list_info->list_entry);
2707 ice_free(hw, vsi_list_info);
2708 fm_list->vsi_list_info = NULL;
2715 * ice_remove_rule_internal - Remove a filter rule of a given type
2717 * @hw: pointer to the hardware structure
2718 * @recp_id: recipe ID for which the rule needs to removed
2719 * @f_entry: rule entry containing filter information
2721 static enum ice_status
2722 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2723 struct ice_fltr_list_entry *f_entry)
2725 struct ice_switch_info *sw = hw->switch_info;
2726 struct ice_fltr_mgmt_list_entry *list_elem;
2727 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2728 enum ice_status status = ICE_SUCCESS;
2729 bool remove_rule = false;
2732 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2733 return ICE_ERR_PARAM;
2734 f_entry->fltr_info.fwd_id.hw_vsi_id =
2735 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2737 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2738 ice_acquire_lock(rule_lock);
2739 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2741 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW lookup rule itself must be deleted, or only the
 * VSI list / bookkeeping adjusted.
 */
2745 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2747 } else if (!list_elem->vsi_list_info) {
2748 status = ICE_ERR_DOES_NOT_EXIST;
2750 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2751 /* a ref_cnt > 1 indicates that the vsi_list is being
2752 * shared by multiple rules. Decrement the ref_cnt and
2753 * remove this rule, but do not modify the list, as it
2754 * is in-use by other rules.
2756 list_elem->vsi_list_info->ref_cnt--;
2759 /* a ref_cnt of 1 indicates the vsi_list is only used
2760 * by one rule. However, the original removal request is only
2761 * for a single VSI. Update the vsi_list first, and only
2762 * remove the rule if there are no further VSIs in this list.
2764 vsi_handle = f_entry->fltr_info.vsi_handle;
2765 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2768 /* if VSI count goes to zero after updating the VSI list */
2769 if (list_elem->vsi_count == 0)
2774 /* Remove the lookup rule */
2775 struct ice_aqc_sw_rules_elem *s_rule;
/* NOTE(review): listing gaps hide the NULL check after ice_malloc()
 * and the status check after the AQ call -- verify in full source.
 */
2777 s_rule = (struct ice_aqc_sw_rules_elem *)
2778 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2780 status = ICE_ERR_NO_MEMORY;
2784 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2785 ice_aqc_opc_remove_sw_rules);
2787 status = ice_aq_sw_rules(hw, s_rule,
2788 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2789 ice_aqc_opc_remove_sw_rules, NULL);
2791 /* Remove a book keeping from the list */
2792 ice_free(hw, s_rule);
2797 LIST_DEL(&list_elem->list_entry);
2798 ice_free(hw, list_elem);
2801 ice_release_lock(rule_lock);
2806 * ice_aq_get_res_alloc - get allocated resources
2807 * @hw: pointer to the HW struct
2808 * @num_entries: pointer to u16 to store the number of resource entries returned
2809 * @buf: pointer to user-supplied buffer
2810 * @buf_size: size of buff
2811 * @cd: pointer to command details structure or NULL
2813 * The user-supplied buffer must be large enough to store the resource
2814 * information for all resource types. Each resource type is an
2815 * ice_aqc_get_res_resp_data_elem structure.
2818 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2819 u16 buf_size, struct ice_sq_cd *cd)
2821 struct ice_aqc_get_res_alloc *resp;
2822 enum ice_status status;
2823 struct ice_aq_desc desc;
/* NOTE(review): the condition guarding this return (likely a !buf
 * check, line 2825) is missing from this listing.
 */
2826 return ICE_ERR_BAD_PTR;
2828 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2829 return ICE_ERR_INVAL_SIZE;
2831 resp = &desc.params.get_res;
2833 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2834 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on success */
2836 if (!status && num_entries)
2837 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2843 * ice_aq_get_res_descs - get allocated resource descriptors
2844 * @hw: pointer to the hardware structure
2845 * @num_entries: number of resource entries in buffer
2846 * @buf: Indirect buffer to hold data parameters and response
2847 * @buf_size: size of buffer for indirect commands
2848 * @res_type: resource type
2849 * @res_shared: is resource shared
2850 * @desc_id: input - first desc ID to start; output - next desc ID
2851 * @cd: pointer to command details structure or NULL
2854 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2855 struct ice_aqc_get_allocd_res_desc_resp *buf,
2856 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2857 struct ice_sq_cd *cd)
2859 struct ice_aqc_get_allocd_res_desc *cmd;
2860 struct ice_aq_desc desc;
2861 enum ice_status status;
2863 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2865 cmd = &desc.params.get_res_desc;
/* NOTE(review): the condition for this return (line 2867, likely a
 * !buf check) is missing from this listing.
 */
2868 return ICE_ERR_PARAM;
/* Buffer must exactly hold num_entries response elements */
2870 if (buf_size != (num_entries * sizeof(*buf)))
2871 return ICE_ERR_PARAM;
2873 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2875 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2876 ICE_AQC_RES_TYPE_M) | (res_shared ?
2877 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2878 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2880 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2882 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the continuation cursor for the next call */
2884 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2890 * ice_add_mac - Add a MAC address based filter rule
2891 * @hw: pointer to the hardware structure
2892 * @m_list: list of MAC addresses and forwarding information
2894 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2895 * multiple unicast addresses, the function assumes that all the
2896 * addresses are unique in a given add_mac call. It doesn't
2897 * check for duplicates in this case, removing duplicates from a given
2898 * list should be taken care of in the caller of this function.
2901 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2903 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2904 struct ice_fltr_list_entry *m_list_itr;
2905 struct LIST_HEAD_TYPE *rule_head;
2906 u16 elem_sent, total_elem_left;
2907 struct ice_switch_info *sw;
2908 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2909 enum ice_status status = ICE_SUCCESS;
2910 u16 num_unicast = 0;
/* NOTE(review): numbered listing drops lines throughout this function
 * (e.g. the !m_list/!hw guard at 2913, num_unicast accounting) --
 * verify against the complete file before modifying.
 */
2914 return ICE_ERR_PARAM;
2916 sw = hw->switch_info;
2917 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate each entry; non-bulk cases go through
 * ice_add_rule_internal immediately.
 */
2918 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2920 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2924 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2925 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2926 if (!ice_is_vsi_valid(hw, vsi_handle))
2927 return ICE_ERR_PARAM;
2928 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2929 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2930 /* update the src in case it is VSI num */
2931 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2932 return ICE_ERR_PARAM;
2933 m_list_itr->fltr_info.src = hw_vsi_id;
2934 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2935 IS_ZERO_ETHER_ADDR(add))
2936 return ICE_ERR_PARAM;
2937 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
2938 /* Don't overwrite the unicast address */
2939 ice_acquire_lock(rule_lock);
2940 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2941 &m_list_itr->fltr_info)) {
2942 ice_release_lock(rule_lock);
2943 return ICE_ERR_ALREADY_EXISTS;
2945 ice_release_lock(rule_lock);
2947 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
2948 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
2949 m_list_itr->status =
2950 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2952 if (m_list_itr->status)
2953 return m_list_itr->status;
2957 ice_acquire_lock(rule_lock);
2958 /* Exit if no suitable entries were found for adding bulk switch rule */
2960 status = ICE_SUCCESS;
2961 goto ice_add_mac_exit;
2964 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2966 /* Allocate switch rule buffer for the bulk update for unicast */
2967 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2968 s_rule = (struct ice_aqc_sw_rules_elem *)
2969 ice_calloc(hw, num_unicast, s_rule_size);
2971 status = ICE_ERR_NO_MEMORY;
2972 goto ice_add_mac_exit;
/* Pass 2: serialize all unicast rules into the bulk buffer */
2976 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2978 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2979 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2981 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2982 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2983 ice_aqc_opc_add_sw_rules);
2984 r_iter = (struct ice_aqc_sw_rules_elem *)
2985 ((u8 *)r_iter + s_rule_size);
2989 /* Call AQ bulk switch rule update for all unicast addresses */
2991 /* Call AQ switch rule in AQ_MAX chunk */
2992 for (total_elem_left = num_unicast; total_elem_left > 0;
2993 total_elem_left -= elem_sent) {
2994 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size limited by the max AQ buffer length */
2996 elem_sent = min(total_elem_left,
2997 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
2998 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2999 elem_sent, ice_aqc_opc_add_sw_rules,
3002 goto ice_add_mac_exit;
3003 r_iter = (struct ice_aqc_sw_rules_elem *)
3004 ((u8 *)r_iter + (elem_sent * s_rule_size));
3007 /* Fill up rule ID based on the value returned from FW */
3009 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3011 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3012 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3013 struct ice_fltr_mgmt_list_entry *fm_entry;
3015 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3016 f_info->fltr_rule_id =
3017 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3018 f_info->fltr_act = ICE_FWD_TO_VSI;
3019 /* Create an entry to track this MAC address */
3020 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3021 ice_malloc(hw, sizeof(*fm_entry));
3023 status = ICE_ERR_NO_MEMORY;
3024 goto ice_add_mac_exit;
3026 fm_entry->fltr_info = *f_info;
3027 fm_entry->vsi_count = 1;
3028 /* The book keeping entries will get removed when
3029 * base driver calls remove filter AQ command
3032 LIST_ADD(&fm_entry->list_entry, rule_head);
3033 r_iter = (struct ice_aqc_sw_rules_elem *)
3034 ((u8 *)r_iter + s_rule_size);
3039 ice_release_lock(rule_lock);
3041 ice_free(hw, s_rule);
3046 * ice_add_vlan_internal - Add one VLAN based filter rule
3047 * @hw: pointer to the hardware structure
3048 * @f_entry: filter entry containing one VLAN information
3050 static enum ice_status
3051 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3053 struct ice_switch_info *sw = hw->switch_info;
3054 struct ice_fltr_mgmt_list_entry *v_list_itr;
3055 struct ice_fltr_info *new_fltr, *cur_fltr;
3056 enum ice_sw_lkup_type lkup_type;
3057 u16 vsi_list_id = 0, vsi_handle;
3058 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3059 enum ice_status status = ICE_SUCCESS;
3061 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3062 return ICE_ERR_PARAM;
3064 f_entry->fltr_info.fwd_id.hw_vsi_id =
3065 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3066 new_fltr = &f_entry->fltr_info;
3068 /* VLAN ID should only be 12 bits */
3069 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3070 return ICE_ERR_PARAM;
3072 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3073 return ICE_ERR_PARAM;
3075 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3076 lkup_type = new_fltr->lkup_type;
3077 vsi_handle = new_fltr->vsi_handle;
3078 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3079 ice_acquire_lock(rule_lock);
3080 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* NOTE(review): numbered listing drops lines in this function (e.g.
 * 3081, 3091-3101) -- branch conditions and error checks around the
 * VSI list creation are not fully visible; verify in full source.
 */
3082 struct ice_vsi_list_map_info *map_info = NULL;
3084 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3085 /* All VLAN pruning rules use a VSI list. Check if
3086 * there is already a VSI list containing VSI that we
3087 * want to add. If found, use the same vsi_list_id for
3088 * this new VLAN rule or else create a new list.
3090 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3094 status = ice_create_vsi_list_rule(hw,
3102 /* Convert the action to forwarding to a VSI list. */
3103 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3104 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3107 status = ice_create_pkt_fwd_rule(hw, f_entry);
3109 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3112 status = ICE_ERR_DOES_NOT_EXIST;
3115 /* reuse VSI list for new rule and increment ref_cnt */
3117 v_list_itr->vsi_list_info = map_info;
3118 map_info->ref_cnt++;
3120 v_list_itr->vsi_list_info =
3121 ice_create_vsi_list_map(hw, &vsi_handle,
3125 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3126 /* Update existing VSI list to add new VSI ID only if it used
3129 cur_fltr = &v_list_itr->fltr_info;
3130 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3133 /* If VLAN rule exists and VSI list being used by this rule is
3134 * referenced by more than 1 VLAN rule. Then create a new VSI
3135 * list appending previous VSI with new VSI and update existing
3136 * VLAN rule to point to new VSI list ID
3138 struct ice_fltr_info tmp_fltr;
3139 u16 vsi_handle_arr[2];
3142 /* Current implementation only supports reusing VSI list with
3143 * one VSI count. We should never hit below condition
3145 if (v_list_itr->vsi_count > 1 &&
3146 v_list_itr->vsi_list_info->ref_cnt > 1) {
3147 ice_debug(hw, ICE_DBG_SW,
3148 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3149 status = ICE_ERR_CFG;
3154 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3157 /* A rule already exists with the new VSI being added */
3158 if (cur_handle == vsi_handle) {
3159 status = ICE_ERR_ALREADY_EXISTS;
3163 vsi_handle_arr[0] = cur_handle;
3164 vsi_handle_arr[1] = vsi_handle;
3165 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3166 &vsi_list_id, lkup_type);
3170 tmp_fltr = v_list_itr->fltr_info;
3171 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3172 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3173 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3174 /* Update the previous switch rule to a new VSI list which
3175 * includes current VSI that is requested
3177 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3181 /* before overriding VSI list map info. decrement ref_cnt of
3184 v_list_itr->vsi_list_info->ref_cnt--;
3186 /* now update to newly created list */
3187 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3188 v_list_itr->vsi_list_info =
3189 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3191 v_list_itr->vsi_count++;
3195 ice_release_lock(rule_lock);
3200 * ice_add_vlan - Add VLAN based filter rule
3201 * @hw: pointer to the hardware structure
3202 * @v_list: list of VLAN entries and forwarding information
3205 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3207 struct ice_fltr_list_entry *v_list_itr;
/* NOTE(review): the condition for this return (likely !v_list || !hw,
 * line 3209) is missing from this listing.
 */
3210 return ICE_ERR_PARAM;
/* Per-entry status is stored; the first failure aborts the whole list */
3212 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3214 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3215 return ICE_ERR_PARAM;
3216 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3217 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3218 if (v_list_itr->status)
3219 return v_list_itr->status;
3225 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3226 * @hw: pointer to the hardware structure
3227 * @mv_list: list of MAC and VLAN filters
3229 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3230 * pruning bits enabled, then it is the responsibility of the caller to make
3231 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3232 * VLAN won't be received on that VSI otherwise.
3235 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3237 struct ice_fltr_list_entry *mv_list_itr;
3239 if (!mv_list || !hw)
3240 return ICE_ERR_PARAM;
/* Each entry must use the MAC+VLAN lookup recipe; Tx flag is forced */
3242 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3244 enum ice_sw_lkup_type l_type =
3245 mv_list_itr->fltr_info.lkup_type;
3247 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3248 return ICE_ERR_PARAM;
3249 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3250 mv_list_itr->status =
3251 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3253 if (mv_list_itr->status)
3254 return mv_list_itr->status;
3260 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3261 * @hw: pointer to the hardware structure
3262 * @em_list: list of ether type MAC filter, MAC is optional
3264 * This function requires the caller to populate the entries in
3265 * the filter list with the necessary fields (including flags to
3266 * indicate Tx or Rx rules).
3269 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3271 struct ice_fltr_list_entry *em_list_itr;
3273 if (!em_list || !hw)
3274 return ICE_ERR_PARAM;
/* Unlike ice_add_vlan/ice_add_mac_vlan, the Rx/Tx flag comes from the
 * caller here (see the header note above).
 */
3276 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3278 enum ice_sw_lkup_type l_type =
3279 em_list_itr->fltr_info.lkup_type;
3281 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3282 l_type != ICE_SW_LKUP_ETHERTYPE)
3283 return ICE_ERR_PARAM;
3285 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3287 if (em_list_itr->status)
3288 return em_list_itr->status;
3294 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3295 * @hw: pointer to the hardware structure
3296 * @em_list: list of ethertype or ethertype MAC entries
3299 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3301 struct ice_fltr_list_entry *em_list_itr, *tmp;
3303 if (!em_list || !hw)
3304 return ICE_ERR_PARAM;
/* SAFE iteration: ice_remove_rule_internal may unlink entries */
3306 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3308 enum ice_sw_lkup_type l_type =
3309 em_list_itr->fltr_info.lkup_type;
3311 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3312 l_type != ICE_SW_LKUP_ETHERTYPE)
3313 return ICE_ERR_PARAM;
3315 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3317 if (em_list_itr->status)
3318 return em_list_itr->status;
3325 * ice_rem_sw_rule_info
3326 * @hw: pointer to the hardware structure
3327 * @rule_head: pointer to the switch list structure that we want to delete
3330 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
/* Frees every bookkeeping entry on the list; does not touch HW rules */
3332 if (!LIST_EMPTY(rule_head)) {
3333 struct ice_fltr_mgmt_list_entry *entry;
3334 struct ice_fltr_mgmt_list_entry *tmp;
3336 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3337 ice_fltr_mgmt_list_entry, list_entry) {
3338 LIST_DEL(&entry->list_entry);
3339 ice_free(hw, entry);
3345 * ice_rem_adv_rule_info
3346 * @hw: pointer to the hardware structure
3347 * @rule_head: pointer to the switch list structure that we want to delete
3350 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3352 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3353 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3355 if (LIST_EMPTY(rule_head))
/* Advanced entries own a separately allocated lkups array -- free it
 * before the entry itself.
 */
3358 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3359 ice_adv_fltr_mgmt_list_entry, list_entry) {
3360 LIST_DEL(&lst_itr->list_entry);
3361 ice_free(hw, lst_itr->lkups);
3362 ice_free(hw, lst_itr);
3367 * ice_rem_all_sw_rules_info
3368 * @hw: pointer to the hardware structure
3370 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3372 struct ice_switch_info *sw = hw->switch_info;
/* Walk every recipe and free its bookkeeping list, dispatching on
 * whether the recipe holds legacy or advanced rule entries.
 */
3375 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3376 struct LIST_HEAD_TYPE *rule_head;
3378 rule_head = &sw->recp_list[i].filt_rules;
3379 if (!sw->recp_list[i].adv_rule)
3380 ice_rem_sw_rule_info(hw, rule_head);
3382 ice_rem_adv_rule_info(hw, rule_head);
3387 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3388 * @pi: pointer to the port_info structure
3389 * @vsi_handle: VSI handle to set as default
3390 * @set: true to add the above mentioned switch rule, false to remove it
3391 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3393 * add filter rule to set/unset given VSI as default VSI for the switch
3394 * (represented by swid)
3397 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3400 struct ice_aqc_sw_rules_elem *s_rule;
3401 struct ice_fltr_info f_info;
3402 struct ice_hw *hw = pi->hw;
3403 enum ice_adminq_opc opcode;
3404 enum ice_status status;
3408 if (!ice_is_vsi_valid(hw, vsi_handle))
3409 return ICE_ERR_PARAM;
3410 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3412 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3413 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3414 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3416 return ICE_ERR_NO_MEMORY;
3418 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3420 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3421 f_info.flag = direction;
3422 f_info.fltr_act = ICE_FWD_TO_VSI;
3423 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3425 if (f_info.flag & ICE_FLTR_RX) {
3426 f_info.src = pi->lport;
3427 f_info.src_id = ICE_SRC_ID_LPORT;
3429 f_info.fltr_rule_id =
3430 pi->dflt_rx_vsi_rule_id;
3431 } else if (f_info.flag & ICE_FLTR_TX) {
3432 f_info.src_id = ICE_SRC_ID_VSI;
3433 f_info.src = hw_vsi_id;
3435 f_info.fltr_rule_id =
3436 pi->dflt_tx_vsi_rule_id;
3440 opcode = ice_aqc_opc_add_sw_rules;
3442 opcode = ice_aqc_opc_remove_sw_rules;
3444 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3446 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3447 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3450 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3452 if (f_info.flag & ICE_FLTR_TX) {
3453 pi->dflt_tx_vsi_num = hw_vsi_id;
3454 pi->dflt_tx_vsi_rule_id = index;
3455 } else if (f_info.flag & ICE_FLTR_RX) {
3456 pi->dflt_rx_vsi_num = hw_vsi_id;
3457 pi->dflt_rx_vsi_rule_id = index;
3460 if (f_info.flag & ICE_FLTR_TX) {
3461 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3462 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3463 } else if (f_info.flag & ICE_FLTR_RX) {
3464 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3465 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3470 ice_free(hw, s_rule);
3475 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3476 * @hw: pointer to the hardware structure
3477 * @recp_id: lookup type for which the specified rule needs to be searched
3478 * @f_info: rule information
3480 * Helper function to search for a unicast rule entry - this is to be used
3481 * to remove unicast MAC filter that is not shared with other VSIs on the
3484 * Returns pointer to entry storing the rule if found
3486 static struct ice_fltr_mgmt_list_entry *
3487 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3488 struct ice_fltr_info *f_info)
3490 struct ice_switch_info *sw = hw->switch_info;
3491 struct ice_fltr_mgmt_list_entry *list_itr;
3492 struct LIST_HEAD_TYPE *list_head;
3494 list_head = &sw->recp_list[recp_id].filt_rules;
3495 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3497 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3498 sizeof(f_info->l_data)) &&
3499 f_info->fwd_id.hw_vsi_id ==
3500 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3501 f_info->flag == list_itr->fltr_info.flag)
3508 * ice_remove_mac - remove a MAC address based filter rule
3509 * @hw: pointer to the hardware structure
3510 * @m_list: list of MAC addresses and forwarding information
3512 * This function removes either a MAC filter rule or a specific VSI from a
3513 * VSI list for a multicast MAC address.
3515 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3516 * ice_add_mac. Caller should be aware that this call will only work if all
3517 * the entries passed into m_list were added previously. It will not attempt to
3518 * do a partial remove of entries that were found.
3521 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3523 struct ice_fltr_list_entry *list_itr, *tmp;
3524 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3527 return ICE_ERR_PARAM;
3529 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3530 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3532 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3533 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3536 if (l_type != ICE_SW_LKUP_MAC)
3537 return ICE_ERR_PARAM;
3539 vsi_handle = list_itr->fltr_info.vsi_handle;
3540 if (!ice_is_vsi_valid(hw, vsi_handle))
3541 return ICE_ERR_PARAM;
3543 list_itr->fltr_info.fwd_id.hw_vsi_id =
3544 ice_get_hw_vsi_num(hw, vsi_handle);
3545 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3546 /* Don't remove the unicast address that belongs to
3547 * another VSI on the switch, since it is not being
3550 ice_acquire_lock(rule_lock);
3551 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3552 &list_itr->fltr_info)) {
3553 ice_release_lock(rule_lock);
3554 return ICE_ERR_DOES_NOT_EXIST;
3556 ice_release_lock(rule_lock);
3558 list_itr->status = ice_remove_rule_internal(hw,
3561 if (list_itr->status)
3562 return list_itr->status;
3568 * ice_remove_vlan - Remove VLAN based filter rule
3569 * @hw: pointer to the hardware structure
3570 * @v_list: list of VLAN entries and forwarding information
3573 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3575 struct ice_fltr_list_entry *v_list_itr, *tmp;
3578 return ICE_ERR_PARAM;
3580 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3582 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3584 if (l_type != ICE_SW_LKUP_VLAN)
3585 return ICE_ERR_PARAM;
3586 v_list_itr->status = ice_remove_rule_internal(hw,
3589 if (v_list_itr->status)
3590 return v_list_itr->status;
3596 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3597 * @hw: pointer to the hardware structure
3598 * @v_list: list of MAC VLAN entries and forwarding information
3601 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3603 struct ice_fltr_list_entry *v_list_itr, *tmp;
3606 return ICE_ERR_PARAM;
3608 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3610 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3612 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3613 return ICE_ERR_PARAM;
3614 v_list_itr->status =
3615 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3617 if (v_list_itr->status)
3618 return v_list_itr->status;
3624 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3625 * @fm_entry: filter entry to inspect
3626 * @vsi_handle: VSI handle to compare with filter info
3629 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3631 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3632 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3633 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3634 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3639 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3640 * @hw: pointer to the hardware structure
3641 * @vsi_handle: VSI handle to remove filters from
3642 * @vsi_list_head: pointer to the list to add entry to
3643 * @fi: pointer to fltr_info of filter entry to copy & add
3645 * Helper function, used when creating a list of filters to remove from
3646 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3647 * original filter entry, with the exception of fltr_info.fltr_act and
3648 * fltr_info.fwd_id fields. These are set such that later logic can
3649 * extract which VSI to remove the fltr from, and pass on that information.
3651 static enum ice_status
3652 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3653 struct LIST_HEAD_TYPE *vsi_list_head,
3654 struct ice_fltr_info *fi)
3656 struct ice_fltr_list_entry *tmp;
3658 /* this memory is freed up in the caller function
3659 * once filters for this VSI are removed
3661 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3663 return ICE_ERR_NO_MEMORY;
3665 tmp->fltr_info = *fi;
3667 /* Overwrite these fields to indicate which VSI to remove filter from,
3668 * so find and remove logic can extract the information from the
3669 * list entries. Note that original entries will still have proper
3672 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3673 tmp->fltr_info.vsi_handle = vsi_handle;
3674 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3676 LIST_ADD(&tmp->list_entry, vsi_list_head);
3682 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3683 * @hw: pointer to the hardware structure
3684 * @vsi_handle: VSI handle to remove filters from
3685 * @lkup_list_head: pointer to the list that has certain lookup type filters
3686 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3688 * Locates all filters in lkup_list_head that are used by the given VSI,
3689 * and adds COPIES of those entries to vsi_list_head (intended to be used
3690 * to remove the listed filters).
3691 * Note that this means all entries in vsi_list_head must be explicitly
3692 * deallocated by the caller when done with list.
3694 static enum ice_status
3695 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3696 struct LIST_HEAD_TYPE *lkup_list_head,
3697 struct LIST_HEAD_TYPE *vsi_list_head)
3699 struct ice_fltr_mgmt_list_entry *fm_entry;
3700 enum ice_status status = ICE_SUCCESS;
3702 /* check to make sure VSI ID is valid and within boundary */
3703 if (!ice_is_vsi_valid(hw, vsi_handle))
3704 return ICE_ERR_PARAM;
3706 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3707 ice_fltr_mgmt_list_entry, list_entry) {
3708 struct ice_fltr_info *fi;
3710 fi = &fm_entry->fltr_info;
3711 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3714 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3724 * ice_determine_promisc_mask
3725 * @fi: filter info to parse
3727 * Helper function to determine which ICE_PROMISC_ mask corresponds
3728 * to given filter into.
3730 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3732 u16 vid = fi->l_data.mac_vlan.vlan_id;
3733 u8 *macaddr = fi->l_data.mac.mac_addr;
3734 bool is_tx_fltr = false;
3735 u8 promisc_mask = 0;
3737 if (fi->flag == ICE_FLTR_TX)
3740 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3741 promisc_mask |= is_tx_fltr ?
3742 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3743 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3744 promisc_mask |= is_tx_fltr ?
3745 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3746 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3747 promisc_mask |= is_tx_fltr ?
3748 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3750 promisc_mask |= is_tx_fltr ?
3751 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3753 return promisc_mask;
3757 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3758 * @hw: pointer to the hardware structure
3759 * @vsi_handle: VSI handle to retrieve info from
3760 * @promisc_mask: pointer to mask to be filled in
3761 * @vid: VLAN ID of promisc VLAN VSI
3764 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3767 struct ice_switch_info *sw = hw->switch_info;
3768 struct ice_fltr_mgmt_list_entry *itr;
3769 struct LIST_HEAD_TYPE *rule_head;
3770 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3772 if (!ice_is_vsi_valid(hw, vsi_handle))
3773 return ICE_ERR_PARAM;
3777 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3778 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3780 ice_acquire_lock(rule_lock);
3781 LIST_FOR_EACH_ENTRY(itr, rule_head,
3782 ice_fltr_mgmt_list_entry, list_entry) {
3783 /* Continue if this filter doesn't apply to this VSI or the
3784 * VSI ID is not in the VSI map for this filter
3786 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3789 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3791 ice_release_lock(rule_lock);
3797 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3798 * @hw: pointer to the hardware structure
3799 * @vsi_handle: VSI handle to retrieve info from
3800 * @promisc_mask: pointer to mask to be filled in
3801 * @vid: VLAN ID of promisc VLAN VSI
3804 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3807 struct ice_switch_info *sw = hw->switch_info;
3808 struct ice_fltr_mgmt_list_entry *itr;
3809 struct LIST_HEAD_TYPE *rule_head;
3810 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3812 if (!ice_is_vsi_valid(hw, vsi_handle))
3813 return ICE_ERR_PARAM;
3817 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3818 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3820 ice_acquire_lock(rule_lock);
3821 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3823 /* Continue if this filter doesn't apply to this VSI or the
3824 * VSI ID is not in the VSI map for this filter
3826 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3829 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3831 ice_release_lock(rule_lock);
3837 * ice_remove_promisc - Remove promisc based filter rules
3838 * @hw: pointer to the hardware structure
3839 * @recp_id: recipe ID for which the rule needs to removed
3840 * @v_list: list of promisc entries
3842 static enum ice_status
3843 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3844 struct LIST_HEAD_TYPE *v_list)
3846 struct ice_fltr_list_entry *v_list_itr, *tmp;
3848 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3850 v_list_itr->status =
3851 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3852 if (v_list_itr->status)
3853 return v_list_itr->status;
3859 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3860 * @hw: pointer to the hardware structure
3861 * @vsi_handle: VSI handle to clear mode
3862 * @promisc_mask: mask of promiscuous config bits to clear
3863 * @vid: VLAN ID to clear VLAN promiscuous
3866 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3869 struct ice_switch_info *sw = hw->switch_info;
3870 struct ice_fltr_list_entry *fm_entry, *tmp;
3871 struct LIST_HEAD_TYPE remove_list_head;
3872 struct ice_fltr_mgmt_list_entry *itr;
3873 struct LIST_HEAD_TYPE *rule_head;
3874 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3875 enum ice_status status = ICE_SUCCESS;
3878 if (!ice_is_vsi_valid(hw, vsi_handle))
3879 return ICE_ERR_PARAM;
3882 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3884 recipe_id = ICE_SW_LKUP_PROMISC;
3886 rule_head = &sw->recp_list[recipe_id].filt_rules;
3887 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3889 INIT_LIST_HEAD(&remove_list_head);
3891 ice_acquire_lock(rule_lock);
3892 LIST_FOR_EACH_ENTRY(itr, rule_head,
3893 ice_fltr_mgmt_list_entry, list_entry) {
3894 u8 fltr_promisc_mask = 0;
3896 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3899 fltr_promisc_mask |=
3900 ice_determine_promisc_mask(&itr->fltr_info);
3902 /* Skip if filter is not completely specified by given mask */
3903 if (fltr_promisc_mask & ~promisc_mask)
3906 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3910 ice_release_lock(rule_lock);
3911 goto free_fltr_list;
3914 ice_release_lock(rule_lock);
3916 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3919 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3920 ice_fltr_list_entry, list_entry) {
3921 LIST_DEL(&fm_entry->list_entry);
3922 ice_free(hw, fm_entry);
3929 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3930 * @hw: pointer to the hardware structure
3931 * @vsi_handle: VSI handle to configure
3932 * @promisc_mask: mask of promiscuous config bits
3933 * @vid: VLAN ID to set VLAN promiscuous
3936 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3938 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3939 struct ice_fltr_list_entry f_list_entry;
3940 struct ice_fltr_info new_fltr;
3941 enum ice_status status = ICE_SUCCESS;
3947 ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3949 if (!ice_is_vsi_valid(hw, vsi_handle))
3950 return ICE_ERR_PARAM;
3951 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3953 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
3955 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3956 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3957 new_fltr.l_data.mac_vlan.vlan_id = vid;
3958 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3960 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3961 recipe_id = ICE_SW_LKUP_PROMISC;
3964 /* Separate filters must be set for each direction/packet type
3965 * combination, so we will loop over the mask value, store the
3966 * individual type, and clear it out in the input mask as it
3969 while (promisc_mask) {
3975 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3976 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3977 pkt_type = UCAST_FLTR;
3978 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3979 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3980 pkt_type = UCAST_FLTR;
3982 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3983 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3984 pkt_type = MCAST_FLTR;
3985 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3986 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3987 pkt_type = MCAST_FLTR;
3989 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3990 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3991 pkt_type = BCAST_FLTR;
3992 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3993 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3994 pkt_type = BCAST_FLTR;
3998 /* Check for VLAN promiscuous flag */
3999 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4000 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4001 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4002 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4006 /* Set filter DA based on packet type */
4007 mac_addr = new_fltr.l_data.mac.mac_addr;
4008 if (pkt_type == BCAST_FLTR) {
4009 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4010 } else if (pkt_type == MCAST_FLTR ||
4011 pkt_type == UCAST_FLTR) {
4012 /* Use the dummy ether header DA */
4013 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4014 ICE_NONDMA_TO_NONDMA);
4015 if (pkt_type == MCAST_FLTR)
4016 mac_addr[0] |= 0x1; /* Set multicast bit */
4019 /* Need to reset this to zero for all iterations */
4022 new_fltr.flag |= ICE_FLTR_TX;
4023 new_fltr.src = hw_vsi_id;
4025 new_fltr.flag |= ICE_FLTR_RX;
4026 new_fltr.src = hw->port_info->lport;
4029 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4030 new_fltr.vsi_handle = vsi_handle;
4031 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4032 f_list_entry.fltr_info = new_fltr;
4034 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4035 if (status != ICE_SUCCESS)
4036 goto set_promisc_exit;
4044 * ice_set_vlan_vsi_promisc
4045 * @hw: pointer to the hardware structure
4046 * @vsi_handle: VSI handle to configure
4047 * @promisc_mask: mask of promiscuous config bits
4048 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4050 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4053 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4054 bool rm_vlan_promisc)
4056 struct ice_switch_info *sw = hw->switch_info;
4057 struct ice_fltr_list_entry *list_itr, *tmp;
4058 struct LIST_HEAD_TYPE vsi_list_head;
4059 struct LIST_HEAD_TYPE *vlan_head;
4060 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4061 enum ice_status status;
4064 INIT_LIST_HEAD(&vsi_list_head);
4065 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4066 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4067 ice_acquire_lock(vlan_lock);
4068 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4070 ice_release_lock(vlan_lock);
4072 goto free_fltr_list;
4074 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4076 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4077 if (rm_vlan_promisc)
4078 status = ice_clear_vsi_promisc(hw, vsi_handle,
4079 promisc_mask, vlan_id);
4081 status = ice_set_vsi_promisc(hw, vsi_handle,
4082 promisc_mask, vlan_id);
4088 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4089 ice_fltr_list_entry, list_entry) {
4090 LIST_DEL(&list_itr->list_entry);
4091 ice_free(hw, list_itr);
4097 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4098 * @hw: pointer to the hardware structure
4099 * @vsi_handle: VSI handle to remove filters from
4100 * @lkup: switch rule filter lookup type
4103 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4104 enum ice_sw_lkup_type lkup)
4106 struct ice_switch_info *sw = hw->switch_info;
4107 struct ice_fltr_list_entry *fm_entry;
4108 struct LIST_HEAD_TYPE remove_list_head;
4109 struct LIST_HEAD_TYPE *rule_head;
4110 struct ice_fltr_list_entry *tmp;
4111 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4112 enum ice_status status;
4114 INIT_LIST_HEAD(&remove_list_head);
4115 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4116 rule_head = &sw->recp_list[lkup].filt_rules;
4117 ice_acquire_lock(rule_lock);
4118 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4120 ice_release_lock(rule_lock);
4125 case ICE_SW_LKUP_MAC:
4126 ice_remove_mac(hw, &remove_list_head);
4128 case ICE_SW_LKUP_VLAN:
4129 ice_remove_vlan(hw, &remove_list_head);
4131 case ICE_SW_LKUP_PROMISC:
4132 case ICE_SW_LKUP_PROMISC_VLAN:
4133 ice_remove_promisc(hw, lkup, &remove_list_head);
4135 case ICE_SW_LKUP_MAC_VLAN:
4136 ice_remove_mac_vlan(hw, &remove_list_head);
4138 case ICE_SW_LKUP_ETHERTYPE:
4139 case ICE_SW_LKUP_ETHERTYPE_MAC:
4140 ice_remove_eth_mac(hw, &remove_list_head);
4142 case ICE_SW_LKUP_DFLT:
4143 ice_debug(hw, ICE_DBG_SW,
4144 "Remove filters for this lookup type hasn't been implemented yet\n");
4146 case ICE_SW_LKUP_LAST:
4147 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4151 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4152 ice_fltr_list_entry, list_entry) {
4153 LIST_DEL(&fm_entry->list_entry);
4154 ice_free(hw, fm_entry);
4159 * ice_remove_vsi_fltr - Remove all filters for a VSI
4160 * @hw: pointer to the hardware structure
4161 * @vsi_handle: VSI handle to remove filters from
4163 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4165 ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
4167 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4168 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4169 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4170 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4171 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4172 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4173 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4174 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4178 * ice_alloc_res_cntr - allocating resource counter
4179 * @hw: pointer to the hardware structure
4180 * @type: type of resource
4181 * @alloc_shared: if set it is shared else dedicated
4182 * @num_items: number of entries requested for FD resource type
4183 * @counter_id: counter index returned by AQ call
4186 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4189 struct ice_aqc_alloc_free_res_elem *buf;
4190 enum ice_status status;
4193 /* Allocate resource */
4194 buf_len = sizeof(*buf);
4195 buf = (struct ice_aqc_alloc_free_res_elem *)
4196 ice_malloc(hw, buf_len);
4198 return ICE_ERR_NO_MEMORY;
4200 buf->num_elems = CPU_TO_LE16(num_items);
4201 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4202 ICE_AQC_RES_TYPE_M) | alloc_shared);
4204 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4205 ice_aqc_opc_alloc_res, NULL);
4209 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4217 * ice_free_res_cntr - free resource counter
4218 * @hw: pointer to the hardware structure
4219 * @type: type of resource
4220 * @alloc_shared: if set it is shared else dedicated
4221 * @num_items: number of entries to be freed for FD resource type
4222 * @counter_id: counter ID resource which needs to be freed
4225 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4228 struct ice_aqc_alloc_free_res_elem *buf;
4229 enum ice_status status;
4233 buf_len = sizeof(*buf);
4234 buf = (struct ice_aqc_alloc_free_res_elem *)
4235 ice_malloc(hw, buf_len);
4237 return ICE_ERR_NO_MEMORY;
4239 buf->num_elems = CPU_TO_LE16(num_items);
4240 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4241 ICE_AQC_RES_TYPE_M) | alloc_shared);
4242 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4244 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4245 ice_aqc_opc_free_res, NULL);
4247 ice_debug(hw, ICE_DBG_SW,
4248 "counter resource could not be freed\n");
4255 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4256 * @hw: pointer to the hardware structure
4257 * @counter_id: returns counter index
4259 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4261 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4262 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4267 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4268 * @hw: pointer to the hardware structure
4269 * @counter_id: counter index to be freed
4271 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4273 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4274 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4279 * ice_alloc_res_lg_act - add large action resource
4280 * @hw: pointer to the hardware structure
4281 * @l_id: large action ID to fill it in
4282 * @num_acts: number of actions to hold with a large action entry
4284 static enum ice_status
4285 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4287 struct ice_aqc_alloc_free_res_elem *sw_buf;
4288 enum ice_status status;
4291 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4292 return ICE_ERR_PARAM;
4294 /* Allocate resource for large action */
4295 buf_len = sizeof(*sw_buf);
4296 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4297 ice_malloc(hw, buf_len);
4299 return ICE_ERR_NO_MEMORY;
4301 sw_buf->num_elems = CPU_TO_LE16(1);
4303 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4304 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4305 * If num_acts is greater than 2, then use
4306 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4307 * The num_acts cannot exceed 4. This was ensured at the
4308 * beginning of the function.
4311 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4312 else if (num_acts == 2)
4313 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4315 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4317 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4318 ice_aqc_opc_alloc_res, NULL);
4320 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4322 ice_free(hw, sw_buf);
4327 * ice_add_mac_with_sw_marker - add filter with sw marker
4328 * @hw: pointer to the hardware structure
4329 * @f_info: filter info structure containing the MAC filter information
4330 * @sw_marker: sw marker to tag the Rx descriptor with
4333 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4336 struct ice_switch_info *sw = hw->switch_info;
4337 struct ice_fltr_mgmt_list_entry *m_entry;
4338 struct ice_fltr_list_entry fl_info;
4339 struct LIST_HEAD_TYPE l_head;
4340 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4341 enum ice_status ret;
4345 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4346 return ICE_ERR_PARAM;
4348 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4349 return ICE_ERR_PARAM;
4351 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4352 return ICE_ERR_PARAM;
4354 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4355 return ICE_ERR_PARAM;
4356 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4358 /* Add filter if it doesn't exist so then the adding of large
4359 * action always results in update
4362 INIT_LIST_HEAD(&l_head);
4363 fl_info.fltr_info = *f_info;
4364 LIST_ADD(&fl_info.list_entry, &l_head);
4366 entry_exists = false;
4367 ret = ice_add_mac(hw, &l_head);
4368 if (ret == ICE_ERR_ALREADY_EXISTS)
4369 entry_exists = true;
4373 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4374 ice_acquire_lock(rule_lock);
4375 /* Get the book keeping entry for the filter */
4376 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4380 /* If counter action was enabled for this rule then don't enable
4381 * sw marker large action
4383 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4384 ret = ICE_ERR_PARAM;
4388 /* if same marker was added before */
4389 if (m_entry->sw_marker_id == sw_marker) {
4390 ret = ICE_ERR_ALREADY_EXISTS;
4394 /* Allocate a hardware table entry to hold large act. Three actions
4395 * for marker based large action
4397 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4401 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4404 /* Update the switch rule to add the marker action */
4405 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4407 ice_release_lock(rule_lock);
4412 ice_release_lock(rule_lock);
4413 /* only remove entry if it did not exist previously */
4415 ret = ice_remove_mac(hw, &l_head);
4421 * ice_add_mac_with_counter - add filter with counter enabled
4422 * @hw: pointer to the hardware structure
4423 * @f_info: pointer to filter info structure containing the MAC filter
4427 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4429 struct ice_switch_info *sw = hw->switch_info;
4430 struct ice_fltr_mgmt_list_entry *m_entry;
4431 struct ice_fltr_list_entry fl_info;
4432 struct LIST_HEAD_TYPE l_head;
4433 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4434 enum ice_status ret;
4439 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4440 return ICE_ERR_PARAM;
4442 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4443 return ICE_ERR_PARAM;
4445 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4446 return ICE_ERR_PARAM;
4447 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4449 entry_exist = false;
4451 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4453 /* Add filter if it doesn't exist so then the adding of large
4454 * action always results in update
4456 INIT_LIST_HEAD(&l_head);
4458 fl_info.fltr_info = *f_info;
4459 LIST_ADD(&fl_info.list_entry, &l_head);
4461 ret = ice_add_mac(hw, &l_head);
4462 if (ret == ICE_ERR_ALREADY_EXISTS)
4467 ice_acquire_lock(rule_lock);
4468 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4470 ret = ICE_ERR_BAD_PTR;
4474 /* Don't enable counter for a filter for which sw marker was enabled */
4475 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4476 ret = ICE_ERR_PARAM;
4480 /* If a counter was already enabled then don't need to add again */
4481 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4482 ret = ICE_ERR_ALREADY_EXISTS;
4486 /* Allocate a hardware table entry to VLAN counter */
4487 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4491 /* Allocate a hardware table entry to hold large act. Two actions for
4492 * counter based large action
4494 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4498 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4501 /* Update the switch rule to add the counter action */
4502 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4504 ice_release_lock(rule_lock);
4509 ice_release_lock(rule_lock);
4510 /* only remove entry if it did not exist previously */
4512 ret = ice_remove_mac(hw, &l_head);
4517 /* This is mapping table entry that maps every word within a given protocol
4518 * structure to the real byte offset as per the specification of that
4520 * for example dst address is 3 words in the MAC header and corresponding
4521 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4522 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4523 * matching entry describing its field. This needs to be updated if new
4524 * structure is added to that union.
4526 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4527 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4528 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4529 { ICE_ETYPE_OL, { 0 } },
4530 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4531 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4532 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4533 26, 28, 30, 32, 34, 36, 38 } },
4534 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4535 26, 28, 30, 32, 34, 36, 38 } },
4536 { ICE_TCP_IL, { 0, 2 } },
4537 { ICE_UDP_OF, { 0, 2 } },
4538 { ICE_UDP_ILOS, { 0, 2 } },
4539 { ICE_SCTP_IL, { 0, 2 } },
 /* VXLAN/GENEVE extraction starts at byte offset 8 within the tunnel
  * header -- presumably to reach the VNI-carrying words; confirm against
  * the header layouts in ice_protocol_type.h.
  */
4540 { ICE_VXLAN, { 8, 10, 12, 14 } },
4541 { ICE_GENEVE, { 8, 10, 12, 14 } },
4542 { ICE_VXLAN_GPE, { 0, 2, 4 } },
4543 { ICE_NVGRE, { 0, 2, 4, 6 } },
 /* Sentinel: terminates lookups that scan this table. */
4544 { ICE_PROTOCOL_LAST, { 0 } }
4547 /* The following table describes preferred grouping of recipes.
4548 * If a recipe that needs to be programmed is a superset or matches one of the
4549 * following combinations, then the recipe needs to be chained as per the
 *
 * Each entry holds the number of valid pairs, up to four
 * { HW protocol ID, word byte offset, ... } pairs (third field unused by the
 * consumers visible here), and the per-pair match masks.
4552 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
 /* Outer MAC: three words starting at byte 0 (destination address). */
4553 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4554 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
 /* Inner MAC plus a metadata-ID word at offset 44. */
4555 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4556 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4557 { 0xffff, 0xffff, 0xffff, 0xffff } },
 /* Inner IPv4, words at offsets 0/2 and 12/14 as two separate groups. */
4558 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4559 { 0xffff, 0xffff, 0xffff, 0xffff } },
4560 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4561 { 0xffff, 0xffff, 0xffff, 0xffff } },
 /* Mapping of software ice_protocol_type values to hardware protocol IDs.
  * Terminated by the ICE_PROTOCOL_LAST sentinel entry.
  *
  * NOTE(review): ice_fill_valid_words() indexes this table directly by the
  * enum value (ice_prot_id_tbl[rule->type]), so entry order must stay in
  * lock-step with enum ice_protocol_type -- keep them in sync when adding
  * protocols.
  */
4564 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4565 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4566 { ICE_MAC_IL, ICE_MAC_IL_HW },
4567 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4568 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4569 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4570 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4571 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4572 { ICE_TCP_IL, ICE_TCP_IL_HW },
4573 { ICE_UDP_OF, ICE_UDP_OF_HW },
4574 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4575 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
 /* All UDP-based tunnels share the outer-UDP hardware protocol ID. */
4576 { ICE_VXLAN, ICE_UDP_OF_HW },
4577 { ICE_GENEVE, ICE_UDP_OF_HW },
4578 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4579 { ICE_NVGRE, ICE_GRE_OF_HW },
4580 { ICE_PROTOCOL_LAST, 0 }
4584 * ice_find_recp - find a recipe
4585 * @hw: pointer to the hardware structure
4586 * @lkup_exts: extension sequence to match
4588 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4590 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4592 bool refresh_required = true;
4593 struct ice_sw_recipe *recp;
 /* available_result_ids appears to be a file-scope bitmap shared with
  * ice_add_sw_recipe(); every bit up to ICE_CHAIN_FV_INDEX_START is set
  * here to mark all chaining result indices as free.
  */
4596 /* Initialize available_result_ids which tracks available result idx */
4597 for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
4598 ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
4599 available_result_ids);
4601 /* Walk through existing recipes to find a match */
4602 recp = hw->switch_info->recp_list;
4603 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4604 /* If recipe was not created for this ID, in SW bookkeeping,
4605 * check if FW has an entry for this recipe. If the FW has an
4606 * entry update it in our SW bookkeeping and continue with the
4609 if (!recp[i].recp_created)
4610 if (ice_get_recp_frm_fw(hw,
4611 hw->switch_info->recp_list, i,
4615 /* if number of words we are looking for match */
4616 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4617 struct ice_fv_word *a = lkup_exts->fv_words;
4618 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
 /* Order-insensitive comparison: for each requested word "p",
  * scan all of recipe "i"'s words for a matching (off, prot_id).
  */
4622 for (p = 0; p < lkup_exts->n_val_words; p++) {
4623 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4625 if (a[p].off == b[q].off &&
4626 a[p].prot_id == b[q].prot_id)
4627 /* Found the "p"th word in the
4632 /* After walking through all the words in the
4633 * "i"th recipe if "p"th word was not found then
4634 * this recipe is not what we are looking for.
4635 * So break out from this loop and try the next
4638 if (q >= recp[i].lkup_exts.n_val_words) {
4643 /* If for "i"th recipe the found was never set to false
4644 * then it means we found our match
4647 return i; /* Return the recipe ID */
 /* No existing recipe covers the requested extraction sequence. */
4650 return ICE_MAX_NUM_RECIPES;
4654 * ice_prot_type_to_id - get protocol ID from protocol type
4655 * @type: protocol type
4656 * @id: pointer to variable that will receive the ID
4658 * Returns true if found, false otherwise
4660 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
 /* Linear scan of ice_prot_id_tbl; the ICE_PROTOCOL_LAST sentinel entry
  * terminates the search. *id is written only on a successful match.
  */
4664 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4665 if (ice_prot_id_tbl[i].type == type) {
4666 *id = ice_prot_id_tbl[i].protocol_id;
4673 * ice_fill_valid_words - fill in the valid words from a lookup rule
4674 * @rule: advanced rule with lookup information
4675 * @lkup_exts: byte offset extractions of the words that are valid
4677 * calculate valid words in a lookup rule using mask value
 *
 * Appends one fv_word entry (offset, protocol ID, mask) to lkup_exts for
 * every 16-bit word of the rule's mask union that is non-zero. Returns the
 * number of words appended (derived below as word - previous n_val_words).
4680 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4681 struct ice_prot_lkup_ext *lkup_exts)
 /* Reject protocol types that have no HW protocol ID mapping. */
4687 if (!ice_prot_type_to_id(rule->type, &prot_id))
 /* Continue appending after any words already collected by the caller. */
4690 word = lkup_exts->n_val_words;
 /* Walk the mask union one u16 word at a time; only words with a
  * non-zero mask participate in the extraction sequence.
  */
4692 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4693 if (((u16 *)&rule->m_u)[j] &&
4694 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4695 /* No more space to accommodate */
4696 if (word >= ICE_MAX_CHAIN_WORDS)
4698 lkup_exts->fv_words[word].off =
4699 ice_prot_ext[rule->type].offs[j];
 /* NOTE(review): prot_id obtained from ice_prot_type_to_id()
  * above is discarded; the table is re-indexed by the raw enum
  * value instead, which silently relies on ice_prot_id_tbl
  * order matching enum ice_protocol_type. Consider assigning
  * prot_id here -- verify against the table definition.
  */
4700 lkup_exts->fv_words[word].prot_id =
4701 ice_prot_id_tbl[rule->type].protocol_id;
4702 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
 /* Report how many words this rule contributed, then commit the count. */
4706 ret_val = word - lkup_exts->n_val_words;
4707 lkup_exts->n_val_words = word;
4713 * ice_find_prot_off_ind - check for specific ID and offset in rule
4714 * @lkup_exts: an array of protocol header extractions
4715 * @prot_type: protocol type to check
4716 * @off: expected offset of the extraction
4718 * Check if the prot_ext has given protocol ID and offset
 *
 * Returns the index of the matching word within lkup_exts, or
 * ICE_MAX_CHAIN_WORDS when no word carries this (prot_type, off) pair.
4721 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4726 for (j = 0; j < lkup_exts->n_val_words; j++)
4727 if (lkup_exts->fv_words[j].off == off &&
4728 lkup_exts->fv_words[j].prot_id == prot_type)
 /* Sentinel: no valid word index can ever equal this value. */
4731 return ICE_MAX_CHAIN_WORDS;
4735 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4736 * @lkup_exts: an array of protocol header extractions
4737 * @r_policy: preferred recipe grouping policy
4739 * Helper function to check if given recipe group is subset we need to check if
4740 * all the words described by the given recipe group exist in the advanced rule
4741 * look up information
4744 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4745 const struct ice_pref_recipe_group *r_policy)
 /* Temporary storage for the lkup_exts word indices matched by each
  * policy pair; only committed to the 'done' bitmap if ALL pairs match.
  */
4747 u8 ind[ICE_NUM_WORDS_RECIPE];
4751 /* check if everything in the r_policy is part of the entire rule */
4752 for (i = 0; i < r_policy->n_val_pairs; i++) {
4755 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4756 r_policy->pairs[i].off);
 /* Any missing (prot_id, off) pair means the policy is not a subset. */
4757 if (j >= ICE_MAX_CHAIN_WORDS)
4760 /* store the indexes temporarily found by the find function
4761 * this will be used to mark the words as 'done'
4766 /* If the entire policy recipe was a true match, then mark the fields
4767 * that are covered by the recipe as 'done' meaning that these words
4768 * will be clumped together in one recipe.
4769 * "Done" here means in our searching if certain recipe group
4770 * matches or is subset of the given rule, then we mark all
4771 * the corresponding offsets as found. So the remaining recipes should
4772 * be created with whatever words that were left.
4774 for (i = 0; i < count; i++) {
4777 ice_set_bit(in, lkup_exts->done);
4783 * ice_create_first_fit_recp_def - Create a recipe grouping
4784 * @hw: pointer to the hardware structure
4785 * @lkup_exts: an array of protocol header extractions
4786 * @rg_list: pointer to a list that stores new recipe groups
4787 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4789 * Using first fit algorithm, take all the words that are still not done
4790 * and start grouping them in 4-word groups. Each group makes up one
 *
 * Group entries are heap-allocated and linked onto rg_list; ownership
 * transfers to the caller, which is responsible for freeing them.
4793 static enum ice_status
4794 ice_create_first_fit_recp_def(struct ice_hw *hw,
4795 struct ice_prot_lkup_ext *lkup_exts,
4796 struct LIST_HEAD_TYPE *rg_list,
4799 struct ice_pref_recipe_group *grp = NULL;
4804 /* Walk through every word in the rule to check if it is not done. If so
4805 * then this word needs to be part of a new recipe.
4807 for (j = 0; j < lkup_exts->n_val_words; j++)
4808 if (!ice_is_bit_set(lkup_exts->done, j)) {
 /* Start a fresh group when none is open yet or the current
  * one already holds ICE_NUM_WORDS_RECIPE words.
  */
4810 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4811 struct ice_recp_grp_entry *entry;
4813 entry = (struct ice_recp_grp_entry *)
4814 ice_malloc(hw, sizeof(*entry));
4816 return ICE_ERR_NO_MEMORY;
4817 LIST_ADD(&entry->l_entry, rg_list);
4818 grp = &entry->r_group;
 /* Copy the word's (prot_id, off, mask) into the open group. */
4822 grp->pairs[grp->n_val_pairs].prot_id =
4823 lkup_exts->fv_words[j].prot_id;
4824 grp->pairs[grp->n_val_pairs].off =
4825 lkup_exts->fv_words[j].off;
4826 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4834 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4835 * @hw: pointer to the hardware structure
4836 * @fv_list: field vector with the extraction sequence information
4837 * @rg_list: recipe groupings with protocol-offset pairs
4839 * Helper function to fill in the field vector indices for protocol-offset
4840 * pairs. These indexes are then ultimately programmed into a recipe.
4843 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4844 struct LIST_HEAD_TYPE *rg_list)
4846 struct ice_sw_fv_list_entry *fv;
4847 struct ice_recp_grp_entry *rg;
4848 struct ice_fv_word *fv_ext;
4850 if (LIST_EMPTY(fv_list))
 /* Only the first field vector in the list is consulted; the caller's
  * comment ("Pick the first one for all the recipes") relies on this.
  */
4853 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4854 fv_ext = fv->fv_ptr->ew;
4856 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4859 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4860 struct ice_fv_word *pr;
4864 pr = &rg->r_group.pairs[i];
4865 mask = rg->r_group.mask[i];
 /* Linear search of the extraction words for the matching
  * (prot_id, off) pair; its index becomes the recipe's fv_idx.
  */
4867 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4868 if (fv_ext[j].prot_id == pr->prot_id &&
4869 fv_ext[j].off == pr->off) {
4870 /* Store index of field vector */
4872 /* Mask is given by caller as big
4873 * endian, but sent to FW as little
 * endian -- hence the 16-bit byte swap below.
4876 rg->fv_mask[i] = mask << 8 | mask >> 8;
4884 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4885 * @hw: pointer to hardware structure
4886 * @rm: recipe management list entry
4887 * @match_tun: if field vector index for tunnel needs to be programmed
 *
 * Builds one ice_aqc_recipe_data_elem per recipe group in rm->rg_list,
 * allocates HW recipe IDs, chains multi-group recipes through result
 * indices, submits everything via ice_aq_add_recipe() under the change
 * lock, and finally mirrors the created recipes into SW bookkeeping.
4889 static enum ice_status
4890 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4893 struct ice_aqc_recipe_data_elem *tmp;
4894 struct ice_aqc_recipe_data_elem *buf;
4895 struct ice_recp_grp_entry *entry;
4896 enum ice_status status;
4901 /* When more than one recipe are required, another recipe is needed to
4902 * chain them together. Matching a tunnel metadata ID takes up one of
4903 * the match fields in the chaining recipe reducing the number of
4904 * chained recipes by one.
4906 if (rm->n_grp_count > 1)
4908 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
4909 (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
4910 return ICE_ERR_MAX_LIMIT;
 /* Scratch buffer sized for the full recipe table; used below to fetch
  * a template entry from FW via ice_aq_get_recipe().
  */
4912 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
4913 ICE_MAX_NUM_RECIPES,
4916 return ICE_ERR_NO_MEMORY;
 /* One output element per recipe group to be programmed. */
4918 buf = (struct ice_aqc_recipe_data_elem *)
4919 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
4921 status = ICE_ERR_NO_MEMORY;
4925 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4926 recipe_count = ICE_MAX_NUM_RECIPES;
4927 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4929 if (status || recipe_count == 0)
4932 /* Allocate the recipe resources, and configure them according to the
4933 * match fields from protocol headers and extracted field vectors.
 *
 * chain_idx walks the free result indices recorded in the (file-scope)
 * available_result_ids bitmap, highest index first.
4935 chain_idx = ICE_CHAIN_FV_INDEX_START -
4936 ice_find_first_bit(available_result_ids,
4937 ICE_CHAIN_FV_INDEX_START + 1);
4938 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4941 status = ice_alloc_recipe(hw, &entry->rid);
4945 /* Clear the result index of the located recipe, as this will be
4946 * updated, if needed, later in the recipe creation process.
4948 tmp[0].content.result_indx = 0;
 /* Use the fetched FW entry as a template for this group. */
4950 buf[recps] = tmp[0];
4951 buf[recps].recipe_indx = (u8)entry->rid;
4952 /* if the recipe is a non-root recipe RID should be programmed
4953 * as 0 for the rules to be applied correctly.
4955 buf[recps].content.rid = 0;
4956 ice_memset(&buf[recps].content.lkup_indx, 0,
4957 sizeof(buf[recps].content.lkup_indx),
4960 /* All recipes use look-up index 0 to match switch ID. */
4961 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4962 buf[recps].content.mask[0] =
4963 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4964 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4967 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
 /* NOTE(review): magic 0x80 -- the chaining branch below uses
  * the named constant ICE_AQ_RECIPE_LKUP_IGNORE for the same
  * purpose; prefer the constant here too for consistency.
  */
4968 buf[recps].content.lkup_indx[i] = 0x80;
4969 buf[recps].content.mask[i] = 0;
 /* Overwrite ignored slots 1..n with the group's real FV indices. */
4972 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4973 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4974 buf[recps].content.mask[i + 1] =
4975 CPU_TO_LE16(entry->fv_mask[i]);
 /* Multi-group recipe: publish this group's match result at
  * chain_idx so the root chaining recipe can consume it, then
  * claim the bit and advance to the next free result index.
  */
4978 if (rm->n_grp_count > 1) {
4979 entry->chain_idx = chain_idx;
4980 buf[recps].content.result_indx =
4981 ICE_AQ_RECIPE_RESULT_EN |
4982 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4983 ICE_AQ_RECIPE_RESULT_DATA_M);
4984 ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
4985 available_result_ids);
4986 chain_idx = ICE_CHAIN_FV_INDEX_START -
4987 ice_find_first_bit(available_result_ids,
4988 ICE_CHAIN_FV_INDEX_START +
4992 /* fill recipe dependencies */
4993 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
4994 ICE_MAX_NUM_RECIPES);
4995 ice_set_bit(buf[recps].recipe_indx,
4996 (ice_bitmap_t *)buf[recps].recipe_bitmap);
4997 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
 /* Single-group case: the lone recipe is itself the root. */
5001 if (rm->n_grp_count == 1) {
5002 rm->root_rid = buf[0].recipe_indx;
5003 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5004 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5005 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5006 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5007 sizeof(buf[0].recipe_bitmap),
5008 ICE_NONDMA_TO_NONDMA);
5010 status = ICE_ERR_BAD_PTR;
5013 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5014 * the recipe which is getting created if specified
5015 * by user. Usually any advanced switch filter, which results
5016 * into new extraction sequence, ended up creating a new recipe
5017 * of type ROOT and usually recipes are associated with profiles
5018 * Switch rule referring newly created recipe, needs to have
5019 * either/or 'fwd' or 'join' priority, otherwise switch rule
5020 * evaluation will not happen correctly. In other words, if
5021 * switch rule to be evaluated on priority basis, then recipe
5022 * needs to have priority, otherwise it will be evaluated last.
5024 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5026 struct ice_recp_grp_entry *last_chain_entry;
5029 /* Allocate the last recipe that will chain the outcomes of the
5030 * other recipes together
5032 status = ice_alloc_recipe(hw, &rid);
5036 buf[recps].recipe_indx = (u8)rid;
5037 buf[recps].content.rid = (u8)rid;
5038 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5039 /* the new entry created should also be part of rg_list to
5040 * make sure we have complete recipe
5042 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5043 sizeof(*last_chain_entry));
5044 if (!last_chain_entry) {
5045 status = ICE_ERR_NO_MEMORY;
5048 last_chain_entry->rid = rid;
5049 ice_memset(&buf[recps].content.lkup_indx, 0,
5050 sizeof(buf[recps].content.lkup_indx),
5052 /* All recipes use look-up index 0 to match switch ID. */
5053 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5054 buf[recps].content.mask[0] =
5055 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5056 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5057 buf[recps].content.lkup_indx[i] =
5058 ICE_AQ_RECIPE_LKUP_IGNORE;
5059 buf[recps].content.mask[i] = 0;
5063 /* update r_bitmap with the recp that is used for chaining */
5064 ice_set_bit(rid, rm->r_bitmap);
5065 /* this is the recipe that chains all the other recipes so it
5066 * should not have a chaining ID to indicate the same
5068 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
 /* The root recipe matches every sub-recipe's published result
  * index with a full 0xFFFF mask.
  */
5069 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5071 last_chain_entry->fv_idx[i] = entry->chain_idx;
5072 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5073 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5074 ice_set_bit(entry->rid, rm->r_bitmap);
5076 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5077 if (sizeof(buf[recps].recipe_bitmap) >=
5078 sizeof(rm->r_bitmap)) {
5079 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5080 sizeof(buf[recps].recipe_bitmap),
5081 ICE_NONDMA_TO_NONDMA);
5083 status = ICE_ERR_BAD_PTR;
5086 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5088 /* To differentiate among different UDP tunnels, a meta data ID
5092 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5093 buf[recps].content.mask[i] =
5094 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5098 rm->root_rid = (u8)rid;
 /* FW recipe programming must happen under the change lock. */
5100 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5104 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5105 ice_release_change_lock(hw);
5109 /* Every recipe that just got created add it to the recipe
5112 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5113 struct ice_switch_info *sw = hw->switch_info;
5114 struct ice_sw_recipe *recp;
5116 recp = &sw->recp_list[entry->rid];
5117 recp->root_rid = entry->rid;
5118 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5119 entry->r_group.n_val_pairs *
5120 sizeof(struct ice_fv_word),
5121 ICE_NONDMA_TO_NONDMA);
5123 recp->n_ext_words = entry->r_group.n_val_pairs;
5124 recp->chain_idx = entry->chain_idx;
5125 recp->recp_created = true;
5126 recp->big_recp = false;
5140 * ice_create_recipe_group - creates recipe group
5141 * @hw: pointer to hardware structure
5142 * @rm: recipe management list entry
5143 * @lkup_exts: lookup elements
 *
 * First tries to cover the lookup words with the preferred groupings from
 * ice_recipe_pack, then packs whatever words remain via the first-fit
 * helper. On failure, all group entries linked onto rm->rg_list are freed.
5145 static enum ice_status
5146 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5147 struct ice_prot_lkup_ext *lkup_exts)
5149 struct ice_recp_grp_entry *entry;
5150 struct ice_recp_grp_entry *tmp;
5151 enum ice_status status;
5155 rm->n_grp_count = 0;
5157 /* Each switch recipe can match up to 5 words or metadata. One word in
5158 * each recipe is used to match the switch ID. Four words are left for
5159 * matching other values. If the new advanced recipe requires more than
5160 * 4 words, it needs to be split into multiple recipes which are chained
5161 * together using the intermediate result that each produces as input to
5162 * the other recipes in the sequence.
5164 groups = ARRAY_SIZE(ice_recipe_pack);
5166 /* Check if any of the preferred recipes from the grouping policy
5169 for (i = 0; i < groups; i++)
5170 /* Check if the recipe from the preferred grouping matches
5171 * or is a subset of the fields that needs to be looked up.
5173 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
5174 /* This recipe can be used by itself or grouped with
5177 entry = (struct ice_recp_grp_entry *)
5178 ice_malloc(hw, sizeof(*entry));
5180 status = ICE_ERR_NO_MEMORY;
5183 entry->r_group = ice_recipe_pack[i];
5184 LIST_ADD(&entry->l_entry, &rm->rg_list);
5188 /* Create recipes for words that are marked not done by packing them
5191 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5192 &rm->rg_list, &recp_count);
5194 rm->n_grp_count += recp_count;
 /* Cache the raw extraction words/masks on the recipe entry as well. */
5195 rm->n_ext_words = lkup_exts->n_val_words;
5196 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5197 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5198 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5199 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
 /* Error path: unlink and free every group entry added above. */
5204 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5206 LIST_DEL(&entry->l_entry);
5207 ice_free(hw, entry);
5215 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5216 * @hw: pointer to hardware structure
5217 * @lkups: lookup elements or match criteria for the advanced recipe, one
5218 * structure per protocol header
5219 * @lkups_cnt: number of protocols
5220 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup's protocol type to a HW protocol ID, then asks
 * ice_get_sw_fv_list() for field vectors containing all of them. The
 * temporary prot_ids array is freed on every exit path.
5222 static enum ice_status
5223 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5224 struct LIST_HEAD_TYPE *fv_list)
5226 enum ice_status status;
5230 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5232 return ICE_ERR_NO_MEMORY;
 /* Any lookup type without a HW protocol ID is a configuration error. */
5234 for (i = 0; i < lkups_cnt; i++)
5235 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5236 status = ICE_ERR_CFG;
5240 /* Find field vectors that include all specified protocol types */
5241 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
5244 ice_free(hw, prot_ids);
5249 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5250 * @hw: pointer to hardware structure
5251 * @lkups: lookup elements or match criteria for the advanced recipe, one
5252 * structure per protocol header
5253 * @lkups_cnt: number of protocols
5254 * @rinfo: other information regarding the rule e.g. priority and action info
5255 * @rid: return the recipe ID of the recipe created
 *
 * Collects the valid lookup words, reuses an existing recipe when one
 * already matches the extraction sequence, and otherwise builds recipe
 * groups, programs them into HW, and associates them with the matching
 * profiles. *rid receives the (existing or new) root recipe ID.
5257 static enum ice_status
5258 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5259 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5261 struct ice_prot_lkup_ext *lkup_exts;
5262 struct ice_recp_grp_entry *r_entry;
5263 struct ice_sw_fv_list_entry *fvit;
5264 struct ice_recp_grp_entry *r_tmp;
5265 struct ice_sw_fv_list_entry *tmp;
5266 enum ice_status status = ICE_SUCCESS;
5267 struct ice_sw_recipe *rm;
5268 bool match_tun = false;
5272 return ICE_ERR_PARAM;
5274 lkup_exts = (struct ice_prot_lkup_ext *)
5275 ice_malloc(hw, sizeof(*lkup_exts));
5277 return ICE_ERR_NO_MEMORY;
5279 /* Determine the number of words to be matched and if it exceeds a
5280 * recipe's restrictions
5282 for (i = 0; i < lkups_cnt; i++) {
5285 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5286 status = ICE_ERR_CFG;
5287 goto err_free_lkup_exts;
5290 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5292 status = ICE_ERR_CFG;
5293 goto err_free_lkup_exts;
 /* Fast path: an existing recipe already covers this sequence. */
5297 *rid = ice_find_recp(hw, lkup_exts);
5298 if (*rid < ICE_MAX_NUM_RECIPES)
5299 /* Success if found a recipe that match the existing criteria */
5300 goto err_free_lkup_exts;
5302 /* Recipe we need does not exist, add a recipe */
5304 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5306 status = ICE_ERR_NO_MEMORY;
5307 goto err_free_lkup_exts;
5310 /* Get field vectors that contain fields extracted from all the protocol
5311 * headers being programmed.
5313 INIT_LIST_HEAD(&rm->fv_list);
5314 INIT_LIST_HEAD(&rm->rg_list);
5316 status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
5320 /* Group match words into recipes using preferred recipe grouping
5323 status = ice_create_recipe_group(hw, rm, lkup_exts);
5327 /* There is only profile for UDP tunnels. So, it is necessary to use a
5328 * metadata ID flag to differentiate different tunnel types. A separate
5329 * recipe needs to be used for the metadata.
5331 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5332 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5333 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5336 /* set the recipe priority if specified */
5337 rm->priority = rinfo->priority ? rinfo->priority : 0;
5339 /* Find offsets from the field vector. Pick the first one for all the
5342 ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5343 status = ice_add_sw_recipe(hw, rm, match_tun);
5347 /* Associate all the recipes created with all the profiles in the
5348 * common field vector.
5350 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5352 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
 /* Merge the new recipes into the profile's existing recipe set
  * rather than overwriting it.
  */
5354 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5355 (u8 *)r_bitmap, NULL);
5359 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5360 ICE_MAX_NUM_RECIPES);
5361 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5365 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5368 ice_release_change_lock(hw);
5374 *rid = rm->root_rid;
 /* Remember the extraction sequence so ice_find_recp() can match it. */
5375 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5376 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
 /* Cleanup: release group entries, field-vector list entries, and the
  * temporary allocations on both success and error paths.
  */
5378 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5379 ice_recp_grp_entry, l_entry) {
5380 LIST_DEL(&r_entry->l_entry);
5381 ice_free(hw, r_entry);
5384 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5386 LIST_DEL(&fvit->list_entry);
5391 ice_free(hw, rm->root_buf);
5396 ice_free(hw, lkup_exts);
5402 * ice_find_dummy_packet - find dummy packet by tunnel type
5404 * @lkups: lookup elements or match criteria for the advanced recipe, one
5405 * structure per protocol header
5406 * @lkups_cnt: number of protocols
5407 * @tun_type: tunnel type from the match criteria
5408 * @pkt: dummy packet to fill according to filter match criteria
5409 * @pkt_len: packet length of dummy packet
5410 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Selection precedence: explicit tunnel type first (ALL_TUNNELS, NVGRE,
 * then the UDP-tunnel family), falling back to plain TCP/UDP/IPv6 dummy
 * packets derived from the protocol flags scanned below.
5413 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5414 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5416 const struct ice_dummy_pkt_offsets **offsets)
5418 bool tcp = false, udp = false, ipv6 = false;
 /* Flag which L3/L4 protocols appear in the match criteria. */
5421 for (i = 0; i < lkups_cnt; i++) {
5422 if (lkups[i].type == ICE_UDP_ILOS)
5424 else if (lkups[i].type == ICE_TCP_IL)
5426 else if (lkups[i].type == ICE_IPV6_OFOS)
5430 if (tun_type == ICE_ALL_TUNNELS) {
5431 *pkt = dummy_gre_udp_packet;
5432 *pkt_len = sizeof(dummy_gre_udp_packet);
5433 *offsets = dummy_gre_udp_packet_offsets;
5437 if (tun_type == ICE_SW_TUN_NVGRE) {
5439 *pkt = dummy_gre_tcp_packet;
5440 *pkt_len = sizeof(dummy_gre_tcp_packet);
5441 *offsets = dummy_gre_tcp_packet_offsets;
 /* NVGRE without inner TCP defaults to the GRE/UDP dummy packet. */
5445 *pkt = dummy_gre_udp_packet;
5446 *pkt_len = sizeof(dummy_gre_udp_packet);
5447 *offsets = dummy_gre_udp_packet_offsets;
5451 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5452 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5454 *pkt = dummy_udp_tun_tcp_packet;
5455 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5456 *offsets = dummy_udp_tun_tcp_packet_offsets;
5460 *pkt = dummy_udp_tun_udp_packet;
5461 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5462 *offsets = dummy_udp_tun_udp_packet_offsets;
 /* Non-tunnel fallbacks, chosen from the flags gathered above. */
5467 *pkt = dummy_udp_packet;
5468 *pkt_len = sizeof(dummy_udp_packet);
5469 *offsets = dummy_udp_packet_offsets;
5471 } else if (udp && ipv6) {
5472 *pkt = dummy_udp_ipv6_packet;
5473 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5474 *offsets = dummy_udp_ipv6_packet_offsets;
 /* NOTE(review): "(tcp && ipv6) || ipv6" is logically just "ipv6" --
  * the tcp term is redundant and could be dropped for clarity.
  */
5476 } else if ((tcp && ipv6) || ipv6) {
5477 *pkt = dummy_tcp_ipv6_packet;
5478 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5479 *offsets = dummy_tcp_ipv6_packet_offsets;
 /* Default: plain IPv4/TCP dummy packet. */
5483 *pkt = dummy_tcp_packet;
5484 *pkt_len = sizeof(dummy_tcp_packet);
5485 *offsets = dummy_tcp_packet_offsets;
5489 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5491 * @lkups: lookup elements or match criteria for the advanced recipe, one
5492 * structure per protocol header
5493 * @lkups_cnt: number of protocols
5494 * @s_rule: stores rule information from the match criteria
5495 * @dummy_pkt: dummy packet to fill according to filter match criteria
5496 * @pkt_len: packet length of dummy packet
5497 * @offsets: offset info for the dummy packet
 *
 * Copies the dummy packet into the switch rule's header buffer, then for
 * every lookup merges the caller's header values into the packet under the
 * caller's mask, word by word. Returns ICE_ERR_PARAM if a lookup's layer
 * cannot be located in the offsets table or its type is unsupported.
5499 static enum ice_status
5500 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5501 struct ice_aqc_sw_rules_elem *s_rule,
5502 const u8 *dummy_pkt, u16 pkt_len,
5503 const struct ice_dummy_pkt_offsets *offsets)
5508 /* Start with a packet with a pre-defined/dummy content. Then, fill
5509 * in the header values to be looked up or matched.
5511 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5513 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5515 for (i = 0; i < lkups_cnt; i++) {
5516 enum ice_protocol_type type;
5517 u16 offset = 0, len = 0, j;
5520 /* find the start of this layer; it should be found since this
5521 * was already checked when search for the dummy packet
5523 type = lkups[i].type;
5524 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5525 if (type == offsets[j].type) {
5526 offset = offsets[j].offset;
5531 /* this should never happen in a correct calling sequence */
5533 return ICE_ERR_PARAM;
 /* Header length depends on the protocol layer being written. */
5535 switch (lkups[i].type) {
5538 len = sizeof(struct ice_ether_hdr);
5541 len = sizeof(struct ice_ethtype_hdr);
5545 len = sizeof(struct ice_ipv4_hdr);
5549 len = sizeof(struct ice_ipv6_hdr);
5554 len = sizeof(struct ice_l4_hdr);
5557 len = sizeof(struct ice_sctp_hdr);
5560 len = sizeof(struct ice_nvgre);
5565 len = sizeof(struct ice_udp_tnl_hdr);
5568 return ICE_ERR_PARAM;
5571 /* the length should be a word multiple */
5572 if (len % ICE_BYTES_PER_WORD)
5575 /* We have the offset to the header start, the length, the
5576 * caller's header values and mask. Use this information to
5577 * copy the data into the dummy packet appropriately based on
5578 * the mask. Note that we need to only write the bits as
5579 * indicated by the mask to make sure we don't improperly write
5580 * over any significant packet data.
5582 for (j = 0; j < len / sizeof(u16); j++)
5583 if (((u16 *)&lkups[i].m_u)[j])
5584 ((u16 *)(pkt + offset))[j] =
5585 (((u16 *)(pkt + offset))[j] &
5586 ~((u16 *)&lkups[i].m_u)[j]) |
5587 (((u16 *)&lkups[i].h_u)[j] &
5588 ((u16 *)&lkups[i].m_u)[j]);
 /* Record the final packet length for the switch rule. */
5591 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5597 * ice_find_adv_rule_entry - Search a rule entry
5598 * @hw: pointer to the hardware structure
5599 * @lkups: lookup elements or match criteria for the advanced recipe, one
5600 * structure per protocol header
5601 * @lkups_cnt: number of protocols
5602 * @recp_id: recipe ID for which we are finding the rule
5603 * @rinfo: other information regarding the rule e.g. priority and action info
5605 * Helper function to search for a given advance rule entry
5606 * Returns pointer to entry storing the rule if found
 *
 * A match requires the same lookup count, byte-identical lookup elements
 * in the same order (memcmp below), plus matching sw_act flag and tunnel
 * type in the rule info.
5608 static struct ice_adv_fltr_mgmt_list_entry *
5609 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5610 u16 lkups_cnt, u8 recp_id,
5611 struct ice_adv_rule_info *rinfo)
5613 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5614 struct ice_switch_info *sw = hw->switch_info;
5617 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5618 ice_adv_fltr_mgmt_list_entry, list_entry) {
5619 bool lkups_matched = true;
5621 if (lkups_cnt != list_itr->lkups_cnt)
 /* Element-wise binary comparison of the stored lookups. */
5623 for (i = 0; i < list_itr->lkups_cnt; i++)
5624 if (memcmp(&list_itr->lkups[i], &lkups[i],
5626 lkups_matched = false;
5629 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5630 rinfo->tun_type == list_itr->rule_info.tun_type &&
5638 * ice_adv_add_update_vsi_list
5639 * @hw: pointer to the hardware structure
5640 * @m_entry: pointer to current adv filter management list entry
5641 * @cur_fltr: filter information from the book keeping entry
5642 * @new_fltr: filter information with the new VSI to be added
5644 * Call AQ command to add or update previously created VSI list with new VSI.
5646 * Helper function to do book keeping associated with adding filter information
5647 * The algorithm to do the booking keeping is described below :
5648 * When a VSI needs to subscribe to a given advanced filter
5649 * if only one VSI has been added till now
5650 * Allocate a new VSI list and add two VSIs
5651 * to this list using switch rule command
5652 * Update the previously created switch rule with the
5653 * newly created VSI list ID
5654 * if a VSI list was previously created
5655 * Add the new VSI to the previously created VSI list set
5656 * using the update switch rule command
5658 static enum ice_status
5659 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5660 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5661 struct ice_adv_rule_info *cur_fltr,
5662 struct ice_adv_rule_info *new_fltr)
5664 enum ice_status status;
5665 u16 vsi_list_id = 0;
/* converting an existing queue / queue-group rule into a VSI list is
 * not implemented
 */
5667 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5668 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5669 return ICE_ERR_NOT_IMPL;
/* a second identical DROP rule would be redundant */
5671 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5672 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5673 return ICE_ERR_ALREADY_EXISTS;
/* mixing queue/queue-group actions with VSI / VSI-list actions is
 * likewise not implemented
 */
5675 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5676 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5677 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5678 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5679 return ICE_ERR_NOT_IMPL;
5681 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5682 /* Only one entry existed in the mapping and it was not already
5683 * a part of a VSI list. So, create a VSI list with the old and
5686 struct ice_fltr_info tmp_fltr;
5687 u16 vsi_handle_arr[2];
5689 /* A rule already exists with the new VSI being added */
5690 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5691 new_fltr->sw_act.fwd_id.hw_vsi_id)
5692 return ICE_ERR_ALREADY_EXISTS;
/* create a two-entry VSI list holding the old and new VSI */
5694 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5695 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5696 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5702 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5703 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5704 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5705 /* Update the previous switch rule of "forward to VSI" to
5708 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* book keeping: the current filter now forwards to the new list */
5712 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5713 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5714 m_entry->vsi_list_info =
5715 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5718 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
/* a VSI list should already exist on this path */
5720 if (!m_entry->vsi_list_info)
5723 /* A rule already exists with the new VSI being added */
5724 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5727 /* Update the previously created VSI list set with
5728 * the new VSI ID passed in
5730 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5732 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5734 ice_aqc_opc_update_sw_rules,
5736 /* update VSI list mapping info with new VSI ID */
5738 ice_set_bit(vsi_handle,
5739 m_entry->vsi_list_info->vsi_map);
/* success on either branch: one more VSI subscribes to this rule */
5742 m_entry->vsi_count++;
5747 * ice_add_adv_rule - helper function to create an advanced switch rule
5748 * @hw: pointer to the hardware structure
5749 * @lkups: information on the words that needs to be looked up. All words
5750 * together makes one recipe
5751 * @lkups_cnt: num of entries in the lkups array
5752 * @rinfo: other information related to the rule that needs to be programmed
5753 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5754 * ignored in case of error.
5756 * This function can program only 1 rule at a time. The lkups is used to
5757 * describe the all the words that forms the "lookup" portion of the recipe.
5758 * These words can span multiple protocols. Callers to this function need to
5759 * pass in a list of protocol headers with lookup information along and mask
5760 * that determines which words are valid from the given protocol header.
5761 * rinfo describes other information related to this rule such as forwarding
5762 * IDs, priority of this rule, etc.
5765 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5766 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5767 struct ice_rule_query_data *added_entry)
5769 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5770 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5771 const struct ice_dummy_pkt_offsets *pkt_offsets;
5772 struct ice_aqc_sw_rules_elem *s_rule = NULL;
5773 struct LIST_HEAD_TYPE *rule_head;
5774 struct ice_switch_info *sw;
5775 enum ice_status status;
5776 const u8 *pkt = NULL;
/* basic argument validation (condition line not visible in this chunk) */
5782 return ICE_ERR_PARAM;
5784 for (i = 0; i < lkups_cnt; i++) {
5787 /* Validate match masks to make sure that there is something
5790 ptr = (u16 *)&lkups[i].m_u;
5791 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* an all-zero mask cannot match anything -> reject */
5798 return ICE_ERR_PARAM;
5800 /* make sure that we can locate a dummy packet */
5801 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5804 status = ICE_ERR_PARAM;
5805 goto err_ice_add_adv_rule;
/* only these four filter actions are supported for advanced rules */
5808 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5809 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5810 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5811 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5814 vsi_handle = rinfo->sw_act.vsi_handle;
5815 if (!ice_is_vsi_valid(hw, vsi_handle))
5816 return ICE_ERR_PARAM;
/* resolve the software VSI handle to the HW VSI number */
5818 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5819 rinfo->sw_act.fwd_id.hw_vsi_id =
5820 ice_get_hw_vsi_num(hw, vsi_handle);
5821 if (rinfo->sw_act.flag & ICE_FLTR_TX)
5822 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* create (or find) the recipe matching these lookups; rid receives it */
5824 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5827 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5829 /* we have to add VSI to VSI_LIST and increment vsi_count.
5830 * Also Update VSI list so that we can change forwarding rule
5831 * if the rule already exists, we will check if it exists with
5832 * same vsi_id, if not then add it to the VSI list if it already
5833 * exists if not then create a VSI list and add the existing VSI
5834 * ID and the new VSI ID to the list
5835 * We will add that VSI to the list
5837 status = ice_adv_add_update_vsi_list(hw, m_entry,
5838 &m_entry->rule_info,
/* rule already existed: report the existing IDs back to the caller */
5841 added_entry->rid = rid;
5842 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5843 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* no matching rule: build a fresh AQ switch-rule buffer sized for the
 * dummy packet
 */
5847 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5848 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5850 return ICE_ERR_NO_MEMORY;
/* encode the single-action word for the chosen filter action */
5851 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
5852 switch (rinfo->sw_act.fltr_act) {
5853 case ICE_FWD_TO_VSI:
5854 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5855 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5856 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5859 act |= ICE_SINGLE_ACT_TO_Q;
5860 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5861 ICE_SINGLE_ACT_Q_INDEX_M;
5863 case ICE_FWD_TO_QGRP:
/* queue region size is encoded as log2 of the group size */
5864 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5865 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
5866 act |= ICE_SINGLE_ACT_TO_Q;
5867 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5868 ICE_SINGLE_ACT_Q_INDEX_M;
5869 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5870 ICE_SINGLE_ACT_Q_REGION_M;
5872 case ICE_DROP_PACKET:
5873 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5874 ICE_SINGLE_ACT_VALID_BIT;
/* default: unsupported action */
5877 status = ICE_ERR_CFG;
5878 goto err_ice_add_adv_rule;
5881 /* set the rule LOOKUP type based on caller specified 'RX'
5882 * instead of hardcoding it to be either LOOKUP_TX/RX
5884 * for 'RX' set the source to be the port number
5885 * for 'TX' set the source to be the source HW VSI number (determined
5889 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5890 s_rule->pdata.lkup_tx_rx.src =
5891 CPU_TO_LE16(hw->port_info->lport);
5893 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5894 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5897 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5898 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* copy the dummy packet into the rule and patch in lookup values */
5900 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
/* program the rule into HW via the admin queue */
5903 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5904 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5907 goto err_ice_add_adv_rule;
/* allocate and populate the book-keeping entry for the new rule */
5908 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5909 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5911 status = ICE_ERR_NO_MEMORY;
5912 goto err_ice_add_adv_rule;
5915 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5916 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5917 ICE_NONDMA_TO_NONDMA);
5918 if (!adv_fltr->lkups) {
5919 status = ICE_ERR_NO_MEMORY;
5920 goto err_ice_add_adv_rule;
5923 adv_fltr->lkups_cnt = lkups_cnt;
5924 adv_fltr->rule_info = *rinfo;
/* HW returns the assigned rule index in the AQ response buffer */
5925 adv_fltr->rule_info.fltr_rule_id =
5926 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5927 sw = hw->switch_info;
5928 sw->recp_list[rid].adv_rule = true;
5929 rule_head = &sw->recp_list[rid].filt_rules;
5931 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5932 struct ice_fltr_info tmp_fltr;
5934 tmp_fltr.fltr_rule_id =
5935 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5936 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5937 tmp_fltr.fwd_id.hw_vsi_id =
5938 ice_get_hw_vsi_num(hw, vsi_handle);
5939 tmp_fltr.vsi_handle = vsi_handle;
5940 /* Update the previous switch rule of "forward to VSI" to
5943 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5945 goto err_ice_add_adv_rule;
5946 adv_fltr->vsi_count = 1;
5949 /* Add rule entry to book keeping list */
5950 LIST_ADD(&adv_fltr->list_entry, rule_head);
5952 added_entry->rid = rid;
5953 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5954 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* shared exit: on error, free the partially built book-keeping entry;
 * the AQ buffer is freed on every path
 */
5956 err_ice_add_adv_rule:
5957 if (status && adv_fltr) {
5958 ice_free(hw, adv_fltr->lkups);
5959 ice_free(hw, adv_fltr);
5962 ice_free(hw, s_rule);
5968 * ice_adv_rem_update_vsi_list
5969 * @hw: pointer to the hardware structure
5970 * @vsi_handle: VSI handle of the VSI to remove
5971 * @fm_list: filter management entry for which the VSI list management needs to
5974 static enum ice_status
5975 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5976 struct ice_adv_fltr_mgmt_list_entry *fm_list)
5978 struct ice_vsi_list_map_info *vsi_list_info;
5979 enum ice_sw_lkup_type lkup_type;
5980 enum ice_status status;
/* only meaningful for rules that currently forward to a VSI list */
5983 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5984 fm_list->vsi_count == 0)
5985 return ICE_ERR_PARAM;
5987 /* A rule with the VSI being removed does not exist */
5988 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5989 return ICE_ERR_DOES_NOT_EXIST;
/* remove this VSI from the HW VSI list (remove flag = true) */
5991 lkup_type = ICE_SW_LKUP_LAST;
5992 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
5993 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5994 ice_aqc_opc_update_sw_rules,
/* mirror the removal in the SW book keeping */
5999 fm_list->vsi_count--;
6000 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6001 vsi_list_info = fm_list->vsi_list_info;
6002 if (fm_list->vsi_count == 1) {
/* only one subscriber remains: collapse the VSI list back into a
 * plain "forward to VSI" rule for the surviving VSI
 */
6003 struct ice_fltr_info tmp_fltr;
6006 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6008 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6009 return ICE_ERR_OUT_OF_RANGE;
6011 /* Make sure VSI list is empty before removing it below */
6012 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6014 ice_aqc_opc_update_sw_rules,
6018 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6019 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6020 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6021 tmp_fltr.fwd_id.hw_vsi_id =
6022 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6023 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6024 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6026 /* Update the previous switch rule of "MAC forward to VSI" to
6027 * "MAC fwd to VSI list"
6029 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6031 ice_debug(hw, ICE_DBG_SW,
6032 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6033 tmp_fltr.fwd_id.hw_vsi_id, status);
6038 if (fm_list->vsi_count == 1) {
6039 /* Remove the VSI list since it is no longer used */
6040 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6042 ice_debug(hw, ICE_DBG_SW,
6043 "Failed to remove VSI list %d, error %d\n",
6044 vsi_list_id, status);
/* drop the SW mapping entry for the now-deleted VSI list */
6048 LIST_DEL(&vsi_list_info->list_entry);
6049 ice_free(hw, vsi_list_info);
6050 fm_list->vsi_list_info = NULL;
6057 * ice_rem_adv_rule - removes existing advanced switch rule
6058 * @hw: pointer to the hardware structure
6059 * @lkups: information on the words that needs to be looked up. All words
6060 * together makes one recipe
6061 * @lkups_cnt: num of entries in the lkups array
6062 * @rinfo: Pointer to the rule information for the rule
6064 * This function can be used to remove 1 rule at a time. The lkups is
6065 * used to describe all the words that forms the "lookup" portion of the
6066 * rule. These words can span multiple protocols. Callers to this function
6067 * need to pass in a list of protocol headers with lookup information along
6068 * and mask that determines which words are valid from the given protocol
6069 * header. rinfo describes other information related to this rule such as
6070 * forwarding IDs, priority of this rule, etc.
6073 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6074 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6076 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6077 const struct ice_dummy_pkt_offsets *offsets;
6078 struct ice_prot_lkup_ext lkup_exts;
6079 u16 rule_buf_sz, pkt_len, i, rid;
6080 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6081 enum ice_status status = ICE_SUCCESS;
6082 bool remove_rule = false;
6083 const u8 *pkt = NULL;
/* rebuild the lookup-word extraction from the caller's lkups so the
 * owning recipe can be located
 */
6086 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6087 for (i = 0; i < lkups_cnt; i++) {
6090 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6093 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6097 rid = ice_find_recp(hw, &lkup_exts);
6098 /* If did not find a recipe that match the existing criteria */
6099 if (rid == ICE_MAX_NUM_RECIPES)
6100 return ICE_ERR_PARAM;
6102 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6103 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6104 /* the rule is already removed */
/* decide under the rule lock whether the HW rule itself must go or
 * only a VSI must be detached from its VSI list
 */
6107 ice_acquire_lock(rule_lock);
6108 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6110 } else if (list_elem->vsi_count > 1) {
/* more subscribers remain: drop this VSI only, keep the rule */
6111 list_elem->vsi_list_info->ref_cnt--;
6112 remove_rule = false;
6113 vsi_handle = rinfo->sw_act.vsi_handle;
6114 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
/* else branch (last subscriber) — intervening lines not visible */
6116 vsi_handle = rinfo->sw_act.vsi_handle;
6117 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6119 ice_release_lock(rule_lock);
6122 if (list_elem->vsi_count == 0)
6125 ice_release_lock(rule_lock);
6127 struct ice_aqc_sw_rules_elem *s_rule;
/* size the removal buffer the same way the rule was added */
6129 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
6130 &pkt_len, &offsets);
6131 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6133 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6136 return ICE_ERR_NO_MEMORY;
/* removal is keyed on the rule index; action/header are cleared */
6137 s_rule->pdata.lkup_tx_rx.act = 0;
6138 s_rule->pdata.lkup_tx_rx.index =
6139 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6140 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6141 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6143 ice_aqc_opc_remove_sw_rules, NULL);
6144 if (status == ICE_SUCCESS) {
/* HW removal succeeded: unlink and free the book-keeping entry */
6145 ice_acquire_lock(rule_lock);
6146 LIST_DEL(&list_elem->list_entry);
6147 ice_free(hw, list_elem->lkups);
6148 ice_free(hw, list_elem);
6149 ice_release_lock(rule_lock);
6151 ice_free(hw, s_rule);
6157 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6158 * @hw: pointer to the hardware structure
6159 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6161 * This function is used to remove 1 rule at a time. The removal is based on
6162 * the remove_entry parameter. This function will remove rule for a given
6163 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6166 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6167 struct ice_rule_query_data *remove_entry)
6169 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6170 struct LIST_HEAD_TYPE *list_head;
6171 struct ice_adv_rule_info rinfo;
6172 struct ice_switch_info *sw;
6174 sw = hw->switch_info;
/* the recipe referenced by the caller must exist */
6175 if (!sw->recp_list[remove_entry->rid].recp_created)
6176 return ICE_ERR_PARAM;
6177 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* scan the recipe's rules for the matching rule ID, then delegate the
 * actual removal to ice_rem_adv_rule() with the stored lookups
 */
6178 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6180 if (list_itr->rule_info.fltr_rule_id ==
6181 remove_entry->rule_id) {
6182 rinfo = list_itr->rule_info;
/* remove on behalf of the caller's VSI, not the stored one */
6183 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6184 return ice_rem_adv_rule(hw, list_itr->lkups,
6185 list_itr->lkups_cnt, &rinfo);
/* no rule with that ID under this recipe */
6188 return ICE_ERR_PARAM;
6192 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6194 * @hw: pointer to the hardware structure
6195 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6197 * This function is used to remove all the rules for a given VSI and as soon
6198 * as removing a rule fails, it will return immediately with the error code,
6199 * else it will return ICE_SUCCESS
6202 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6204 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6205 struct ice_vsi_list_map_info *map_info;
6206 struct LIST_HEAD_TYPE *list_head;
6207 struct ice_adv_rule_info rinfo;
6208 struct ice_switch_info *sw;
6209 enum ice_status status;
6210 u16 vsi_list_id = 0;
6213 sw = hw->switch_info;
/* walk every created recipe that holds advanced rules */
6214 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6215 if (!sw->recp_list[rid].recp_created)
6217 if (!sw->recp_list[rid].adv_rule)
6219 list_head = &sw->recp_list[rid].filt_rules;
6221 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6222 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* only touch rules whose VSI list contains this VSI */
6223 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6227 rinfo = list_itr->rule_info;
6228 rinfo.sw_act.vsi_handle = vsi_handle;
6229 status = ice_rem_adv_rule(hw, list_itr->lkups,
/* fail fast on the first removal error (per function contract) */
6230 list_itr->lkups_cnt, &rinfo);
6240 * ice_replay_fltr - Replay all the filters stored by a specific list head
6241 * @hw: pointer to the hardware structure
6242 * @list_head: list for which filters needs to be replayed
6243 * @recp_id: Recipe ID for which rules need to be replayed
6245 static enum ice_status
6246 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6248 struct ice_fltr_mgmt_list_entry *itr;
6249 struct LIST_HEAD_TYPE l_head;
6250 enum ice_status status = ICE_SUCCESS;
/* nothing to replay */
6252 if (LIST_EMPTY(list_head))
6255 /* Move entries from the given list_head to a temporary l_head so that
6256 * they can be replayed. Otherwise when trying to re-add the same
6257 * filter, the function will return already exists
6259 LIST_REPLACE_INIT(list_head, &l_head);
6261 /* Mark the given list_head empty by reinitializing it so filters
6262 * could be added again by *handler
6264 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6266 struct ice_fltr_list_entry f_entry;
6268 f_entry.fltr_info = itr->fltr_info;
/* single-VSI, non-VLAN filters can be re-added directly */
6269 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6270 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6271 if (status != ICE_SUCCESS)
6276 /* Add a filter per VSI separately */
/* iterate the VSI-list bitmap, re-adding one filter per VSI */
6281 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6283 if (!ice_is_vsi_valid(hw, vsi_handle))
/* consume the bit so the next iteration finds the next VSI */
6286 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6287 f_entry.fltr_info.vsi_handle = vsi_handle;
6288 f_entry.fltr_info.fwd_id.hw_vsi_id =
6289 ice_get_hw_vsi_num(hw, vsi_handle);
6290 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters take a dedicated add path */
6291 if (recp_id == ICE_SW_LKUP_VLAN)
6292 status = ice_add_vlan_internal(hw, &f_entry);
6294 status = ice_add_rule_internal(hw, recp_id,
6296 if (status != ICE_SUCCESS)
6301 /* Clear the filter management list */
6302 ice_rem_sw_rule_info(hw, &l_head)
6307 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6308 * @hw: pointer to the hardware structure
6310 * NOTE: This function does not clean up partially added filters on error.
6311 * It is up to caller of the function to issue a reset or fail early.
6313 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6315 struct ice_switch_info *sw = hw->switch_info;
6316 enum ice_status status = ICE_SUCCESS;
/* replay the stored rules of every recipe, stopping on first error */
6319 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6320 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6322 status = ice_replay_fltr(hw, i, head);
6323 if (status != ICE_SUCCESS)
6330 * ice_replay_vsi_fltr - Replay filters for requested VSI
6331 * @hw: pointer to the hardware structure
6332 * @vsi_handle: driver VSI handle
6333 * @recp_id: Recipe ID for which rules need to be replayed
6334 * @list_head: list for which filters need to be replayed
6336 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6337 * It is required to pass valid VSI handle.
6339 static enum ice_status
6340 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6341 struct LIST_HEAD_TYPE *list_head)
6343 struct ice_fltr_mgmt_list_entry *itr;
6344 enum ice_status status = ICE_SUCCESS;
/* nothing to replay */
6347 if (LIST_EMPTY(list_head))
6349 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6351 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6353 struct ice_fltr_list_entry f_entry;
6355 f_entry.fltr_info = itr->fltr_info;
/* single-VSI, non-VLAN filter owned by this VSI: re-add directly */
6356 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6357 itr->fltr_info.vsi_handle == vsi_handle) {
6358 /* update the src in case it is VSI num */
6359 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6360 f_entry.fltr_info.src = hw_vsi_id;
6361 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6362 if (status != ICE_SUCCESS)
/* otherwise only replay if this VSI is in the filter's VSI list */
6366 if (!itr->vsi_list_info ||
6367 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6369 /* Clearing it so that the logic can add it back */
6370 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6371 f_entry.fltr_info.vsi_handle = vsi_handle;
6372 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6373 /* update the src in case it is VSI num */
6374 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6375 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters take a dedicated add path */
6376 if (recp_id == ICE_SW_LKUP_VLAN)
6377 status = ice_add_vlan_internal(hw, &f_entry);
6379 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6380 if (status != ICE_SUCCESS)
6388 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6389 * @hw: pointer to the hardware structure
6390 * @vsi_handle: driver VSI handle
6391 * @list_head: list for which filters need to be replayed
6393 * Replay the advanced rule for the given VSI.
6395 static enum ice_status
6396 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6397 struct LIST_HEAD_TYPE *list_head)
6399 struct ice_rule_query_data added_entry = { 0 };
6400 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6401 enum ice_status status = ICE_SUCCESS;
/* nothing to replay */
6403 if (LIST_EMPTY(list_head))
/* re-program every stored advanced rule that belongs to this VSI */
6405 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6407 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6408 u16 lk_cnt = adv_fltr->lkups_cnt;
6410 if (vsi_handle != rinfo->sw_act.vsi_handle)
6412 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6421 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6422 * @hw: pointer to the hardware structure
6423 * @vsi_handle: driver VSI handle
6425 * Replays filters for requested VSI via vsi_handle.
6427 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6429 struct ice_switch_info *sw = hw->switch_info;
6430 enum ice_status status;
6433 /* Update the recipes that were created */
6434 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6435 struct LIST_HEAD_TYPE *head;
6437 head = &sw->recp_list[i].filt_replay_rules;
/* advanced-rule recipes replay through the adv path, the rest
 * through the legacy filter path
 */
6438 if (!sw->recp_list[i].adv_rule)
6439 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6441 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6442 if (status != ICE_SUCCESS)
6450 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6451 * @hw: pointer to the HW struct
6453 * Deletes the filter replay rules.
6455 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6457 struct ice_switch_info *sw = hw->switch_info;
/* free each recipe's replay list, picking the matching cleanup helper
 * for legacy vs. advanced rules
 */
6463 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6464 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6465 struct LIST_HEAD_TYPE *l_head;
6467 l_head = &sw->recp_list[i].filt_replay_rules;
6468 if (!sw->recp_list[i].adv_rule)
6469 ice_rem_sw_rule_info(hw, l_head);
6471 ice_rem_adv_rule_info(hw, l_head)