1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of the DA / ethertype / VLAN-TCI fields within the dummy
 * L2 header below, plus the maximum 12-bit VLAN ID.
 */
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
 */
/* Template L2 header copied into switch-rule payloads; 0x2 in byte 0 marks a
 * locally administered DA MAC (see comment block above).
 * NOTE(review): the remaining initializer bytes are elided in this listing.
 */
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Wire size of an RX/TX lookup switch-rule AQ element that carries the dummy
 * Ethernet header; the pdata member is replaced by the lkup_rx_tx payload and
 * the "- 1" accounts for the one-byte flexible header array in that struct.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Same element with no packet header appended */
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Large-action rule element sized for n action entries */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule element sized for n VSI entries */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* One entry of the dummy-packet layout tables below: maps a protocol header
 * to its byte offset within the template packet. A table is terminated by an
 * entry whose type is ICE_PROTOCOL_LAST.
 */
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
162 { ICE_PROTOCOL_LAST, 0 },
166 u8 dummy_udp_tun_tcp_packet[] = {
167 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
168 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
171 0x08, 0x00, /* ICE_ETYPE_OL 12 */
173 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
174 0x00, 0x01, 0x00, 0x00,
175 0x40, 0x11, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
180 0x00, 0x46, 0x00, 0x00,
182 0x04, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
186 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
190 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
191 0x00, 0x01, 0x00, 0x00,
192 0x40, 0x06, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
199 0x50, 0x02, 0x20, 0x00,
200 0x00, 0x00, 0x00, 0x00
204 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
206 { ICE_ETYPE_OL, 12 },
207 { ICE_IPV4_OFOS, 14 },
212 { ICE_UDP_ILOS, 84 },
213 { ICE_PROTOCOL_LAST, 0 },
217 u8 dummy_udp_tun_udp_packet[] = {
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_OL 12 */
224 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
225 0x00, 0x01, 0x00, 0x00,
226 0x00, 0x11, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
231 0x00, 0x3a, 0x00, 0x00,
233 0x0c, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
234 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
237 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00,
241 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
242 0x00, 0x01, 0x00, 0x00,
243 0x00, 0x11, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
248 0x00, 0x08, 0x00, 0x00,
252 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
254 { ICE_ETYPE_OL, 12 },
255 { ICE_IPV4_OFOS, 14 },
256 { ICE_UDP_ILOS, 34 },
257 { ICE_PROTOCOL_LAST, 0 },
261 dummy_udp_packet[] = {
262 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x08, 0x00, /* ICE_ETYPE_OL 12 */
268 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
269 0x00, 0x01, 0x00, 0x00,
270 0x00, 0x11, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
275 0x00, 0x08, 0x00, 0x00,
277 0x00, 0x00, /* 2 bytes for 4 byte alignment */
281 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
283 { ICE_ETYPE_OL, 12 },
284 { ICE_IPV4_OFOS, 14 },
286 { ICE_PROTOCOL_LAST, 0 },
290 dummy_tcp_packet[] = {
291 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
292 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00,
295 0x08, 0x00, /* ICE_ETYPE_OL 12 */
297 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
298 0x00, 0x01, 0x00, 0x00,
299 0x00, 0x06, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
306 0x50, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
313 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
315 { ICE_ETYPE_OL, 12 },
316 { ICE_IPV6_OFOS, 14 },
318 { ICE_PROTOCOL_LAST, 0 },
322 dummy_tcp_ipv6_packet[] = {
323 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
327 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
329 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
330 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
341 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00,
343 0x50, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, /* 2 bytes for 4 byte alignment */
350 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
352 { ICE_ETYPE_OL, 12 },
353 { ICE_IPV6_OFOS, 14 },
354 { ICE_UDP_ILOS, 54 },
355 { ICE_PROTOCOL_LAST, 0 },
359 dummy_udp_ipv6_packet[] = {
360 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
366 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
367 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
378 0x00, 0x08, 0x00, 0x00,
380 0x00, 0x00, /* 2 bytes for 4 byte alignment */
383 /* this is a recipe to profile bitmap association */
384 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
385 ICE_MAX_NUM_PROFILES);
/* Bitmap of chained-recipe result field-vector indexes still free; bits are
 * cleared in ice_get_recp_frm_fw() as FW-programmed recipes claim them.
 */
386 static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
/* forward declaration: defined later in this file */
388 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
391 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
392 * @hw: pointer to hardware structure
393 * @recps: struct that we need to populate
394 * @rid: recipe ID that we are populating
395 * @refresh_required: true if we should get recipe to profile mapping from FW
397 * This function is used to populate all the necessary entries into our
398 * bookkeeping so that we have a current list of all the recipes that are
399 * programmed in the firmware.
401 static enum ice_status
402 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
403 bool *refresh_required)
405 u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
406 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
/* result_idxs[] collects the chained recipes' result indexes so the loop
 * below can skip them when extracting protocol/offset lookup words.
 */
407 u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
408 struct ice_aqc_recipe_data_elem *tmp;
409 u16 num_recps = ICE_MAX_NUM_RECIPES;
410 struct ice_prot_lkup_ext *lkup_exts;
411 enum ice_status status;
413 /* we need a buffer big enough to accommodate all the recipes */
414 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
415 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
/* allocation-failure bail-out (its 'if (!tmp)' guard is elided in this
 * listing)
 */
417 return ICE_ERR_NO_MEMORY;
419 tmp[0].recipe_indx = rid;
420 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
421 /* non-zero status meaning recipe doesn't exist */
425 /* Get recipe to profile map so that we can get the fv from lkups that
426 * we read for a recipe from FW. Since we want to minimize the number of
427 * times we make this FW call, just make one call and cache the copy
428 * until a new recipe is added. This operation is only required the
429 * first time to get the changes from FW. Then to search existing
430 * entries we don't need to update the cache again until another recipe
433 if (*refresh_required) {
434 ice_get_recp_to_prof_map(hw);
435 *refresh_required = false;
437 lkup_exts = &recps[rid].lkup_exts;
438 /* start populating all the entries for recps[rid] based on lkups from
441 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
442 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
443 struct ice_recp_grp_entry *rg_entry;
444 u8 prof_id, prot = 0;
447 rg_entry = (struct ice_recp_grp_entry *)
448 ice_malloc(hw, sizeof(*rg_entry));
450 status = ICE_ERR_NO_MEMORY;
453 /* Avoid 8th bit since its result enable bit */
454 result_idxs[result_idx] = root_bufs.content.result_indx &
455 ~ICE_AQ_RECIPE_RESULT_EN;
456 /* Check if result enable bit is set */
457 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
/* mark this chain result index as in use by clearing its
 * bit in the free-ID bitmap
 */
458 ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
459 result_idxs[result_idx++],
460 available_result_ids);
462 recipe_to_profile[tmp[sub_recps].recipe_indx],
463 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
464 /* get the first profile that is associated with rid */
465 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
466 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
467 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
469 rg_entry->fv_idx[i] = lkup_indx;
470 rg_entry->fv_mask[i] =
471 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
473 /* If the recipe is a chained recipe then all its
474 * child recipe's result will have a result index.
475 * To fill fv_words we should not use those result
476 * index, we only need the protocol ids and offsets.
477 * We will skip all the fv_idx which stores result
478 * index in them. We also need to skip any fv_idx which
479 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
480 * valid offset value.
482 if (result_idxs[0] == rg_entry->fv_idx[i] ||
483 result_idxs[1] == rg_entry->fv_idx[i] ||
484 result_idxs[2] == rg_entry->fv_idx[i] ||
485 result_idxs[3] == rg_entry->fv_idx[i] ||
486 result_idxs[4] == rg_entry->fv_idx[i] ||
487 rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
488 rg_entry->fv_idx[i] == 0)
/* translate the profile's fv index into a (protocol id, offset) pair */
491 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
492 rg_entry->fv_idx[i], &prot, &off);
493 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
494 lkup_exts->fv_words[fv_word_idx].off = off;
497 /* populate rg_list with the data from the child entry of this
500 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
502 lkup_exts->n_val_words = fv_word_idx;
503 recps[rid].n_grp_count = num_recps;
/* keep a private copy of the raw FW recipe elements for later replay */
504 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
505 ice_calloc(hw, recps[rid].n_grp_count,
506 sizeof(struct ice_aqc_recipe_data_elem));
507 if (!recps[rid].root_buf)
510 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
511 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
512 recps[rid].recp_created = true;
513 if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
514 recps[rid].root_rid = rid;
521 * ice_get_recp_to_prof_map - updates recipe to profile mapping
522 * @hw: pointer to hardware structure
524 * This function is used to populate recipe_to_profile matrix where index to
525 * this array is the recipe ID and the element is the mapping of which profiles
526 * is this recipe mapped to.
529 ice_get_recp_to_prof_map(struct ice_hw *hw)
531 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
534 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
537 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* FW reports recipes-per-profile; failure for one profile skips it */
538 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
/* transpose the FW answer into the profiles-per-recipe matrix */
541 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
542 if (ice_is_bit_set(r_bitmap, j))
543 ice_set_bit(i, recipe_to_profile[j]);
548 * ice_init_def_sw_recp - initialize the recipe book keeping tables
549 * @hw: pointer to the HW struct
551 * Allocate memory for the entire recipe table and initialize the structures/
552 * entries corresponding to basic recipes.
554 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
556 struct ice_sw_recipe *recps;
559 recps = (struct ice_sw_recipe *)
560 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
/* allocation-failure bail-out (guard condition elided in this listing) */
562 return ICE_ERR_NO_MEMORY;
/* each basic recipe starts out as its own root with empty rule lists */
564 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
565 recps[i].root_rid = i;
566 INIT_LIST_HEAD(&recps[i].filt_rules);
567 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
568 INIT_LIST_HEAD(&recps[i].rg_list);
569 ice_init_lock(&recps[i].filt_rule_lock);
572 hw->switch_info->recp_list = recps;
578 * ice_aq_get_sw_cfg - get switch configuration
579 * @hw: pointer to the hardware structure
580 * @buf: pointer to the result buffer
581 * @buf_size: length of the buffer available for response
582 * @req_desc: pointer to requested descriptor
583 * @num_elems: pointer to number of elements
584 * @cd: pointer to command details structure or NULL
586 * Get switch configuration (0x0200) to be placed in 'buff'.
587 * This admin command returns information such as initial VSI/port number
588 * and switch ID it belongs to.
590 * NOTE: *req_desc is both an input/output parameter.
591 * The caller of this function first calls this function with *request_desc set
592 * to 0. If the response from f/w has *req_desc set to 0, all the switch
593 * configuration information has been returned; if non-zero (meaning not all
594 * the information was returned), the caller should call this function again
595 * with *req_desc set to the previous value returned by f/w to get the
596 * next block of switch configuration information.
598 * *num_elems is output only parameter. This reflects the number of elements
599 * in response buffer. The caller of this function to use *num_elems while
600 * parsing the response buffer.
602 static enum ice_status
603 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
604 u16 buf_size, u16 *req_desc, u16 *num_elems,
605 struct ice_sq_cd *cd)
607 struct ice_aqc_get_sw_cfg *cmd;
608 enum ice_status status;
609 struct ice_aq_desc desc;
611 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
612 cmd = &desc.params.get_sw_conf;
/* resume token for the paged query, see NOTE above */
613 cmd->element = CPU_TO_LE16(*req_desc);
615 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* read back the continuation token and element count from the response */
617 *req_desc = LE16_TO_CPU(cmd->element);
618 *num_elems = LE16_TO_CPU(cmd->num_elems);
626 * ice_alloc_sw - allocate resources specific to switch
627 * @hw: pointer to the HW struct
628 * @ena_stats: true to turn on VEB stats
629 * @shared_res: true for shared resource, false for dedicated resource
630 * @sw_id: switch ID returned
631 * @counter_id: VEB counter ID returned
633 * allocates switch resources (SWID and VEB counter) (0x0208)
636 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
639 struct ice_aqc_alloc_free_res_elem *sw_buf;
640 struct ice_aqc_res_elem *sw_ele;
641 enum ice_status status;
644 buf_len = sizeof(*sw_buf);
645 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
646 ice_malloc(hw, buf_len);
/* allocation-failure bail-out (guard condition elided in this listing) */
648 return ICE_ERR_NO_MEMORY;
650 /* Prepare buffer for switch ID.
651 * The number of resource entries in buffer is passed as 1 since only a
652 * single switch/VEB instance is allocated, and hence a single sw_id
655 sw_buf->num_elems = CPU_TO_LE16(1);
657 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
658 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
659 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
661 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
662 ice_aqc_opc_alloc_res, NULL);
665 goto ice_alloc_sw_exit;
667 sw_ele = &sw_buf->elem[0];
668 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
671 /* Prepare buffer for VEB Counter */
672 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
673 struct ice_aqc_alloc_free_res_elem *counter_buf;
674 struct ice_aqc_res_elem *counter_ele;
676 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
677 ice_malloc(hw, buf_len);
/* on counter-buffer alloc failure fall through to common cleanup */
679 status = ICE_ERR_NO_MEMORY;
680 goto ice_alloc_sw_exit;
683 /* The number of resource entries in buffer is passed as 1 since
684 * only a single switch/VEB instance is allocated, and hence a
685 * single VEB counter is requested.
687 counter_buf->num_elems = CPU_TO_LE16(1);
688 counter_buf->res_type =
689 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
690 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
691 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* counter_buf is freed on both the error and success paths below */
695 ice_free(hw, counter_buf);
696 goto ice_alloc_sw_exit;
698 counter_ele = &counter_buf->elem[0];
699 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
700 ice_free(hw, counter_buf);
704 ice_free(hw, sw_buf);
709 * ice_free_sw - free resources specific to switch
710 * @hw: pointer to the HW struct
711 * @sw_id: switch ID returned
712 * @counter_id: VEB counter ID returned
714 * free switch resources (SWID and VEB counter) (0x0209)
716 * NOTE: This function frees multiple resources. It continues
717 * releasing other resources even after it encounters error.
718 * The error code returned is the last error it encountered.
720 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
722 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
723 enum ice_status status, ret_status;
726 buf_len = sizeof(*sw_buf);
727 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
728 ice_malloc(hw, buf_len);
/* allocation-failure bail-out (guard condition elided in this listing) */
730 return ICE_ERR_NO_MEMORY;
732 /* Prepare buffer to free for switch ID res.
733 * The number of resource entries in buffer is passed as 1 since only a
734 * single switch/VEB instance is freed, and hence a single sw_id
737 sw_buf->num_elems = CPU_TO_LE16(1);
738 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
739 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* per the NOTE above, only record the status; keep going on error */
741 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
742 ice_aqc_opc_free_res, NULL);
745 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
747 /* Prepare buffer to free for VEB Counter resource */
748 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
749 ice_malloc(hw, buf_len);
751 ice_free(hw, sw_buf);
752 return ICE_ERR_NO_MEMORY;
755 /* The number of resource entries in buffer is passed as 1 since only a
756 * single switch/VEB instance is freed, and hence a single VEB counter
759 counter_buf->num_elems = CPU_TO_LE16(1);
760 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
761 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
763 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
764 ice_aqc_opc_free_res, NULL);
766 ice_debug(hw, ICE_DBG_SW,
767 "VEB counter resource could not be freed\n");
771 ice_free(hw, counter_buf);
772 ice_free(hw, sw_buf);
/* ice_aq_add_vsi (kernel-doc title line elided in this listing) */
778 * @hw: pointer to the HW struct
779 * @vsi_ctx: pointer to a VSI context struct
780 * @cd: pointer to command details structure or NULL
782 * Add a VSI context to the hardware (0x0210)
785 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
786 struct ice_sq_cd *cd)
788 struct ice_aqc_add_update_free_vsi_resp *res;
789 struct ice_aqc_add_get_update_free_vsi *cmd;
790 struct ice_aq_desc desc;
791 enum ice_status status;
/* cmd and res alias the same descriptor: cmd is the request view,
 * res the response view filled by FW
 */
793 cmd = &desc.params.vsi_cmd;
794 res = &desc.params.add_update_free_vsi_res;
796 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* caller-chosen VSI number is only sent when not pool-allocated */
798 if (!vsi_ctx->alloc_from_pool)
799 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
800 ICE_AQ_VSI_IS_VALID);
802 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
804 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
806 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
807 sizeof(vsi_ctx->info), cd);
/* propagate FW-assigned VSI number and pool usage back to the caller */
810 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
811 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
812 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/* ice_aq_free_vsi (kernel-doc title line elided in this listing) */
820 * @hw: pointer to the HW struct
821 * @vsi_ctx: pointer to a VSI context struct
822 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
823 * @cd: pointer to command details structure or NULL
825 * Free VSI context info from hardware (0x0213)
828 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
829 bool keep_vsi_alloc, struct ice_sq_cd *cd)
831 struct ice_aqc_add_update_free_vsi_resp *resp;
832 struct ice_aqc_add_get_update_free_vsi *cmd;
833 struct ice_aq_desc desc;
834 enum ice_status status;
836 cmd = &desc.params.vsi_cmd;
837 resp = &desc.params.add_update_free_vsi_res;
839 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
841 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* set only when keep_vsi_alloc requests retaining the PF allocation
 * (the enclosing 'if' is elided in this listing)
 */
843 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
845 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* report remaining pool usage back to the caller */
847 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
848 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* ice_aq_update_vsi (kernel-doc title line elided in this listing) */
856 * @hw: pointer to the HW struct
857 * @vsi_ctx: pointer to a VSI context struct
858 * @cd: pointer to command details structure or NULL
860 * Update VSI context in the hardware (0x0211)
863 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
864 struct ice_sq_cd *cd)
866 struct ice_aqc_add_update_free_vsi_resp *resp;
867 struct ice_aqc_add_get_update_free_vsi *cmd;
868 struct ice_aq_desc desc;
869 enum ice_status status;
871 cmd = &desc.params.vsi_cmd;
872 resp = &desc.params.add_update_free_vsi_res;
874 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
876 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* descriptor carries a read buffer (the VSI info section) */
878 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
880 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
881 sizeof(vsi_ctx->info), cd);
884 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
885 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
892 * ice_is_vsi_valid - check whether the VSI is valid or not
893 * @hw: pointer to the HW struct
894 * @vsi_handle: VSI handle
896 * check whether the VSI is valid or not
898 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* valid = in-range handle with a non-NULL saved context entry */
900 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
904 * ice_get_hw_vsi_num - return the HW VSI number
905 * @hw: pointer to the HW struct
906 * @vsi_handle: VSI handle
908 * return the HW VSI number
909 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
911 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* no NULL check here — see Caution above */
913 return hw->vsi_ctx[vsi_handle]->vsi_num;
917 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
918 * @hw: pointer to the HW struct
919 * @vsi_handle: VSI handle
921 * return the VSI context entry for a given VSI handle
923 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* out-of-range handles yield NULL rather than an OOB read */
925 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
929 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
930 * @hw: pointer to the HW struct
931 * @vsi_handle: VSI handle
932 * @vsi: VSI context pointer
934 * save the VSI context entry for a given VSI handle
937 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* caller retains ownership of *vsi; this stores only the pointer */
939 hw->vsi_ctx[vsi_handle] = vsi;
943 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
944 * @hw: pointer to the HW struct
945 * @vsi_handle: VSI handle
947 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
949 struct ice_vsi_ctx *vsi;
952 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* free each traffic class's LAN queue context and NULL the slot to
 * avoid dangling pointers
 */
955 ice_for_each_traffic_class(i) {
956 if (vsi->lan_q_ctx[i]) {
957 ice_free(hw, vsi->lan_q_ctx[i]);
958 vsi->lan_q_ctx[i] = NULL;
964 * ice_clear_vsi_ctx - clear the VSI context entry
965 * @hw: pointer to the HW struct
966 * @vsi_handle: VSI handle
968 * clear the VSI context entry
970 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
972 struct ice_vsi_ctx *vsi;
974 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* release queue contexts first, then drop the handle's entry */
976 ice_clear_vsi_q_ctx(hw, vsi_handle);
978 hw->vsi_ctx[vsi_handle] = NULL;
983 * ice_clear_all_vsi_ctx - clear all the VSI context entries
984 * @hw: pointer to the HW struct
986 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
990 for (i = 0; i < ICE_MAX_VSI; i++)
991 ice_clear_vsi_ctx(hw, i);
995 * ice_add_vsi - add VSI context to the hardware and VSI handle list
996 * @hw: pointer to the HW struct
997 * @vsi_handle: unique VSI handle provided by drivers
998 * @vsi_ctx: pointer to a VSI context struct
999 * @cd: pointer to command details structure or NULL
1001 * Add a VSI context to the hardware also add it into the VSI handle list.
1002 * If this function gets called after reset for existing VSIs then update
1003 * with the new HW VSI number in the corresponding VSI handle list entry.
1006 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1007 struct ice_sq_cd *cd)
1009 struct ice_vsi_ctx *tmp_vsi_ctx;
1010 enum ice_status status;
1012 if (vsi_handle >= ICE_MAX_VSI)
1013 return ICE_ERR_PARAM;
1014 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1017 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1019 /* Create a new VSI context */
1020 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1021 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* on alloc failure undo the HW add so FW and SW stay consistent */
1023 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1024 return ICE_ERR_NO_MEMORY;
1026 *tmp_vsi_ctx = *vsi_ctx;
1028 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1030 /* update with new HW VSI num */
1031 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
1032 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1039 * ice_free_vsi- free VSI context from hardware and VSI handle list
1040 * @hw: pointer to the HW struct
1041 * @vsi_handle: unique VSI handle
1042 * @vsi_ctx: pointer to a VSI context struct
1043 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1044 * @cd: pointer to command details structure or NULL
1046 * Free VSI context info from hardware as well as from VSI handle list
1049 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1050 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1052 enum ice_status status;
1054 if (!ice_is_vsi_valid(hw, vsi_handle))
1055 return ICE_ERR_PARAM;
/* translate the SW handle to the HW VSI number before the AQ call */
1056 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1057 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1059 ice_clear_vsi_ctx(hw, vsi_handle);
/* ice_update_vsi (kernel-doc title line elided in this listing) */
1065 * @hw: pointer to the HW struct
1066 * @vsi_handle: unique VSI handle
1067 * @vsi_ctx: pointer to a VSI context struct
1068 * @cd: pointer to command details structure or NULL
1070 * Update VSI context in the hardware
1073 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1074 struct ice_sq_cd *cd)
1076 if (!ice_is_vsi_valid(hw, vsi_handle))
1077 return ICE_ERR_PARAM;
/* translate the SW handle to the HW VSI number, then issue the AQ update */
1078 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1079 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1083 * ice_aq_get_vsi_params
1084 * @hw: pointer to the HW struct
1085 * @vsi_ctx: pointer to a VSI context struct
1086 * @cd: pointer to command details structure or NULL
1088 * Get VSI context info from hardware (0x0212)
1091 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1092 struct ice_sq_cd *cd)
1094 struct ice_aqc_add_get_update_free_vsi *cmd;
1095 struct ice_aqc_get_vsi_resp *resp;
1096 struct ice_aq_desc desc;
1097 enum ice_status status;
1099 cmd = &desc.params.vsi_cmd;
1100 resp = &desc.params.get_vsi_resp;
1102 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1104 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW fills vsi_ctx->info directly as the response buffer */
1106 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1107 sizeof(vsi_ctx->info), cd);
1109 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1111 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1112 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1119 * ice_aq_add_update_mir_rule - add/update a mirror rule
1120 * @hw: pointer to the HW struct
1121 * @rule_type: Rule Type
1122 * @dest_vsi: VSI number to which packets will be mirrored
1123 * @count: length of the list
1124 * @mr_buf: buffer for list of mirrored VSI numbers
1125 * @cd: pointer to command details structure or NULL
1128 * Add/Update Mirror Rule (0x260).
1131 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1132 u16 count, struct ice_mir_rule_buf *mr_buf,
1133 struct ice_sq_cd *cd, u16 *rule_id)
1135 struct ice_aqc_add_update_mir_rule *cmd;
1136 struct ice_aq_desc desc;
1137 enum ice_status status;
1138 __le16 *mr_list = NULL;
/* validate argument combinations per rule type before touching HW */
1141 switch (rule_type) {
1142 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1143 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1144 /* Make sure count and mr_buf are set for these rule_types */
1145 if (!(count && mr_buf))
1146 return ICE_ERR_PARAM;
1148 buf_size = count * sizeof(__le16);
1149 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
/* allocation-failure bail-out (guard condition elided in this listing) */
1151 return ICE_ERR_NO_MEMORY;
1153 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1154 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1155 /* Make sure count and mr_buf are not set for these
1158 if (count || mr_buf)
1159 return ICE_ERR_PARAM;
1162 ice_debug(hw, ICE_DBG_SW,
1163 "Error due to unsupported rule_type %u\n", rule_type);
1164 return ICE_ERR_OUT_OF_RANGE;
1167 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1169 /* Pre-process 'mr_buf' items for add/update of virtual port
1170 * ingress/egress mirroring (but not physical port ingress/egress
1176 for (i = 0; i < count; i++) {
1179 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1181 /* Validate specified VSI number, make sure it is less
1182 * than ICE_MAX_VSI, if not return with error.
1184 if (id >= ICE_MAX_VSI) {
1185 ice_debug(hw, ICE_DBG_SW,
1186 "Error VSI index (%u) out-of-range\n",
/* free the list before bailing out mid-loop */
1188 ice_free(hw, mr_list);
1189 return ICE_ERR_OUT_OF_RANGE;
1192 /* add VSI to mirror rule */
1195 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1196 else /* remove VSI from mirror rule */
1197 mr_list[i] = CPU_TO_LE16(id);
1201 cmd = &desc.params.add_update_rule;
/* an existing rule ID means update-in-place rather than add */
1202 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1203 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1204 ICE_AQC_RULE_ID_VALID_M);
1205 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1206 cmd->num_entries = CPU_TO_LE16(count);
1207 cmd->dest = CPU_TO_LE16(dest_vsi);
1209 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* FW echoes the (possibly newly assigned) rule ID in the descriptor */
1211 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1213 ice_free(hw, mr_list);
1219 * ice_aq_delete_mir_rule - delete a mirror rule
1220 * @hw: pointer to the HW struct
1221 * @rule_id: Mirror rule ID (to be deleted)
1222 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1223 * otherwise it is returned to the shared pool
1224 * @cd: pointer to command details structure or NULL
1226 * Delete Mirror Rule (0x261).
 *
 * Returns ICE_ERR_OUT_OF_RANGE for rule IDs outside the supported range.
1229 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1230 struct ice_sq_cd *cd)
1232 struct ice_aqc_delete_mir_rule *cmd;
1233 struct ice_aq_desc desc;
1235 /* rule_id should be in the range 0...63 */
1236 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1237 return ICE_ERR_OUT_OF_RANGE;
1239 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1241 cmd = &desc.params.del_rule;
 /* firmware requires the valid bit set on the ID being deleted */
1242 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1243 cmd->rule_id = CPU_TO_LE16(rule_id);
1246 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1248 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1252 * ice_aq_alloc_free_vsi_list
1253 * @hw: pointer to the HW struct
1254 * @vsi_list_id: VSI list ID returned or used for lookup
1255 * @lkup_type: switch rule filter lookup type
1256 * @opc: switch rules population command type - pass in the command opcode
1258 * allocates or free a VSI list resource
 *
 * For an alloc (ice_aqc_opc_alloc_res) the new list ID is written to
 * *vsi_list_id; for a free (ice_aqc_opc_free_res) *vsi_list_id names the
 * list to release. VLAN lookups use a prune-list resource, all other
 * supported lookups use a replication-list resource.
1260 static enum ice_status
1261 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1262 enum ice_sw_lkup_type lkup_type,
1263 enum ice_adminq_opc opc)
1265 struct ice_aqc_alloc_free_res_elem *sw_buf;
1266 struct ice_aqc_res_elem *vsi_ele;
1267 enum ice_status status;
1270 buf_len = sizeof(*sw_buf);
1271 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1272 ice_malloc(hw, buf_len);
1274 return ICE_ERR_NO_MEMORY;
1275 sw_buf->num_elems = CPU_TO_LE16(1);
1277 if (lkup_type == ICE_SW_LKUP_MAC ||
1278 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1279 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1280 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1281 lkup_type == ICE_SW_LKUP_PROMISC ||
1282 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1283 lkup_type == ICE_SW_LKUP_LAST) {
1284 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1285 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1287 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1289 status = ICE_ERR_PARAM;
1290 goto ice_aq_alloc_free_vsi_list_exit;
 /* on free, tell firmware which list ID to release */
1293 if (opc == ice_aqc_opc_free_res)
1294 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1296 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1298 goto ice_aq_alloc_free_vsi_list_exit;
 /* on alloc, hand the firmware-chosen list ID back to the caller */
1300 if (opc == ice_aqc_opc_alloc_res) {
1301 vsi_ele = &sw_buf->elem[0];
1302 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1305 ice_aq_alloc_free_vsi_list_exit:
1306 ice_free(hw, sw_buf);
1311 * ice_aq_set_storm_ctrl - Sets storm control configuration
1312 * @hw: pointer to the HW struct
1313 * @bcast_thresh: represents the upper threshold for broadcast storm control
1314 * @mcast_thresh: represents the upper threshold for multicast storm control
1315 * @ctl_bitmask: storm control control knobs
1317 * Sets the storm control configuration (0x0280)
 *
 * Thresholds are masked to ICE_AQ_THRESHOLD_M before being sent.
1320 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1323 struct ice_aqc_storm_cfg *cmd;
1324 struct ice_aq_desc desc;
1326 cmd = &desc.params.storm_conf;
1328 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1330 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1331 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1332 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1334 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1338 * ice_aq_get_storm_ctrl - gets storm control configuration
1339 * @hw: pointer to the HW struct
1340 * @bcast_thresh: represents the upper threshold for broadcast storm control
1341 * @mcast_thresh: represents the upper threshold for multicast storm control
1342 * @ctl_bitmask: storm control control knobs
1344 * Gets the storm control configuration (0x0281)
 *
 * Output pointers are only written on a successful AQ response.
1347 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1350 enum ice_status status;
1351 struct ice_aq_desc desc;
1353 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1355 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1357 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
 /* strip reserved bits from the returned threshold fields */
1360 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1363 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1366 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1373 * ice_aq_sw_rules - add/update/remove switch rules
1374 * @hw: pointer to the HW struct
1375 * @rule_list: pointer to switch rule population list
1376 * @rule_list_sz: total size of the rule list in bytes
1377 * @num_rules: number of switch rules in the rule_list
1378 * @opc: switch rules population command type - pass in the command opcode
1379 * @cd: pointer to command details structure or NULL
1381 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1383 static enum ice_status
1384 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1385 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1387 struct ice_aq_desc desc;
1389 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
 /* reject any opcode other than the three rule-population commands */
1391 if (opc != ice_aqc_opc_add_sw_rules &&
1392 opc != ice_aqc_opc_update_sw_rules &&
1393 opc != ice_aqc_opc_remove_sw_rules)
1394 return ICE_ERR_PARAM;
1396 ice_fill_dflt_direct_cmd_desc(&desc, opc);
 /* RD flag: the indirect buffer carries command data to firmware */
1398 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1399 desc.params.sw_rules.num_rules_fltr_entry_index =
1400 CPU_TO_LE16(num_rules);
1401 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1405 * ice_aq_add_recipe - add switch recipe
1406 * @hw: pointer to the HW struct
1407 * @s_recipe_list: pointer to switch rule population list
1408 * @num_recipes: number of switch recipes in the list
1409 * @cd: pointer to command details structure or NULL
 *
 * Add Recipe AQ command; the recipe array is sent as the indirect buffer.
1414 ice_aq_add_recipe(struct ice_hw *hw,
1415 struct ice_aqc_recipe_data_elem *s_recipe_list,
1416 u16 num_recipes, struct ice_sq_cd *cd)
1418 struct ice_aqc_add_get_recipe *cmd;
1419 struct ice_aq_desc desc;
1422 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1423 cmd = &desc.params.add_get_recipe;
1424 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1426 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
 /* RD flag: buffer contains data for firmware to read */
1427 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1429 buf_size = num_recipes * sizeof(*s_recipe_list);
1431 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1435 * ice_aq_get_recipe - get switch recipe
1436 * @hw: pointer to the HW struct
1437 * @s_recipe_list: pointer to switch rule population list
1438 * @num_recipes: pointer to the number of recipes (input and output)
1439 * @recipe_root: root recipe number of recipe(s) to retrieve
1440 * @cd: pointer to command details structure or NULL
1444 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1445 * On output, *num_recipes will equal the number of entries returned in
1448 * The caller must supply enough space in s_recipe_list to hold all possible
1449 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1452 ice_aq_get_recipe(struct ice_hw *hw,
1453 struct ice_aqc_recipe_data_elem *s_recipe_list,
1454 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1456 struct ice_aqc_add_get_recipe *cmd;
1457 struct ice_aq_desc desc;
1458 enum ice_status status;
 /* enforce the full-size buffer contract documented above */
1461 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1462 return ICE_ERR_PARAM;
1464 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1465 cmd = &desc.params.add_get_recipe;
1466 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1468 cmd->return_index = CPU_TO_LE16(recipe_root);
1469 cmd->num_sub_recipes = 0;
1471 buf_size = *num_recipes * sizeof(*s_recipe_list);
1473 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
 /* firmware fills in how many sub-recipes were actually returned */
1474 /* cppcheck-suppress constArgument */
1475 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1481 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1482 * @hw: pointer to the HW struct
1483 * @profile_id: package profile ID to associate the recipe with
1484 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1485 * @cd: pointer to command details structure or NULL
1486 * Recipe to profile association (0x0291)
1489 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1490 struct ice_sq_cd *cd)
1492 struct ice_aqc_recipe_to_profile *cmd;
1493 struct ice_aq_desc desc;
1495 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1496 cmd = &desc.params.recipe_to_profile;
1497 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
 /* NOTE: profile_id is u32 but the descriptor field is 16-bit; the
  * upper bits are truncated by CPU_TO_LE16
  */
1498 cmd->profile_id = CPU_TO_LE16(profile_id);
1499 /* Set the recipe ID bit in the bitmask to let the device know which
1500 * profile we are associating the recipe to
1502 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1503 ICE_NONDMA_TO_NONDMA);
1505 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1509 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1510 * @hw: pointer to the HW struct
1511 * @profile_id: package profile ID to associate the recipe with
1512 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1513 * @cd: pointer to command details structure or NULL
1514 * Associate profile ID with given recipe (0x0293)
1517 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1518 struct ice_sq_cd *cd)
1520 struct ice_aqc_recipe_to_profile *cmd;
1521 struct ice_aq_desc desc;
1522 enum ice_status status;
1524 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1525 cmd = &desc.params.recipe_to_profile;
1526 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1527 cmd->profile_id = CPU_TO_LE16(profile_id);
1529 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 /* copy the recipe association bitmap out of the response descriptor */
1531 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1532 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1538 * ice_alloc_recipe - add recipe resource
1539 * @hw: pointer to the hardware structure
1540 * @rid: recipe ID returned as response to AQ call
1542 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1544 struct ice_aqc_alloc_free_res_elem *sw_buf;
1545 enum ice_status status;
1548 buf_len = sizeof(*sw_buf);
1549 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1551 return ICE_ERR_NO_MEMORY;
1553 sw_buf->num_elems = CPU_TO_LE16(1);
 /* request one shared recipe resource from the global pool */
1554 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1555 ICE_AQC_RES_TYPE_S) |
1556 ICE_AQC_RES_TYPE_FLAG_SHARED);
1557 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1558 ice_aqc_opc_alloc_res, NULL);
 /* firmware returns the allocated recipe ID in the element response */
1560 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1561 ice_free(hw, sw_buf);
1566 /* ice_init_port_info - Initialize port_info with switch configuration data
1567 * @pi: pointer to port_info
1568 * @vsi_port_num: VSI number or port number
1569 * @type: Type of switch element (port or VSI)
1570 * @swid: switch ID of the switch the element is attached to
1571 * @pf_vf_num: PF or VF number
1572 * @is_vf: true if the element is a VF, false otherwise
1575 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1576 u16 swid, u16 pf_vf_num, bool is_vf)
1579 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
 /* physical port: record logical port and mark default Tx/Rx VSIs
  * as unset
  */
1580 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1582 pi->pf_vf_num = pf_vf_num;
1584 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1585 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1588 ice_debug(pi->hw, ICE_DBG_SW,
1589 "incorrect VSI/port type received\n");
1594 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1595 * @hw: pointer to the hardware structure
 *
 * Pages through the Get Switch Configuration AQ responses and initializes
 * hw->port_info for each physical/virtual port element reported.
1597 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1599 struct ice_aqc_get_sw_cfg_resp *rbuf;
1600 enum ice_status status;
1601 u16 num_total_ports;
 /* current base code supports a single port per function */
1607 num_total_ports = 1;
1609 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1610 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1613 return ICE_ERR_NO_MEMORY;
1615 /* Multiple calls to ice_aq_get_sw_cfg may be required
1616 * to get all the switch configuration information. The need
1617 * for additional calls is indicated by ice_aq_get_sw_cfg
1618 * writing a non-zero value in req_desc
1621 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1622 &req_desc, &num_elems, NULL);
1627 for (i = 0; i < num_elems; i++) {
1628 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1629 u16 pf_vf_num, swid, vsi_port_num;
1633 ele = rbuf[i].elements;
 /* the VSI/port number and the element type share one LE16 field */
1634 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1635 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1637 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1638 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1640 swid = LE16_TO_CPU(ele->swid);
1642 if (LE16_TO_CPU(ele->pf_vf_num) &
1643 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1646 type = LE16_TO_CPU(ele->vsi_port_num) >>
1647 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1650 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1651 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1652 if (j == num_total_ports) {
1653 ice_debug(hw, ICE_DBG_SW,
1654 "more ports than expected\n");
1655 status = ICE_ERR_CFG;
1658 ice_init_port_info(hw->port_info,
1659 vsi_port_num, type, swid,
 /* keep paging until firmware reports no further descriptors */
1667 } while (req_desc && !status);
1671 ice_free(hw, (void *)rbuf);
1677 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1678 * @hw: pointer to the hardware structure
1679 * @fi: filter info structure to fill/update
1681 * This helper function populates the lb_en and lan_en elements of the provided
1682 * ice_fltr_info struct using the switch's type and characteristics of the
1683 * switch rule being configured.
1685 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
 /* only Tx-direction forwarding filters need loopback/LAN tuning */
1689 if ((fi->flag & ICE_FLTR_TX) &&
1690 (fi->fltr_act == ICE_FWD_TO_VSI ||
1691 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1692 fi->fltr_act == ICE_FWD_TO_Q ||
1693 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1694 /* Setting LB for prune actions will result in replicated
1695 * packets to the internal switch that will be dropped.
1697 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1700 /* Set lan_en to TRUE if
1701 * 1. The switch is a VEB AND
1703 * 2.1 The lookup is a directional lookup like ethertype,
1704 * promiscuous, ethertype-MAC, promiscuous-VLAN
1705 * and default-port OR
1706 * 2.2 The lookup is VLAN, OR
1707 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1708 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1712 * The switch is a VEPA.
1714 * In all other cases, the LAN enable has to be set to false.
1717 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1718 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1719 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1720 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1721 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1722 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1723 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1724 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1725 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1726 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1735 * ice_ilog2 - Calculates integer log base 2 of a number
1736 * @n: number on which to perform operation
 *
 * Returns the position of the highest set bit (floor(log2(n))).
1738 static int ice_ilog2(u64 n)
 /* scan from the most significant bit downward */
1742 for (i = 63; i >= 0; i--)
1743 if (((u64)1 << i) & n)
1750 * ice_fill_sw_rule - Helper function to fill switch rule structure
1751 * @hw: pointer to the hardware structure
1752 * @f_info: entry containing packet forwarding information
1753 * @s_rule: switch rule structure to be filled in based on mac_entry
1754 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the lookup Tx/Rx rule: dummy Ethernet header patched with the
 * filter's MAC/ethertype/VLAN fields, plus the encoded action word.
1757 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1758 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
 /* sentinel above the max VLAN ID means "no VLAN to program" */
1760 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
 /* removal only needs the rule index; no header or action encoding */
1768 if (opc == ice_aqc_opc_remove_sw_rules) {
1769 s_rule->pdata.lkup_tx_rx.act = 0;
1770 s_rule->pdata.lkup_tx_rx.index =
1771 CPU_TO_LE16(f_info->fltr_rule_id);
1772 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1776 eth_hdr_sz = sizeof(dummy_eth_header);
1777 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1779 /* initialize the ether header with a dummy header */
1780 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1781 ice_fill_sw_info(hw, f_info);
1783 switch (f_info->fltr_act) {
1784 case ICE_FWD_TO_VSI:
1785 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1786 ICE_SINGLE_ACT_VSI_ID_M;
1787 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1788 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1789 ICE_SINGLE_ACT_VALID_BIT;
1791 case ICE_FWD_TO_VSI_LIST:
1792 act |= ICE_SINGLE_ACT_VSI_LIST;
1793 act |= (f_info->fwd_id.vsi_list_id <<
1794 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1795 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1796 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1797 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1798 ICE_SINGLE_ACT_VALID_BIT;
1801 act |= ICE_SINGLE_ACT_TO_Q;
1802 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1803 ICE_SINGLE_ACT_Q_INDEX_M;
1805 case ICE_DROP_PACKET:
1806 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1807 ICE_SINGLE_ACT_VALID_BIT;
1809 case ICE_FWD_TO_QGRP:
 /* queue-group size is encoded as log2 in the action word */
1810 q_rgn = f_info->qgrp_size > 0 ?
1811 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1812 act |= ICE_SINGLE_ACT_TO_Q;
1813 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1814 ICE_SINGLE_ACT_Q_INDEX_M;
1815 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1816 ICE_SINGLE_ACT_Q_REGION_M;
1823 act |= ICE_SINGLE_ACT_LB_ENABLE;
1825 act |= ICE_SINGLE_ACT_LAN_ENABLE;
 /* per-lookup-type: pick the DA / VLAN / ethertype fields to patch
  * into the dummy header
  */
1827 switch (f_info->lkup_type) {
1828 case ICE_SW_LKUP_MAC:
1829 daddr = f_info->l_data.mac.mac_addr;
1831 case ICE_SW_LKUP_VLAN:
1832 vlan_id = f_info->l_data.vlan.vlan_id;
1833 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1834 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1835 act |= ICE_SINGLE_ACT_PRUNE;
1836 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1839 case ICE_SW_LKUP_ETHERTYPE_MAC:
1840 daddr = f_info->l_data.ethertype_mac.mac_addr;
1842 case ICE_SW_LKUP_ETHERTYPE:
1843 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1844 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1846 case ICE_SW_LKUP_MAC_VLAN:
1847 daddr = f_info->l_data.mac_vlan.mac_addr;
1848 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1850 case ICE_SW_LKUP_PROMISC_VLAN:
1851 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1853 case ICE_SW_LKUP_PROMISC:
1854 daddr = f_info->l_data.mac_vlan.mac_addr;
1860 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1861 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1862 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1864 /* Recipe set depending on lookup type */
1865 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1866 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1867 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1870 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1871 ICE_NONDMA_TO_NONDMA);
 /* program the VLAN TCI only if a valid VLAN ID was selected above */
1873 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1874 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1875 *off = CPU_TO_BE16(vlan_id);
1878 /* Create the switch rule with the final dummy Ethernet header */
1879 if (opc != ice_aqc_opc_update_sw_rules)
1880 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1884 * ice_add_marker_act
1885 * @hw: pointer to the hardware structure
1886 * @m_ent: the management entry for which sw marker needs to be added
1887 * @sw_marker: sw marker to tag the Rx descriptor with
1888 * @l_id: large action resource ID
1890 * Create a large action to hold software marker and update the switch rule
1891 * entry pointed by m_ent with newly created large action
1893 static enum ice_status
1894 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1895 u16 sw_marker, u16 l_id)
1897 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1898 /* For software marker we need 3 large actions
1899 * 1. FWD action: FWD TO VSI or VSI LIST
1900 * 2. GENERIC VALUE action to hold the profile ID
1901 * 3. GENERIC VALUE action to hold the software marker ID
1903 const u16 num_lg_acts = 3;
1904 enum ice_status status;
 /* only MAC lookup rules support the software-marker large action */
1910 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1911 return ICE_ERR_PARAM;
1913 /* Create two back-to-back switch rules and submit them to the HW using
1914 * one memory buffer:
1918 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1919 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1920 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
1922 return ICE_ERR_NO_MEMORY;
 /* rx_tx rule lives immediately after the large action in the buffer */
1924 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1926 /* Fill in the first switch rule i.e. large action */
1927 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1928 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1929 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
1931 /* First action VSI forwarding or VSI list forwarding depending on how
1934 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1935 m_ent->fltr_info.fwd_id.hw_vsi_id;
1937 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1938 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1939 ICE_LG_ACT_VSI_LIST_ID_M;
1940 if (m_ent->vsi_count > 1)
1941 act |= ICE_LG_ACT_VSI_LIST;
1942 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1944 /* Second action descriptor type */
1945 act = ICE_LG_ACT_GENERIC;
1947 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1948 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1950 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1951 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1953 /* Third action Marker value */
1954 act |= ICE_LG_ACT_GENERIC;
1955 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1956 ICE_LG_ACT_GENERIC_VALUE_M;
1958 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
1960 /* call the fill switch rule to fill the lookup Tx Rx structure */
1961 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1962 ice_aqc_opc_update_sw_rules);
1964 /* Update the action to point to the large action ID */
1965 rx_tx->pdata.lkup_tx_rx.act =
1966 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
1967 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1968 ICE_SINGLE_ACT_PTR_VAL_M));
1970 /* Use the filter rule ID of the previously created rule with single
1971 * act. Once the update happens, hardware will treat this as large
1974 rx_tx->pdata.lkup_tx_rx.index =
1975 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
1977 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1978 ice_aqc_opc_update_sw_rules, NULL);
 /* record the large action index and marker on success */
1980 m_ent->lg_act_idx = l_id;
1981 m_ent->sw_marker_id = sw_marker;
1984 ice_free(hw, lg_act);
1989 * ice_add_counter_act - add/update filter rule with counter action
1990 * @hw: pointer to the hardware structure
1991 * @m_ent: the management entry for which counter needs to be added
1992 * @counter_id: VLAN counter ID returned as part of allocate resource
1993 * @l_id: large action resource ID
 *
 * Mirrors ice_add_marker_act but with a 2-action large action:
 * forwarding + statistics counter.
1995 static enum ice_status
1996 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1997 u16 counter_id, u16 l_id)
1999 struct ice_aqc_sw_rules_elem *lg_act;
2000 struct ice_aqc_sw_rules_elem *rx_tx;
2001 enum ice_status status;
2002 /* 2 actions will be added while adding a large action counter */
2003 const int num_acts = 2;
 /* counter large actions are only supported on MAC lookup rules */
2010 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2011 return ICE_ERR_PARAM;
2013 /* Create two back-to-back switch rules and submit them to the HW using
2014 * one memory buffer:
2018 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2019 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2020 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2023 return ICE_ERR_NO_MEMORY;
2025 rx_tx = (struct ice_aqc_sw_rules_elem *)
2026 ((u8 *)lg_act + lg_act_size);
2028 /* Fill in the first switch rule i.e. large action */
2029 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2030 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2031 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2033 /* First action VSI forwarding or VSI list forwarding depending on how
2036 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2037 m_ent->fltr_info.fwd_id.hw_vsi_id;
2039 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2040 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2041 ICE_LG_ACT_VSI_LIST_ID_M;
2042 if (m_ent->vsi_count > 1)
2043 act |= ICE_LG_ACT_VSI_LIST;
2044 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2046 /* Second action counter ID */
2047 act = ICE_LG_ACT_STAT_COUNT;
2048 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2049 ICE_LG_ACT_STAT_COUNT_M;
2050 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2052 /* call the fill switch rule to fill the lookup Tx Rx structure */
2053 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2054 ice_aqc_opc_update_sw_rules);
 /* redirect the existing rule's action to the large action entry */
2056 act = ICE_SINGLE_ACT_PTR;
2057 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2058 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2060 /* Use the filter rule ID of the previously created rule with single
2061 * act. Once the update happens, hardware will treat this as large
2064 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2065 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2067 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2068 ice_aqc_opc_update_sw_rules, NULL);
 /* record the large action index and counter on success */
2070 m_ent->lg_act_idx = l_id;
2071 m_ent->counter_index = counter_id;
2074 ice_free(hw, lg_act);
2079 * ice_create_vsi_list_map
2080 * @hw: pointer to the hardware structure
2081 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2082 * @num_vsi: number of VSI handles in the array
2083 * @vsi_list_id: VSI list ID generated as part of allocate resource
2085 * Helper function to create a new entry of VSI list ID to VSI mapping
2086 * using the given VSI list ID
2088 static struct ice_vsi_list_map_info *
2089 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2092 struct ice_switch_info *sw = hw->switch_info;
2093 struct ice_vsi_list_map_info *v_map;
2096 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2101 v_map->vsi_list_id = vsi_list_id;
 /* record each VSI handle in the map's membership bitmap */
2103 for (i = 0; i < num_vsi; i++)
2104 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
 /* the map entry is tracked on the switch-wide list */
2106 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2111 * ice_update_vsi_list_rule
2112 * @hw: pointer to the hardware structure
2113 * @vsi_handle_arr: array of VSI handles to form a VSI list
2114 * @num_vsi: number of VSI handles in the array
2115 * @vsi_list_id: VSI list ID generated as part of allocate resource
2116 * @remove: Boolean value to indicate if this is a remove action
2117 * @opc: switch rules population command type - pass in the command opcode
2118 * @lkup_type: lookup type of the filter
2120 * Call AQ command to add a new switch rule or update existing switch rule
2121 * using the given VSI list ID
2123 static enum ice_status
2124 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2125 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2126 enum ice_sw_lkup_type lkup_type)
2128 struct ice_aqc_sw_rules_elem *s_rule;
2129 enum ice_status status;
2135 return ICE_ERR_PARAM;
 /* choose set/clear rule type: replication lists for most lookups,
  * prune lists for VLAN
  */
2137 if (lkup_type == ICE_SW_LKUP_MAC ||
2138 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2139 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2140 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2141 lkup_type == ICE_SW_LKUP_PROMISC ||
2142 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2143 lkup_type == ICE_SW_LKUP_LAST)
2144 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2145 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2146 else if (lkup_type == ICE_SW_LKUP_VLAN)
2147 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2148 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2150 return ICE_ERR_PARAM;
2152 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2153 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2155 return ICE_ERR_NO_MEMORY;
2156 for (i = 0; i < num_vsi; i++) {
2157 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2158 status = ICE_ERR_PARAM;
2161 /* AQ call requires hw_vsi_id(s) */
2162 s_rule->pdata.vsi_list.vsi[i] =
2163 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2166 s_rule->type = CPU_TO_LE16(type);
2167 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2168 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2170 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2173 ice_free(hw, s_rule);
2178 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2179 * @hw: pointer to the HW struct
2180 * @vsi_handle_arr: array of VSI handles to form a VSI list
2181 * @num_vsi: number of VSI handles in the array
2182 * @vsi_list_id: stores the ID of the VSI list to be created
2183 * @lkup_type: switch rule filter's lookup type
 *
 * Two-step helper: allocate a VSI list resource, then populate it with
 * the given VSI handles via an add-switch-rules command.
2185 static enum ice_status
2186 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2187 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2189 enum ice_status status;
2191 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2192 ice_aqc_opc_alloc_res);
2196 /* Update the newly created VSI list to include the specified VSIs */
2197 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2198 *vsi_list_id, false,
2199 ice_aqc_opc_add_sw_rules, lkup_type);
2203 * ice_create_pkt_fwd_rule
2204 * @hw: pointer to the hardware structure
2205 * @f_entry: entry containing packet forwarding information
2207 * Create switch rule with given filter information and add an entry
2208 * to the corresponding filter management list to track this switch rule
2211 static enum ice_status
2212 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2213 struct ice_fltr_list_entry *f_entry)
2215 struct ice_fltr_mgmt_list_entry *fm_entry;
2216 struct ice_aqc_sw_rules_elem *s_rule;
2217 enum ice_sw_lkup_type l_type;
2218 struct ice_sw_recipe *recp;
2219 enum ice_status status;
2221 s_rule = (struct ice_aqc_sw_rules_elem *)
2222 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2224 return ICE_ERR_NO_MEMORY;
2225 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2226 ice_malloc(hw, sizeof(*fm_entry));
2228 status = ICE_ERR_NO_MEMORY;
2229 goto ice_create_pkt_fwd_rule_exit;
2232 fm_entry->fltr_info = f_entry->fltr_info;
2234 /* Initialize all the fields for the management entry */
2235 fm_entry->vsi_count = 1;
2236 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2237 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2238 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2240 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2241 ice_aqc_opc_add_sw_rules);
2243 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2244 ice_aqc_opc_add_sw_rules, NULL);
 /* on AQ failure discard the management entry as well */
2246 ice_free(hw, fm_entry);
2247 goto ice_create_pkt_fwd_rule_exit;
 /* propagate the firmware-assigned rule ID to caller and tracker */
2250 f_entry->fltr_info.fltr_rule_id =
2251 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2252 fm_entry->fltr_info.fltr_rule_id =
2253 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2255 /* The book keeping entries will get removed when base driver
2256 * calls remove filter AQ command
2258 l_type = fm_entry->fltr_info.lkup_type;
2259 recp = &hw->switch_info->recp_list[l_type];
2260 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2262 ice_create_pkt_fwd_rule_exit:
2263 ice_free(hw, s_rule);
2268 * ice_update_pkt_fwd_rule
2269 * @hw: pointer to the hardware structure
2270 * @f_info: filter information for switch rule
2272 * Call AQ command to update a previously created switch rule with a
 * new forwarding action; the rule is addressed by f_info->fltr_rule_id.
2275 static enum ice_status
2276 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2278 struct ice_aqc_sw_rules_elem *s_rule;
2279 enum ice_status status;
2281 s_rule = (struct ice_aqc_sw_rules_elem *)
2282 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2284 return ICE_ERR_NO_MEMORY;
2286 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
 /* updates address an existing rule by its firmware rule ID */
2288 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2290 /* Update switch rule with new rule set to forward VSI list */
2291 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2292 ice_aqc_opc_update_sw_rules, NULL);
2294 ice_free(hw, s_rule);
2299 * ice_update_sw_rule_bridge_mode
2300 * @hw: pointer to the HW struct
2302 * Updates unicast switch filter rules based on VEB/VEPA mode
2304 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2306 struct ice_switch_info *sw = hw->switch_info;
2307 struct ice_fltr_mgmt_list_entry *fm_entry;
2308 enum ice_status status = ICE_SUCCESS;
2309 struct LIST_HEAD_TYPE *rule_head;
2310 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC-lookup recipe is walked; its rule list is traversed
 * under its dedicated lock.
 */
2312 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2313 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2315 ice_acquire_lock(rule_lock);
2316 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2318 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2319 u8 *addr = fi->l_data.mac.mac_addr;
2321 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action are
 * re-pushed to FW; multicast and non-forwarding rules are skipped.
 */
2324 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2325 (fi->fltr_act == ICE_FWD_TO_VSI ||
2326 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2327 fi->fltr_act == ICE_FWD_TO_Q ||
2328 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2329 status = ice_update_pkt_fwd_rule(hw, fi);
2335 ice_release_lock(rule_lock);
2341 * ice_add_update_vsi_list
2342 * @hw: pointer to the hardware structure
2343 * @m_entry: pointer to current filter management list entry
2344 * @cur_fltr: filter information from the book keeping entry
2345 * @new_fltr: filter information with the new VSI to be added
2347 * Call AQ command to add or update previously created VSI list with new VSI.
2349 * Helper function to do book keeping associated with adding filter information
2350 * The algorithm to do the book keeping is described below :
2351 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2352 * if only one VSI has been added till now
2353 * Allocate a new VSI list and add two VSIs
2354 * to this list using switch rule command
2355 * Update the previously created switch rule with the
2356 * newly created VSI list ID
2357 * if a VSI list was previously created
2358 * Add the new VSI to the previously created VSI list set
2359 * using the update switch rule command
2361 static enum ice_status
2362 ice_add_update_vsi_list(struct ice_hw *hw,
2363 struct ice_fltr_mgmt_list_entry *m_entry,
2364 struct ice_fltr_info *cur_fltr,
2365 struct ice_fltr_info *new_fltr)
2367 enum ice_status status = ICE_SUCCESS;
2368 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be merged into a VSI list, in
 * either the existing rule or the incoming one.
 */
2370 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2371 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2372 return ICE_ERR_NOT_IMPL;
2374 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2375 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2376 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2377 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2378 return ICE_ERR_NOT_IMPL;
2380 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2381 /* Only one entry existed in the mapping and it was not already
2382 * a part of a VSI list. So, create a VSI list with the old and
2385 struct ice_fltr_info tmp_fltr;
2386 u16 vsi_handle_arr[2];
2388 /* A rule already exists with the new VSI being added */
2389 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2390 return ICE_ERR_ALREADY_EXISTS;
2392 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2393 vsi_handle_arr[1] = new_fltr->vsi_handle;
2394 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2396 new_fltr->lkup_type);
2400 tmp_fltr = *new_fltr;
2401 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2402 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2403 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2404 /* Update the previous switch rule of "MAC forward to VSI" to
2405 * "MAC fwd to VSI list"
2407 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Mirror the new action/list-id into the tracked filter info and
 * create the VSI-list map that the book-keeping entry will own.
 */
2411 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2412 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2413 m_entry->vsi_list_info =
2414 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2417 /* If this entry was large action then the large action needs
2418 * to be updated to point to FWD to VSI list
2420 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2422 ice_add_marker_act(hw, m_entry,
2423 m_entry->sw_marker_id,
2424 m_entry->lg_act_idx);
2426 u16 vsi_handle = new_fltr->vsi_handle;
2427 enum ice_adminq_opc opcode;
2429 if (!m_entry->vsi_list_info)
2432 /* A rule already exists with the new VSI being added */
2433 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2436 /* Update the previously created VSI list set with
2437 * the new VSI ID passed in
2439 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2440 opcode = ice_aqc_opc_update_sw_rules;
2442 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2443 vsi_list_id, false, opcode,
2444 new_fltr->lkup_type);
2445 /* update VSI list mapping info with new VSI ID */
2447 ice_set_bit(vsi_handle,
2448 m_entry->vsi_list_info->vsi_map);
2451 m_entry->vsi_count++;
2456 * ice_find_rule_entry - Search a rule entry
2457 * @hw: pointer to the hardware structure
2458 * @recp_id: lookup type for which the specified rule needs to be searched
2459 * @f_info: rule information
2461 * Helper function to search for a given rule entry
2462 * Returns pointer to entry storing the rule if found
2464 static struct ice_fltr_mgmt_list_entry *
2465 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2467 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2468 struct ice_switch_info *sw = hw->switch_info;
2469 struct LIST_HEAD_TYPE *list_head;
2471 list_head = &sw->recp_list[recp_id].filt_rules;
2472 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match criterion: identical lookup data (whole l_data union) and
 * identical direction flag. NOTE(review): caller is expected to hold
 * the recipe's filt_rule_lock - confirm at call sites.
 */
2474 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2475 sizeof(f_info->l_data)) &&
2476 f_info->flag == list_itr->fltr_info.flag) {
2485 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2486 * @hw: pointer to the hardware structure
2487 * @recp_id: lookup type for which VSI lists needs to be searched
2488 * @vsi_handle: VSI handle to be found in VSI list
2489 * @vsi_list_id: VSI list ID found containing vsi_handle
2491 * Helper function to search a VSI list with single entry containing given VSI
2492 * handle element. This can be extended further to search VSI list with more
2493 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2495 static struct ice_vsi_list_map_info *
2496 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2499 struct ice_vsi_list_map_info *map_info = NULL;
2500 struct ice_switch_info *sw = hw->switch_info;
2501 struct LIST_HEAD_TYPE *list_head;
2503 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different entry type on the same list
 * head, so the walk is duplicated per entry type.
 */
2504 if (sw->recp_list[recp_id].adv_rule) {
2505 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2507 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2508 ice_adv_fltr_mgmt_list_entry,
2510 if (list_itr->vsi_list_info) {
2511 map_info = list_itr->vsi_list_info;
2512 if (ice_is_bit_set(map_info->vsi_map,
2514 *vsi_list_id = map_info->vsi_list_id;
2520 struct ice_fltr_mgmt_list_entry *list_itr;
2522 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2523 ice_fltr_mgmt_list_entry,
/* Legacy path additionally restricts the match to entries whose
 * list currently tracks exactly one VSI (see function comment).
 */
2525 if (list_itr->vsi_count == 1 &&
2526 list_itr->vsi_list_info) {
2527 map_info = list_itr->vsi_list_info;
2528 if (ice_is_bit_set(map_info->vsi_map,
2530 *vsi_list_id = map_info->vsi_list_id;
2540 * ice_add_rule_internal - add rule for a given lookup type
2541 * @hw: pointer to the hardware structure
2542 * @recp_id: lookup type (recipe ID) for which rule has to be added
2543 * @f_entry: structure containing MAC forwarding information
2545 * Adds or updates the rule lists for a given recipe
2547 static enum ice_status
2548 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2549 struct ice_fltr_list_entry *f_entry)
2551 struct ice_switch_info *sw = hw->switch_info;
2552 struct ice_fltr_info *new_fltr, *cur_fltr;
2553 struct ice_fltr_mgmt_list_entry *m_entry;
2554 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2555 enum ice_status status = ICE_SUCCESS;
2557 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2558 return ICE_ERR_PARAM;
2560 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2561 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2562 f_entry->fltr_info.fwd_id.hw_vsi_id =
2563 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2565 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2567 ice_acquire_lock(rule_lock);
2568 new_fltr = &f_entry->fltr_info;
/* Source field depends on direction: Rx rules key off the physical
 * port, Tx rules key off the originating HW VSI number.
 */
2569 if (new_fltr->flag & ICE_FLTR_RX)
2570 new_fltr->src = hw->port_info->lport;
2571 else if (new_fltr->flag & ICE_FLTR_TX)
2573 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* First subscriber creates the FW rule; subsequent subscribers are
 * folded into a VSI list on the existing entry.
 */
2575 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2577 status = ice_create_pkt_fwd_rule(hw, f_entry);
2578 goto exit_add_rule_internal;
2581 cur_fltr = &m_entry->fltr_info;
2582 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2584 exit_add_rule_internal:
2585 ice_release_lock(rule_lock);
2590 * ice_remove_vsi_list_rule
2591 * @hw: pointer to the hardware structure
2592 * @vsi_list_id: VSI list ID generated as part of allocate resource
2593 * @lkup_type: switch rule filter lookup type
2595 * The VSI list should be emptied before this function is called to remove the
2598 static enum ice_status
2599 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2600 enum ice_sw_lkup_type lkup_type)
2602 struct ice_aqc_sw_rules_elem *s_rule;
2603 enum ice_status status;
2606 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2607 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2609 return ICE_ERR_NO_MEMORY;
/* Build a VSI_LIST_CLEAR element for this list ID.
 * NOTE(review): the AQ submit of s_rule is not visible in this
 * excerpt - confirm it is issued before the resource is freed.
 */
2611 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2612 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2614 /* Free the vsi_list resource that we allocated. It is assumed that the
2615 * list is empty at this point.
2617 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2618 ice_aqc_opc_free_res);
2620 ice_free(hw, s_rule);
2625 * ice_rem_update_vsi_list
2626 * @hw: pointer to the hardware structure
2627 * @vsi_handle: VSI handle of the VSI to remove
2628 * @fm_list: filter management entry for which the VSI list management needs to
2631 static enum ice_status
2632 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2633 struct ice_fltr_mgmt_list_entry *fm_list)
2635 enum ice_sw_lkup_type lkup_type;
2636 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for rules that currently forward to a VSI list with
 * at least one member.
 */
2639 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2640 fm_list->vsi_count == 0)
2641 return ICE_ERR_PARAM;
2643 /* A rule with the VSI being removed does not exist */
2644 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2645 return ICE_ERR_DOES_NOT_EXIST;
2647 lkup_type = fm_list->fltr_info.lkup_type;
2648 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Ask FW to drop this VSI from the list (remove = true). */
2649 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2650 ice_aqc_opc_update_sw_rules,
2655 fm_list->vsi_count--;
2656 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Non-VLAN rules with a single remaining member are converted back
 * from "fwd to VSI list" to a plain "fwd to VSI" rule; VLAN rules
 * keep their list (they always use one - see add path).
 */
2658 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2659 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2660 struct ice_vsi_list_map_info *vsi_list_info =
2661 fm_list->vsi_list_info;
2664 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2666 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2667 return ICE_ERR_OUT_OF_RANGE;
2669 /* Make sure VSI list is empty before removing it below */
2670 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2672 ice_aqc_opc_update_sw_rules,
2677 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2678 tmp_fltr_info.fwd_id.hw_vsi_id =
2679 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2680 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2681 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2683 ice_debug(hw, ICE_DBG_SW,
2684 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2685 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2689 fm_list->fltr_info = tmp_fltr_info;
/* At this point the list is unused (1 member for non-VLAN after the
 * conversion above, 0 members for VLAN) and can be released.
 */
2692 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2693 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2694 struct ice_vsi_list_map_info *vsi_list_info =
2695 fm_list->vsi_list_info;
2697 /* Remove the VSI list since it is no longer used */
2698 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2700 ice_debug(hw, ICE_DBG_SW,
2701 "Failed to remove VSI list %d, error %d\n",
2702 vsi_list_id, status);
2706 LIST_DEL(&vsi_list_info->list_entry);
2707 ice_free(hw, vsi_list_info);
2708 fm_list->vsi_list_info = NULL;
2715 * ice_remove_rule_internal - Remove a filter rule of a given type
2717 * @hw: pointer to the hardware structure
2718 * @recp_id: recipe ID for which the rule needs to removed
2719 * @f_entry: rule entry containing filter information
2721 static enum ice_status
2722 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2723 struct ice_fltr_list_entry *f_entry)
2725 struct ice_switch_info *sw = hw->switch_info;
2726 struct ice_fltr_mgmt_list_entry *list_elem;
2727 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2728 enum ice_status status = ICE_SUCCESS;
2729 bool remove_rule = false;
2732 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2733 return ICE_ERR_PARAM;
2734 f_entry->fltr_info.fwd_id.hw_vsi_id =
2735 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2737 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2738 ice_acquire_lock(rule_lock);
2739 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2741 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the FW rule itself must be removed, or only the
 * VSI-list membership / ref count adjusted.
 */
2745 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2747 } else if (!list_elem->vsi_list_info) {
2748 status = ICE_ERR_DOES_NOT_EXIST;
2750 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2751 /* a ref_cnt > 1 indicates that the vsi_list is being
2752 * shared by multiple rules. Decrement the ref_cnt and
2753 * remove this rule, but do not modify the list, as it
2754 * is in-use by other rules.
2756 list_elem->vsi_list_info->ref_cnt--;
2759 /* a ref_cnt of 1 indicates the vsi_list is only used
2760 * by one rule. However, the original removal request is only
2761 * for a single VSI. Update the vsi_list first, and only
2762 * remove the rule if there are no further VSIs in this list.
2764 vsi_handle = f_entry->fltr_info.vsi_handle;
2765 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2768 /* if VSI count goes to zero after updating the VSI list */
2769 if (list_elem->vsi_count == 0)
2774 /* Remove the lookup rule */
2775 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal needs no dummy packet header, hence the NO_HDR size. */
2777 s_rule = (struct ice_aqc_sw_rules_elem *)
2778 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2780 status = ICE_ERR_NO_MEMORY;
2784 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2785 ice_aqc_opc_remove_sw_rules);
2787 status = ice_aq_sw_rules(hw, s_rule,
2788 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2789 ice_aqc_opc_remove_sw_rules, NULL);
2793 /* Remove a book keeping from the list */
2794 ice_free(hw, s_rule);
2796 LIST_DEL(&list_elem->list_entry);
2797 ice_free(hw, list_elem);
2800 ice_release_lock(rule_lock);
2805 * ice_aq_get_res_alloc - get allocated resources
2806 * @hw: pointer to the HW struct
2807 * @num_entries: pointer to u16 to store the number of resource entries returned
2808 * @buf: pointer to user-supplied buffer
2809 * @buf_size: size of buff
2810 * @cd: pointer to command details structure or NULL
2812 * The user-supplied buffer must be large enough to store the resource
2813 * information for all resource types. Each resource type is an
2814 * ice_aqc_get_res_resp_data_elem structure.
2817 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2818 u16 buf_size, struct ice_sq_cd *cd)
2820 struct ice_aqc_get_res_alloc *resp;
2821 enum ice_status status;
2822 struct ice_aq_desc desc;
2825 return ICE_ERR_BAD_PTR;
2827 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2828 return ICE_ERR_INVAL_SIZE;
2830 resp = &desc.params.get_res;
2832 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2833 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only populated on AQ success. */
2835 if (!status && num_entries)
2836 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2842 * ice_aq_get_res_descs - get allocated resource descriptors
2843 * @hw: pointer to the hardware structure
2844 * @num_entries: number of resource entries in buffer
2845 * @buf: Indirect buffer to hold data parameters and response
2846 * @buf_size: size of buffer for indirect commands
2847 * @res_type: resource type
2848 * @res_shared: is resource shared
2849 * @desc_id: input - first desc ID to start; output - next desc ID
2850 * @cd: pointer to command details structure or NULL
2853 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2854 struct ice_aqc_get_allocd_res_desc_resp *buf,
2855 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2856 struct ice_sq_cd *cd)
2858 struct ice_aqc_get_allocd_res_desc *cmd;
2859 struct ice_aq_desc desc;
2860 enum ice_status status;
2862 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2864 cmd = &desc.params.get_res_desc;
2867 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements. */
2869 if (buf_size != (num_entries * sizeof(*buf)))
2870 return ICE_ERR_PARAM;
2872 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Pack the resource type plus the shared flag into one LE16 field. */
2874 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2875 ICE_AQC_RES_TYPE_M) | (res_shared ?
2876 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2877 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2879 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2881 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Cursor-style pagination: return the next descriptor ID to the
 * caller through desc_id.
 */
2883 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2889 * ice_add_mac - Add a MAC address based filter rule
2890 * @hw: pointer to the hardware structure
2891 * @m_list: list of MAC addresses and forwarding information
2893 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2894 * multiple unicast addresses, the function assumes that all the
2895 * addresses are unique in a given add_mac call. It doesn't
2896 * check for duplicates in this case, removing duplicates from a given
2897 * list should be taken care of in the caller of this function.
2900 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2902 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2903 struct ice_fltr_list_entry *m_list_itr;
2904 struct LIST_HEAD_TYPE *rule_head;
2905 u16 elem_sent, total_elem_left;
2906 struct ice_switch_info *sw;
2907 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2908 enum ice_status status = ICE_SUCCESS;
2909 u16 num_unicast = 0;
2913 return ICE_ERR_PARAM;
2915 sw = hw->switch_info;
2916 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; exclusive unicast addresses are
 * counted for a bulk add, multicast (and shared unicast) entries are
 * added one-by-one via ice_add_rule_internal().
 */
2917 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2919 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2923 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2924 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2925 if (!ice_is_vsi_valid(hw, vsi_handle))
2926 return ICE_ERR_PARAM;
2927 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2928 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2929 /* update the src in case it is VSI num */
2930 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2931 return ICE_ERR_PARAM;
2932 m_list_itr->fltr_info.src = hw_vsi_id;
2933 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2934 IS_ZERO_ETHER_ADDR(add))
2935 return ICE_ERR_PARAM;
2936 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
2937 /* Don't overwrite the unicast address */
2938 ice_acquire_lock(rule_lock);
2939 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2940 &m_list_itr->fltr_info)) {
2941 ice_release_lock(rule_lock);
2942 return ICE_ERR_ALREADY_EXISTS;
2944 ice_release_lock(rule_lock);
2946 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
2947 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
2948 m_list_itr->status =
2949 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2951 if (m_list_itr->status)
2952 return m_list_itr->status;
2956 ice_acquire_lock(rule_lock);
2957 /* Exit if no suitable entries were found for adding bulk switch rule */
2959 status = ICE_SUCCESS;
2960 goto ice_add_mac_exit;
2963 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2965 /* Allocate switch rule buffer for the bulk update for unicast */
2966 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2967 s_rule = (struct ice_aqc_sw_rules_elem *)
2968 ice_calloc(hw, num_unicast, s_rule_size);
2970 status = ICE_ERR_NO_MEMORY;
2971 goto ice_add_mac_exit;
/* Pass 2: fill one AQ rule element per exclusive unicast entry. */
2975 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2977 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2978 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2980 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2981 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2982 ice_aqc_opc_add_sw_rules);
2983 r_iter = (struct ice_aqc_sw_rules_elem *)
2984 ((u8 *)r_iter + s_rule_size);
2988 /* Call AQ bulk switch rule update for all unicast addresses */
2990 /* Call AQ switch rule in AQ_MAX chunk */
/* Chunk the submission so each AQ call stays within
 * ICE_AQ_MAX_BUF_LEN bytes.
 */
2991 for (total_elem_left = num_unicast; total_elem_left > 0;
2992 total_elem_left -= elem_sent) {
2993 struct ice_aqc_sw_rules_elem *entry = r_iter;
2995 elem_sent = min(total_elem_left,
2996 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
2997 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2998 elem_sent, ice_aqc_opc_add_sw_rules,
3001 goto ice_add_mac_exit;
3002 r_iter = (struct ice_aqc_sw_rules_elem *)
3003 ((u8 *)r_iter + (elem_sent * s_rule_size));
3006 /* Fill up rule ID based on the value returned from FW */
3008 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3010 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3011 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3012 struct ice_fltr_mgmt_list_entry *fm_entry;
3014 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3015 f_info->fltr_rule_id =
3016 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3017 f_info->fltr_act = ICE_FWD_TO_VSI;
3018 /* Create an entry to track this MAC address */
3019 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3020 ice_malloc(hw, sizeof(*fm_entry));
3022 status = ICE_ERR_NO_MEMORY;
3023 goto ice_add_mac_exit;
3025 fm_entry->fltr_info = *f_info;
3026 fm_entry->vsi_count = 1;
3027 /* The book keeping entries will get removed when
3028 * base driver calls remove filter AQ command
3031 LIST_ADD(&fm_entry->list_entry, rule_head);
3032 r_iter = (struct ice_aqc_sw_rules_elem *)
3033 ((u8 *)r_iter + s_rule_size);
3038 ice_release_lock(rule_lock);
3040 ice_free(hw, s_rule);
3045 * ice_add_vlan_internal - Add one VLAN based filter rule
3046 * @hw: pointer to the hardware structure
3047 * @f_entry: filter entry containing one VLAN information
3049 static enum ice_status
3050 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3052 struct ice_switch_info *sw = hw->switch_info;
3053 struct ice_fltr_mgmt_list_entry *v_list_itr;
3054 struct ice_fltr_info *new_fltr, *cur_fltr;
3055 enum ice_sw_lkup_type lkup_type;
3056 u16 vsi_list_id = 0, vsi_handle;
3057 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3058 enum ice_status status = ICE_SUCCESS;
3060 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3061 return ICE_ERR_PARAM;
3063 f_entry->fltr_info.fwd_id.hw_vsi_id =
3064 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3065 new_fltr = &f_entry->fltr_info;
3067 /* VLAN ID should only be 12 bits */
3068 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3069 return ICE_ERR_PARAM;
3071 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3072 return ICE_ERR_PARAM;
3074 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3075 lkup_type = new_fltr->lkup_type;
3076 vsi_handle = new_fltr->vsi_handle;
3077 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3078 ice_acquire_lock(rule_lock);
3079 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3081 struct ice_vsi_list_map_info *map_info = NULL;
3083 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3084 /* All VLAN pruning rules use a VSI list. Check if
3085 * there is already a VSI list containing VSI that we
3086 * want to add. If found, use the same vsi_list_id for
3087 * this new VLAN rule or else create a new list.
3089 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3093 status = ice_create_vsi_list_rule(hw,
3101 /* Convert the action to forwarding to a VSI list. */
3102 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3103 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3106 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-look-up the entry just created so the VSI-list map can be
 * attached to the new book-keeping entry.
 */
3108 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3111 status = ICE_ERR_DOES_NOT_EXIST;
3114 /* reuse VSI list for new rule and increment ref_cnt */
3116 v_list_itr->vsi_list_info = map_info;
3117 map_info->ref_cnt++;
3119 v_list_itr->vsi_list_info =
3120 ice_create_vsi_list_map(hw, &vsi_handle,
3124 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3125 /* Update existing VSI list to add new VSI ID only if it used
3128 cur_fltr = &v_list_itr->fltr_info;
3129 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3132 /* If VLAN rule exists and VSI list being used by this rule is
3133 * referenced by more than 1 VLAN rule. Then create a new VSI
3134 * list appending previous VSI with new VSI and update existing
3135 * VLAN rule to point to new VSI list ID
3137 struct ice_fltr_info tmp_fltr;
3138 u16 vsi_handle_arr[2];
3141 /* Current implementation only supports reusing VSI list with
3142 * one VSI count. We should never hit below condition
3144 if (v_list_itr->vsi_count > 1 &&
3145 v_list_itr->vsi_list_info->ref_cnt > 1) {
3146 ice_debug(hw, ICE_DBG_SW,
3147 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3148 status = ICE_ERR_CFG;
3153 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3156 /* A rule already exists with the new VSI being added */
3157 if (cur_handle == vsi_handle) {
3158 status = ICE_ERR_ALREADY_EXISTS;
3162 vsi_handle_arr[0] = cur_handle;
3163 vsi_handle_arr[1] = vsi_handle;
3164 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3165 &vsi_list_id, lkup_type);
3169 tmp_fltr = v_list_itr->fltr_info;
3170 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3171 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3172 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3173 /* Update the previous switch rule to a new VSI list which
3174 * includes current VSI that is requested
3176 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3180 /* before overriding VSI list map info. decrement ref_cnt of
3183 v_list_itr->vsi_list_info->ref_cnt--;
3185 /* now update to newly created list */
3186 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3187 v_list_itr->vsi_list_info =
3188 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3190 v_list_itr->vsi_count++;
3194 ice_release_lock(rule_lock);
3199 * ice_add_vlan - Add VLAN based filter rule
3200 * @hw: pointer to the hardware structure
3201 * @v_list: list of VLAN entries and forwarding information
3204 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3206 struct ice_fltr_list_entry *v_list_itr;
3209 return ICE_ERR_PARAM;
/* Each entry must be a VLAN lookup; per-entry status is recorded and
 * the first failure aborts the remaining list.
 */
3211 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3213 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3214 return ICE_ERR_PARAM;
3215 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3216 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3217 if (v_list_itr->status)
3218 return v_list_itr->status;
3224 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3225 * @hw: pointer to the hardware structure
3226 * @mv_list: list of MAC and VLAN filters
3228 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3229 * pruning bits enabled, then it is the responsibility of the caller to make
3230 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3231 * VLAN won't be received on that VSI otherwise.
3234 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3236 struct ice_fltr_list_entry *mv_list_itr;
3238 if (!mv_list || !hw)
3239 return ICE_ERR_PARAM;
3241 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3243 enum ice_sw_lkup_type l_type =
3244 mv_list_itr->fltr_info.lkup_type;
3246 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3247 return ICE_ERR_PARAM;
/* Forced to Tx direction; per-entry status is recorded and the
 * first failure aborts the remaining list.
 */
3248 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3249 mv_list_itr->status =
3250 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3252 if (mv_list_itr->status)
3253 return mv_list_itr->status;
3259 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3260 * @hw: pointer to the hardware structure
3261 * @em_list: list of ether type MAC filter, MAC is optional
3263 * This function requires the caller to populate the entries in
3264 * the filter list with the necessary fields (including flags to
3265 * indicate Tx or Rx rules).
3268 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3270 struct ice_fltr_list_entry *em_list_itr;
3272 if (!em_list || !hw)
3273 return ICE_ERR_PARAM;
3275 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3277 enum ice_sw_lkup_type l_type =
3278 em_list_itr->fltr_info.lkup_type;
/* Both ethertype-only and ethertype+MAC lookups are accepted;
 * the entry's own lookup type selects the recipe.
 */
3280 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3281 l_type != ICE_SW_LKUP_ETHERTYPE)
3282 return ICE_ERR_PARAM;
3284 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3286 if (em_list_itr->status)
3287 return em_list_itr->status;
3293 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3294 * @hw: pointer to the hardware structure
3295 * @em_list: list of ethertype or ethertype MAC entries
3298 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3300 struct ice_fltr_list_entry *em_list_itr, *tmp;
3302 if (!em_list || !hw)
3303 return ICE_ERR_PARAM;
/* SAFE iteration: removal may unlink entries while walking. */
3305 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3307 enum ice_sw_lkup_type l_type =
3308 em_list_itr->fltr_info.lkup_type;
3310 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3311 l_type != ICE_SW_LKUP_ETHERTYPE)
3312 return ICE_ERR_PARAM;
3314 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3316 if (em_list_itr->status)
3317 return em_list_itr->status;
3324 * ice_rem_sw_rule_info
3325 * @hw: pointer to the hardware structure
3326 * @rule_head: pointer to the switch list structure that we want to delete
3329 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3331 if (!LIST_EMPTY(rule_head)) {
3332 struct ice_fltr_mgmt_list_entry *entry;
3333 struct ice_fltr_mgmt_list_entry *tmp;
/* SAFE variant: each entry is unlinked and freed during the walk. */
3335 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3336 ice_fltr_mgmt_list_entry, list_entry) {
3337 LIST_DEL(&entry->list_entry);
3338 ice_free(hw, entry);
3344 * ice_rem_adv_rule_info
3345 * @hw: pointer to the hardware structure
3346 * @rule_head: pointer to the switch list structure that we want to delete
3349 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3351 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3352 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3354 if (LIST_EMPTY(rule_head))
3357 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3358 ice_adv_fltr_mgmt_list_entry, list_entry) {
3359 LIST_DEL(&lst_itr->list_entry);
/* Advanced entries own a separately-allocated lkups array; free it
 * before the entry itself.
 */
3360 ice_free(hw, lst_itr->lkups);
3361 ice_free(hw, lst_itr);
3366 * ice_rem_all_sw_rules_info
3367 * @hw: pointer to the hardware structure
3369 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3371 struct ice_switch_info *sw = hw->switch_info;
3374 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3375 struct LIST_HEAD_TYPE *rule_head;
3377 rule_head = &sw->recp_list[i].filt_rules;
/* Each recipe's list holds either legacy or advanced entries; pick
 * the matching teardown helper per recipe.
 */
3378 if (!sw->recp_list[i].adv_rule)
3379 ice_rem_sw_rule_info(hw, rule_head);
3381 ice_rem_adv_rule_info(hw, rule_head);
/* NOTE(review): line-sampled capture; comments added only.
 * Purpose: add (@set true) or remove (@set false) the switch rule that makes
 * @vsi_handle the default VSI in the given @direction, then update the
 * port_info default-VSI bookkeeping from the AQ response.
 */
3386 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3387 * @pi: pointer to the port_info structure
3388 * @vsi_handle: VSI handle to set as default
3389 * @set: true to add the above mentioned switch rule, false to remove it
3390 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3392 * add filter rule to set/unset given VSI as default VSI for the switch
3393 * (represented by swid)
3396 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3399 struct ice_aqc_sw_rules_elem *s_rule;
3400 struct ice_fltr_info f_info;
3401 struct ice_hw *hw = pi->hw;
3402 enum ice_adminq_opc opcode;
3403 enum ice_status status;
3407 if (!ice_is_vsi_valid(hw, vsi_handle))
3408 return ICE_ERR_PARAM;
3409 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add-rule buffers carry a dummy ethernet header; remove-rule buffers don't,
 * so the required size differs between set and clear.
 */
3411 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3412 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3413 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
/* (NULL check of s_rule implied by the return below; line not captured.) */
3415 return ICE_ERR_NO_MEMORY;
3417 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3419 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3420 f_info.flag = direction;
3421 f_info.fltr_act = ICE_FWD_TO_VSI;
3422 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* RX default rules are sourced by the physical port; TX default rules are
 * sourced by the VSI itself. On remove, reuse the stored rule ID.
 */
3424 if (f_info.flag & ICE_FLTR_RX) {
3425 f_info.src = pi->lport;
3426 f_info.src_id = ICE_SRC_ID_LPORT;
3428 f_info.fltr_rule_id =
3429 pi->dflt_rx_vsi_rule_id;
3430 } else if (f_info.flag & ICE_FLTR_TX) {
3431 f_info.src_id = ICE_SRC_ID_VSI;
3432 f_info.src = hw_vsi_id;
3434 f_info.fltr_rule_id =
3435 pi->dflt_tx_vsi_rule_id;
3439 opcode = ice_aqc_opc_add_sw_rules;
3441 opcode = ice_aqc_opc_remove_sw_rules;
3443 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3445 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
/* Skip bookkeeping on AQ failure or when direction is neither TX nor RX. */
3446 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success: record the hardware rule index returned by firmware ... */
3449 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3451 if (f_info.flag & ICE_FLTR_TX) {
3452 pi->dflt_tx_vsi_num = hw_vsi_id;
3453 pi->dflt_tx_vsi_rule_id = index;
3454 } else if (f_info.flag & ICE_FLTR_RX) {
3455 pi->dflt_rx_vsi_num = hw_vsi_id;
3456 pi->dflt_rx_vsi_rule_id = index;
/* ... or, on the clear path, invalidate the stored defaults. */
3459 if (f_info.flag & ICE_FLTR_TX) {
3460 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3461 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3462 } else if (f_info.flag & ICE_FLTR_RX) {
3463 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3464 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
/* Single exit: free the AQ buffer on all paths. */
3469 ice_free(hw, s_rule);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: linear search of recp_list[recp_id].filt_rules for an entry whose
 * lookup data, hw_vsi_id and flag all match @f_info. Returns the entry
 * (return lines not visible) or NULL. Caller must hold the recipe's
 * filt_rule_lock — this helper takes no lock itself.
 */
3474 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3475 * @hw: pointer to the hardware structure
3476 * @recp_id: lookup type for which the specified rule needs to be searched
3477 * @f_info: rule information
3479 * Helper function to search for a unicast rule entry - this is to be used
3480 * to remove unicast MAC filter that is not shared with other VSIs on the
3483 * Returns pointer to entry storing the rule if found
3485 static struct ice_fltr_mgmt_list_entry *
3486 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3487 struct ice_fltr_info *f_info)
3489 struct ice_switch_info *sw = hw->switch_info;
3490 struct ice_fltr_mgmt_list_entry *list_itr;
3491 struct LIST_HEAD_TYPE *list_head;
3493 list_head = &sw->recp_list[recp_id].filt_rules;
3494 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the full lookup data blob plus forwarding VSI and flag. */
3496 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3497 sizeof(f_info->l_data)) &&
3498 f_info->fwd_id.hw_vsi_id ==
3499 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3500 f_info->flag == list_itr->fltr_info.flag)
/* NOTE(review): line-sampled capture; comments added only. */
3507 * ice_remove_mac - remove a MAC address based filter rule
3508 * @hw: pointer to the hardware structure
3509 * @m_list: list of MAC addresses and forwarding information
3511 * This function removes either a MAC filter rule or a specific VSI from a
3512 * VSI list for a multicast MAC address.
3514 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3515 * ice_add_mac. Caller should be aware that this call will only work if all
3516 * the entries passed into m_list were added previously. It will not attempt to
3517 * do a partial remove of entries that were found.
3520 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3522 struct ice_fltr_list_entry *list_itr, *tmp;
3523 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* (Parameter NULL check implied by the return below; line not captured.) */
3526 return ICE_ERR_PARAM;
3528 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3529 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3531 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3532 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
/* Only MAC-lookup entries are legal on this list. */
3535 if (l_type != ICE_SW_LKUP_MAC)
3536 return ICE_ERR_PARAM;
3538 vsi_handle = list_itr->fltr_info.vsi_handle;
3539 if (!ice_is_vsi_valid(hw, vsi_handle))
3540 return ICE_ERR_PARAM;
3542 list_itr->fltr_info.fwd_id.hw_vsi_id =
3543 ice_get_hw_vsi_num(hw, vsi_handle);
/* When unicast addresses are not shared across VSIs, verify this exact
 * rule (data + VSI + flag) exists before removing, so we never delete a
 * unicast filter that belongs to another VSI.
 */
3544 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3545 /* Don't remove the unicast address that belongs to
3546 * another VSI on the switch, since it is not being
3549 ice_acquire_lock(rule_lock);
3550 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3551 &list_itr->fltr_info)) {
3552 ice_release_lock(rule_lock);
3553 return ICE_ERR_DOES_NOT_EXIST;
3555 ice_release_lock(rule_lock);
/* Per-entry status is stored; the first failure aborts the walk. */
3557 list_itr->status = ice_remove_rule_internal(hw,
3560 if (list_itr->status)
3561 return list_itr->status;
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: remove each VLAN filter on @v_list via ice_remove_rule_internal;
 * rejects entries whose lookup type is not ICE_SW_LKUP_VLAN and stops on the
 * first per-entry failure.
 */
3567 * ice_remove_vlan - Remove VLAN based filter rule
3568 * @hw: pointer to the hardware structure
3569 * @v_list: list of VLAN entries and forwarding information
3572 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3574 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* (Parameter NULL check implied by the return below; line not captured.) */
3577 return ICE_ERR_PARAM;
3579 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3581 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3583 if (l_type != ICE_SW_LKUP_VLAN)
3584 return ICE_ERR_PARAM;
3585 v_list_itr->status = ice_remove_rule_internal(hw,
3588 if (v_list_itr->status)
3589 return v_list_itr->status;
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: same walk-and-remove shape as ice_remove_vlan, but for
 * ICE_SW_LKUP_MAC_VLAN entries.
 */
3595 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3596 * @hw: pointer to the hardware structure
3597 * @v_list: list of MAC VLAN entries and forwarding information
3600 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3602 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* (Parameter NULL check implied by the return below; line not captured.) */
3605 return ICE_ERR_PARAM;
3607 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3609 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3611 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3612 return ICE_ERR_PARAM;
3613 v_list_itr->status =
3614 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3616 if (v_list_itr->status)
3617 return v_list_itr->status;
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: predicate — true when @fm_entry targets @vsi_handle, either
 * directly (FWD_TO_VSI with matching handle) or through membership in the
 * rule's VSI list bitmap (FWD_TO_VSI_LIST).
 */
3623 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3624 * @fm_entry: filter entry to inspect
3625 * @vsi_handle: VSI handle to compare with filter info
3628 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3630 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3631 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3632 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3633 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
/* NOTE(review): line-sampled capture; comments added only. */
3638 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3639 * @hw: pointer to the hardware structure
3640 * @vsi_handle: VSI handle to remove filters from
3641 * @vsi_list_head: pointer to the list to add entry to
3642 * @fi: pointer to fltr_info of filter entry to copy & add
3644 * Helper function, used when creating a list of filters to remove from
3645 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3646 * original filter entry, with the exception of fltr_info.fltr_act and
3647 * fltr_info.fwd_id fields. These are set such that later logic can
3648 * extract which VSI to remove the fltr from, and pass on that information.
3650 static enum ice_status
3651 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3652 struct LIST_HEAD_TYPE *vsi_list_head,
3653 struct ice_fltr_info *fi)
3655 struct ice_fltr_list_entry *tmp;
3657 /* this memory is freed up in the caller function
3658 * once filters for this VSI are removed
3660 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
/* (NULL check of tmp implied by the return below; line not captured.) */
3662 return ICE_ERR_NO_MEMORY;
/* Struct copy of the whole fltr_info. */
3664 tmp->fltr_info = *fi;
3666 /* Overwrite these fields to indicate which VSI to remove filter from,
3667 * so find and remove logic can extract the information from the
3668 * list entries. Note that original entries will still have proper
/* Retarget the copy at the single VSI being cleaned up. */
3671 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3672 tmp->fltr_info.vsi_handle = vsi_handle;
3673 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3675 LIST_ADD(&tmp->list_entry, vsi_list_head);
/* NOTE(review): line-sampled capture; comments added only. */
3681 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3682 * @hw: pointer to the hardware structure
3683 * @vsi_handle: VSI handle to remove filters from
3684 * @lkup_list_head: pointer to the list that has certain lookup type filters
3685 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3687 * Locates all filters in lkup_list_head that are used by the given VSI,
3688 * and adds COPIES of those entries to vsi_list_head (intended to be used
3689 * to remove the listed filters).
3690 * Note that this means all entries in vsi_list_head must be explicitly
3691 * deallocated by the caller when done with list.
3693 static enum ice_status
3694 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3695 struct LIST_HEAD_TYPE *lkup_list_head,
3696 struct LIST_HEAD_TYPE *vsi_list_head)
3698 struct ice_fltr_mgmt_list_entry *fm_entry;
3699 enum ice_status status = ICE_SUCCESS;
3701 /* check to make sure VSI ID is valid and within boundary */
3702 if (!ice_is_vsi_valid(hw, vsi_handle))
3703 return ICE_ERR_PARAM;
3705 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3706 ice_fltr_mgmt_list_entry, list_entry) {
3707 struct ice_fltr_info *fi;
3709 fi = &fm_entry->fltr_info;
/* Skip filters not used by this VSI. (fi is an address-of result, so the
 * !fi half of this check can never fire; harmless defensive test.)
 */
3710 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3713 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: map a filter's direction (TX vs RX) and destination MAC class
 * (broadcast / multicast / unicast, else VLAN) to the matching
 * ICE_PROMISC_* bit.
 */
3723 * ice_determine_promisc_mask
3724 * @fi: filter info to parse
3726 * Helper function to determine which ICE_PROMISC_ mask corresponds
3727 * to given filter into.
3729 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3731 u16 vid = fi->l_data.mac_vlan.vlan_id;
3732 u8 *macaddr = fi->l_data.mac.mac_addr;
3733 bool is_tx_fltr = false;
3734 u8 promisc_mask = 0;
/* (is_tx_fltr = true assignment for the TX case is not captured.) */
3736 if (fi->flag == ICE_FLTR_TX)
3739 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3740 promisc_mask |= is_tx_fltr ?
3741 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3742 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3743 promisc_mask |= is_tx_fltr ?
3744 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3745 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3746 promisc_mask |= is_tx_fltr ?
3747 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* presumably guarded by `if (vid)` — condition line not captured; confirm. */
3749 promisc_mask |= is_tx_fltr ?
3750 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3752 return promisc_mask;
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: OR together the ICE_PROMISC_* bits of every PROMISC-recipe filter
 * that applies to @vsi_handle, under the recipe's rule lock.
 */
3756 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3757 * @hw: pointer to the hardware structure
3758 * @vsi_handle: VSI handle to retrieve info from
3759 * @promisc_mask: pointer to mask to be filled in
3760 * @vid: VLAN ID of promisc VLAN VSI
3763 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3766 struct ice_switch_info *sw = hw->switch_info;
3767 struct ice_fltr_mgmt_list_entry *itr;
3768 struct LIST_HEAD_TYPE *rule_head;
3769 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3771 if (!ice_is_vsi_valid(hw, vsi_handle))
3772 return ICE_ERR_PARAM;
3776 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3777 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3779 ice_acquire_lock(rule_lock);
3780 LIST_FOR_EACH_ENTRY(itr, rule_head,
3781 ice_fltr_mgmt_list_entry, list_entry) {
3782 /* Continue if this filter doesn't apply to this VSI or the
3783 * VSI ID is not in the VSI map for this filter
3785 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3788 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3790 ice_release_lock(rule_lock);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: identical shape to ice_get_vsi_promisc, but walks the
 * PROMISC_VLAN recipe list instead of the plain PROMISC one.
 */
3796 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3797 * @hw: pointer to the hardware structure
3798 * @vsi_handle: VSI handle to retrieve info from
3799 * @promisc_mask: pointer to mask to be filled in
3800 * @vid: VLAN ID of promisc VLAN VSI
3803 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3806 struct ice_switch_info *sw = hw->switch_info;
3807 struct ice_fltr_mgmt_list_entry *itr;
3808 struct LIST_HEAD_TYPE *rule_head;
3809 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3811 if (!ice_is_vsi_valid(hw, vsi_handle))
3812 return ICE_ERR_PARAM;
3816 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3817 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3819 ice_acquire_lock(rule_lock);
3820 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3822 /* Continue if this filter doesn't apply to this VSI or the
3823 * VSI ID is not in the VSI map for this filter
3825 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3828 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3830 ice_release_lock(rule_lock);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: remove each promisc rule on @v_list for recipe @recp_id,
 * stopping at the first failure (status stored per entry).
 */
3836 * ice_remove_promisc - Remove promisc based filter rules
3837 * @hw: pointer to the hardware structure
3838 * @recp_id: recipe ID for which the rule needs to removed
3839 * @v_list: list of promisc entries
3841 static enum ice_status
3842 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3843 struct LIST_HEAD_TYPE *v_list)
3845 struct ice_fltr_list_entry *v_list_itr, *tmp;
3847 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3849 v_list_itr->status =
3850 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3851 if (v_list_itr->status)
3852 return v_list_itr->status;
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: build a temporary list of this VSI's promisc filters whose
 * promisc bits are fully covered by @promisc_mask, remove them, then free
 * the temporary copies.
 */
3858 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3859 * @hw: pointer to the hardware structure
3860 * @vsi_handle: VSI handle to clear mode
3861 * @promisc_mask: mask of promiscuous config bits to clear
3862 * @vid: VLAN ID to clear VLAN promiscuous
3865 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3868 struct ice_switch_info *sw = hw->switch_info;
3869 struct ice_fltr_list_entry *fm_entry, *tmp;
3870 struct LIST_HEAD_TYPE remove_list_head;
3871 struct ice_fltr_mgmt_list_entry *itr;
3872 struct LIST_HEAD_TYPE *rule_head;
3873 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3874 enum ice_status status = ICE_SUCCESS;
3877 if (!ice_is_vsi_valid(hw, vsi_handle))
3878 return ICE_ERR_PARAM;
/* Recipe choice: VLAN-promisc bits select the PROMISC_VLAN recipe
 * (the guarding condition line is not captured).
 */
3881 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3883 recipe_id = ICE_SW_LKUP_PROMISC;
3885 rule_head = &sw->recp_list[recipe_id].filt_rules;
3886 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3888 INIT_LIST_HEAD(&remove_list_head);
3890 ice_acquire_lock(rule_lock);
3891 LIST_FOR_EACH_ENTRY(itr, rule_head,
3892 ice_fltr_mgmt_list_entry, list_entry) {
3893 u8 fltr_promisc_mask = 0;
3895 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3898 fltr_promisc_mask |=
3899 ice_determine_promisc_mask(&itr->fltr_info);
3901 /* Skip if filter is not completely specified by given mask */
3902 if (fltr_promisc_mask & ~promisc_mask)
/* Copy the matching filter onto the removal list while still locked. */
3905 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3909 ice_release_lock(rule_lock);
3910 goto free_fltr_list;
3913 ice_release_lock(rule_lock);
3915 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* free_fltr_list label (not captured): always free the copied entries. */
3918 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3919 ice_fltr_list_entry, list_entry) {
3920 LIST_DEL(&fm_entry->list_entry);
3921 ice_free(hw, fm_entry);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: program one switch rule per requested promiscuous mode bit in
 * @promisc_mask for @vsi_handle; each loop iteration consumes one bit.
 */
3928 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3929 * @hw: pointer to the hardware structure
3930 * @vsi_handle: VSI handle to configure
3931 * @promisc_mask: mask of promiscuous config bits
3932 * @vid: VLAN ID to set VLAN promiscuous
3935 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3937 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3938 struct ice_fltr_list_entry f_list_entry;
3939 struct ice_fltr_info new_fltr;
3940 enum ice_status status = ICE_SUCCESS;
3946 ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3948 if (!ice_is_vsi_valid(hw, vsi_handle))
3949 return ICE_ERR_PARAM;
3950 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3952 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN-promisc requests use the PROMISC_VLAN recipe and carry @vid. */
3954 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3955 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3956 new_fltr.l_data.mac_vlan.vlan_id = vid;
3957 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3959 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3960 recipe_id = ICE_SW_LKUP_PROMISC;
3963 /* Separate filters must be set for each direction/packet type
3964 * combination, so we will loop over the mask value, store the
3965 * individual type, and clear it out in the input mask as it
3968 while (promisc_mask) {
/* Pick exactly one pending UCAST/MCAST/BCAST x RX/TX bit, clear it,
 * and record the packet class. (The is_tx_fltr = true lines for the
 * TX branches are not captured.)
 */
3974 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3975 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3976 pkt_type = UCAST_FLTR;
3977 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3978 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3979 pkt_type = UCAST_FLTR;
3981 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3982 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3983 pkt_type = MCAST_FLTR;
3984 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3985 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3986 pkt_type = MCAST_FLTR;
3988 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3989 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3990 pkt_type = BCAST_FLTR;
3991 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3992 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3993 pkt_type = BCAST_FLTR;
3997 /* Check for VLAN promiscuous flag */
3998 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3999 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4000 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4001 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4005 /* Set filter DA based on packet type */
4006 mac_addr = new_fltr.l_data.mac.mac_addr;
4007 if (pkt_type == BCAST_FLTR) {
4008 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4009 } else if (pkt_type == MCAST_FLTR ||
4010 pkt_type == UCAST_FLTR) {
4011 /* Use the dummy ether header DA */
4012 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4013 ICE_NONDMA_TO_NONDMA);
4014 if (pkt_type == MCAST_FLTR)
4015 mac_addr[0] |= 0x1; /* Set multicast bit */
4018 /* Need to reset this to zero for all iterations */
/* TX rules are sourced by the VSI; RX rules by the port. */
4021 new_fltr.flag |= ICE_FLTR_TX;
4022 new_fltr.src = hw_vsi_id;
4024 new_fltr.flag |= ICE_FLTR_RX;
4025 new_fltr.src = hw->port_info->lport;
4028 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4029 new_fltr.vsi_handle = vsi_handle;
4030 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4031 f_list_entry.fltr_info = new_fltr;
4033 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4034 if (status != ICE_SUCCESS)
4035 goto set_promisc_exit;
/* NOTE(review): line-sampled capture; comments added only. */
4043 * ice_set_vlan_vsi_promisc
4044 * @hw: pointer to the hardware structure
4045 * @vsi_handle: VSI handle to configure
4046 * @promisc_mask: mask of promiscuous config bits
4047 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4049 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4052 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4053 bool rm_vlan_promisc)
4055 struct ice_switch_info *sw = hw->switch_info;
4056 struct ice_fltr_list_entry *list_itr, *tmp;
4057 struct LIST_HEAD_TYPE vsi_list_head;
4058 struct LIST_HEAD_TYPE *vlan_head;
4059 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4060 enum ice_status status;
/* Snapshot this VSI's VLAN filters (as copies) under the VLAN rule lock. */
4063 INIT_LIST_HEAD(&vsi_list_head);
4064 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4065 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4066 ice_acquire_lock(vlan_lock);
4067 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4069 ice_release_lock(vlan_lock);
4071 goto free_fltr_list;
/* Apply (or clear) the requested promisc mode once per associated VLAN. */
4073 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4075 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4076 if (rm_vlan_promisc)
4077 status = ice_clear_vsi_promisc(hw, vsi_handle,
4078 promisc_mask, vlan_id);
4080 status = ice_set_vsi_promisc(hw, vsi_handle,
4081 promisc_mask, vlan_id);
/* free_fltr_list label (not captured): release the snapshot copies. */
4087 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4088 ice_fltr_list_entry, list_entry) {
4089 LIST_DEL(&list_itr->list_entry);
4090 ice_free(hw, list_itr);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: collect copies of all @lkup-type filters used by @vsi_handle,
 * dispatch to the matching remove helper by lookup type, then free the
 * copies.
 */
4096 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4097 * @hw: pointer to the hardware structure
4098 * @vsi_handle: VSI handle to remove filters from
4099 * @lkup: switch rule filter lookup type
4102 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4103 enum ice_sw_lkup_type lkup)
4105 struct ice_switch_info *sw = hw->switch_info;
4106 struct ice_fltr_list_entry *fm_entry;
4107 struct LIST_HEAD_TYPE remove_list_head;
4108 struct LIST_HEAD_TYPE *rule_head;
4109 struct ice_fltr_list_entry *tmp;
4110 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4111 enum ice_status status;
4113 INIT_LIST_HEAD(&remove_list_head);
4114 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4115 rule_head = &sw->recp_list[lkup].filt_rules;
4116 ice_acquire_lock(rule_lock);
4117 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4119 ice_release_lock(rule_lock);
/* switch (lkup) dispatch; break statements are not captured. */
4124 case ICE_SW_LKUP_MAC:
4125 ice_remove_mac(hw, &remove_list_head);
4127 case ICE_SW_LKUP_VLAN:
4128 ice_remove_vlan(hw, &remove_list_head);
4130 case ICE_SW_LKUP_PROMISC:
4131 case ICE_SW_LKUP_PROMISC_VLAN:
4132 ice_remove_promisc(hw, lkup, &remove_list_head);
4134 case ICE_SW_LKUP_MAC_VLAN:
4135 ice_remove_mac_vlan(hw, &remove_list_head);
4137 case ICE_SW_LKUP_ETHERTYPE:
4138 case ICE_SW_LKUP_ETHERTYPE_MAC:
4139 ice_remove_eth_mac(hw, &remove_list_head);
4141 case ICE_SW_LKUP_DFLT:
4142 ice_debug(hw, ICE_DBG_SW,
4143 "Remove filters for this lookup type hasn't been implemented yet\n");
4145 case ICE_SW_LKUP_LAST:
4146 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Always free the copied list entries regardless of dispatch outcome. */
4150 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4151 ice_fltr_list_entry, list_entry) {
4152 LIST_DEL(&fm_entry->list_entry);
4153 ice_free(hw, fm_entry);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: remove every filter type the driver tracks for @vsi_handle by
 * invoking ice_remove_vsi_lkup_fltr once per lookup type.
 */
4158 * ice_remove_vsi_fltr - Remove all filters for a VSI
4159 * @hw: pointer to the hardware structure
4160 * @vsi_handle: VSI handle to remove filters from
4162 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4164 ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
4166 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4167 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4168 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4169 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4170 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4171 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4172 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4173 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: allocate @num_items of resource @type via the alloc-res AQ
 * command and return the firmware-assigned index in *@counter_id.
 */
4177 * ice_alloc_res_cntr - allocating resource counter
4178 * @hw: pointer to the hardware structure
4179 * @type: type of resource
4180 * @alloc_shared: if set it is shared else dedicated
4181 * @num_items: number of entries requested for FD resource type
4182 * @counter_id: counter index returned by AQ call
4185 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4188 struct ice_aqc_alloc_free_res_elem *buf;
4189 enum ice_status status;
4192 /* Allocate resource */
4193 buf_len = sizeof(*buf);
4194 buf = (struct ice_aqc_alloc_free_res_elem *)
4195 ice_malloc(hw, buf_len);
/* (NULL check of buf implied by the return below; line not captured.) */
4197 return ICE_ERR_NO_MEMORY;
4199 buf->num_elems = CPU_TO_LE16(num_items);
4200 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4201 ICE_AQC_RES_TYPE_M) | alloc_shared);
4203 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4204 ice_aqc_opc_alloc_res, NULL);
/* On success, firmware returns the allocated index in elem[0]. */
4208 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: release @num_items of resource @type identified by @counter_id
 * via the free-res AQ command; logs (but otherwise only propagates) failure.
 */
4216 * ice_free_res_cntr - free resource counter
4217 * @hw: pointer to the hardware structure
4218 * @type: type of resource
4219 * @alloc_shared: if set it is shared else dedicated
4220 * @num_items: number of entries to be freed for FD resource type
4221 * @counter_id: counter ID resource which needs to be freed
4224 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4227 struct ice_aqc_alloc_free_res_elem *buf;
4228 enum ice_status status;
4232 buf_len = sizeof(*buf);
4233 buf = (struct ice_aqc_alloc_free_res_elem *)
4234 ice_malloc(hw, buf_len);
/* (NULL check of buf implied by the return below; line not captured.) */
4236 return ICE_ERR_NO_MEMORY;
4238 buf->num_elems = CPU_TO_LE16(num_items);
4239 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4240 ICE_AQC_RES_TYPE_M) | alloc_shared);
4241 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4243 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4244 ice_aqc_opc_free_res, NULL);
4246 ice_debug(hw, ICE_DBG_SW,
4247 "counter resource could not be freed\n");
/* Thin wrapper: allocate one dedicated VLAN counter resource. */
4254 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4255 * @hw: pointer to the hardware structure
4256 * @counter_id: returns counter index
4258 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4260 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4261 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
/* Thin wrapper: free one dedicated VLAN counter resource. */
4266 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4267 * @hw: pointer to the hardware structure
4268 * @counter_id: counter index to be freed
4270 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4272 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4273 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: allocate a wide-table (large action) entry sized for @num_acts
 * actions and return its index in *@l_id. num_acts must be 1..ICE_MAX_LG_ACT.
 */
4278 * ice_alloc_res_lg_act - add large action resource
4279 * @hw: pointer to the hardware structure
4280 * @l_id: large action ID to fill it in
4281 * @num_acts: number of actions to hold with a large action entry
4283 static enum ice_status
4284 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4286 struct ice_aqc_alloc_free_res_elem *sw_buf;
4287 enum ice_status status;
4290 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4291 return ICE_ERR_PARAM;
4293 /* Allocate resource for large action */
4294 buf_len = sizeof(*sw_buf);
4295 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4296 ice_malloc(hw, buf_len);
/* (NULL check of sw_buf implied by the return below; line not captured.) */
4298 return ICE_ERR_NO_MEMORY;
4300 sw_buf->num_elems = CPU_TO_LE16(1);
/* NOTE(review): comment at 4303 says WIDE_TABLE_3 but the code at 4312
 * uses WIDE_TABLE_2 for num_acts == 2 — the comment appears stale.
 */
4302 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4303 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4304 * If num_acts is greater than 2, then use
4305 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4306 * The num_acts cannot exceed 4. This was ensured at the
4307 * beginning of the function.
4310 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4311 else if (num_acts == 2)
4312 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4314 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4316 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4317 ice_aqc_opc_alloc_res, NULL);
4319 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4321 ice_free(hw, sw_buf);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: ensure a MAC filter exists for @f_info, then attach a software
 * marker large action (3 actions) so matching Rx descriptors are tagged with
 * @sw_marker. On failure, removes the MAC filter only if this call added it.
 */
4326 * ice_add_mac_with_sw_marker - add filter with sw marker
4327 * @hw: pointer to the hardware structure
4328 * @f_info: filter info structure containing the MAC filter information
4329 * @sw_marker: sw marker to tag the Rx descriptor with
4332 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4335 struct ice_switch_info *sw = hw->switch_info;
4336 struct ice_fltr_mgmt_list_entry *m_entry;
4337 struct ice_fltr_list_entry fl_info;
4338 struct LIST_HEAD_TYPE l_head;
4339 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4340 enum ice_status ret;
/* Parameter validation: forward-to-VSI MAC filters with a real marker only. */
4344 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4345 return ICE_ERR_PARAM;
4347 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4348 return ICE_ERR_PARAM;
4350 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4351 return ICE_ERR_PARAM;
4353 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4354 return ICE_ERR_PARAM;
4355 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4357 /* Add filter if it doesn't exist so then the adding of large
4358 * action always results in update
4361 INIT_LIST_HEAD(&l_head);
4362 fl_info.fltr_info = *f_info;
4363 LIST_ADD(&fl_info.list_entry, &l_head);
4365 entry_exists = false;
4366 ret = ice_add_mac(hw, &l_head);
/* ALREADY_EXISTS is acceptable — remember it so cleanup won't remove a
 * filter this call did not create.
 */
4367 if (ret == ICE_ERR_ALREADY_EXISTS)
4368 entry_exists = true;
4372 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4373 ice_acquire_lock(rule_lock);
4374 /* Get the book keeping entry for the filter */
4375 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4379 /* If counter action was enabled for this rule then don't enable
4380 * sw marker large action
4382 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4383 ret = ICE_ERR_PARAM;
4387 /* if same marker was added before */
4388 if (m_entry->sw_marker_id == sw_marker) {
4389 ret = ICE_ERR_ALREADY_EXISTS;
4393 /* Allocate a hardware table entry to hold large act. Three actions
4394 * for marker based large action
4396 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4400 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4403 /* Update the switch rule to add the marker action */
4404 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4406 ice_release_lock(rule_lock);
/* Error-exit path (label not captured): drop lock, undo our add. */
4411 ice_release_lock(rule_lock);
4412 /* only remove entry if it did not exist previously */
4414 ret = ice_remove_mac(hw, &l_head);
/* NOTE(review): line-sampled capture; comments added only. */
/* Purpose: ensure a MAC filter exists for @f_info, then attach a VLAN
 * counter via a 2-action large action. Mirrors ice_add_mac_with_sw_marker's
 * add/lookup/attach/rollback structure, but marker and counter are mutually
 * exclusive on a rule.
 */
4420 * ice_add_mac_with_counter - add filter with counter enabled
4421 * @hw: pointer to the hardware structure
4422 * @f_info: pointer to filter info structure containing the MAC filter
4426 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4428 struct ice_switch_info *sw = hw->switch_info;
4429 struct ice_fltr_mgmt_list_entry *m_entry;
4430 struct ice_fltr_list_entry fl_info;
4431 struct LIST_HEAD_TYPE l_head;
4432 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4433 enum ice_status ret;
4438 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4439 return ICE_ERR_PARAM;
4441 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4442 return ICE_ERR_PARAM;
4444 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4445 return ICE_ERR_PARAM;
4446 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4448 entry_exist = false;
4450 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4452 /* Add filter if it doesn't exist so then the adding of large
4453 * action always results in update
4455 INIT_LIST_HEAD(&l_head);
4457 fl_info.fltr_info = *f_info;
4458 LIST_ADD(&fl_info.list_entry, &l_head);
4460 ret = ice_add_mac(hw, &l_head);
/* (entry_exist = true assignment for ALREADY_EXISTS not captured.) */
4461 if (ret == ICE_ERR_ALREADY_EXISTS)
4466 ice_acquire_lock(rule_lock);
4467 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4469 ret = ICE_ERR_BAD_PTR;
4473 /* Don't enable counter for a filter for which sw marker was enabled */
4474 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4475 ret = ICE_ERR_PARAM;
4479 /* If a counter was already enabled then don't need to add again */
4480 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4481 ret = ICE_ERR_ALREADY_EXISTS;
4485 /* Allocate a hardware table entry to VLAN counter */
4486 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4490 /* Allocate a hardware table entry to hold large act. Two actions for
4491 * counter based large action
4493 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4497 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4500 /* Update the switch rule to add the counter action */
4501 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4503 ice_release_lock(rule_lock);
/* Error-exit path (label not captured): drop lock, undo our add. */
4508 ice_release_lock(rule_lock);
4509 /* only remove entry if it did not exist previously */
4511 ret = ice_remove_mac(hw, &l_head);
/* Protocol field-extraction offsets: for each protocol header, the byte
 * offset of every 16-bit word, used to translate word indices into real
 * packet-byte offsets when building advanced recipes. Terminated by the
 * ICE_PROTOCOL_LAST sentinel entry.
 */
4516 /* This is mapping table entry that maps every word within a given protocol
4517 * structure to the real byte offset as per the specification of that
4519 * for example dst address is 3 words in ethertype header and corresponding
4520 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4521 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4522 * matching entry describing its field. This needs to be updated if new
4523 * structure is added to that union.
4525 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4526 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4527 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4528 { ICE_ETYPE_OL, { 0 } },
4529 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4530 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4531 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4532 26, 28, 30, 32, 34, 36, 38 } },
4533 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4534 26, 28, 30, 32, 34, 36, 38 } },
4535 { ICE_TCP_IL, { 0, 2 } },
4536 { ICE_UDP_OF, { 0, 2 } },
4537 { ICE_UDP_ILOS, { 0, 2 } },
4538 { ICE_SCTP_IL, { 0, 2 } },
/* Tunnel headers (VXLAN/GENEVE) start extracting at byte 8 (VNI region). */
4539 { ICE_VXLAN, { 8, 10, 12, 14 } },
4540 { ICE_GENEVE, { 8, 10, 12, 14 } },
4541 { ICE_VXLAN_GPE, { 0, 2, 4 } },
4542 { ICE_NVGRE, { 0, 2, 4, 6 } },
4543 { ICE_PROTOCOL_LAST, { 0 } }
4546 /* The following table describes preferred grouping of recipes.
4547 * If a recipe that needs to be programmed is a superset or matches one of the
4548 * following combinations, then the recipe needs to be chained as per the
 * preferred grouping below. Each entry lists the number of valid
 * (protocol ID, offset) pairs, the pairs themselves, and per-word masks.
4551 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
/* Three words of outer MAC (bytes 0-4, i.e. the destination address). */
4552 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4553 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Three words of inner MAC plus one metadata-ID word at offset 44. */
4554 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4555 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4556 { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Two words of inner IPv4 at offsets 0/2, and a second group at 12/14. */
4557 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4558 { 0xffff, 0xffff, 0xffff, 0xffff } },
4559 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4560 { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Map from driver-side protocol type to the hardware protocol ID used in
 * field vectors and recipes. Note the UDP-based tunnel types (VXLAN,
 * GENEVE, VXLAN-GPE) all share ICE_UDP_OF_HW, and NVGRE uses the GRE
 * hardware ID. Terminated by the ICE_PROTOCOL_LAST sentinel.
 */
4563 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4564 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4565 { ICE_MAC_IL, ICE_MAC_IL_HW },
4566 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4567 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4568 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4569 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4570 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4571 { ICE_TCP_IL, ICE_TCP_IL_HW },
4572 { ICE_UDP_OF, ICE_UDP_OF_HW },
4573 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4574 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4575 { ICE_VXLAN, ICE_UDP_OF_HW },
4576 { ICE_GENEVE, ICE_UDP_OF_HW },
4577 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4578 { ICE_NVGRE, ICE_GRE_OF_HW },
4579 { ICE_PROTOCOL_LAST, 0 }
4583 * ice_find_recp - find a recipe
4584 * @hw: pointer to the hardware structure
4585 * @lkup_exts: extension sequence to match
4587 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4589 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4591 bool refresh_required = true;
4592 struct ice_sw_recipe *recp;
/* Mark every result index from 0 up to ICE_CHAIN_FV_INDEX_START as free;
 * bits are cleared later as chaining recipes consume them.
 */
4595 /* Initialize available_result_ids which tracks available result idx */
4596 for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
4597 ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
4598 available_result_ids);
4600 /* Walk through existing recipes to find a match */
4601 recp = hw->switch_info->recp_list;
4602 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4603 /* If recipe was not created for this ID, in SW bookkeeping,
4604 * check if FW has an entry for this recipe. If the FW has an
4605 * entry update it in our SW bookkeeping and continue with the
 * search (refresh_required limits how often FW is queried).
4608 if (!recp[i].recp_created)
4609 if (ice_get_recp_frm_fw(hw,
4610 hw->switch_info->recp_list, i,
/* A candidate only qualifies when it extracts exactly the same
 * number of words as the requested lookup.
 */
4614 /* if number of words we are looking for match */
4615 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4616 struct ice_fv_word *a = lkup_exts->fv_words;
4617 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Word order is not significant: each wanted word just needs
 * some candidate word with the same (prot_id, off) pair.
 */
4621 for (p = 0; p < lkup_exts->n_val_words; p++) {
4622 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4624 if (a[p].off == b[q].off &&
4625 a[p].prot_id == b[q].prot_id)
4626 /* Found the "p"th word in the
4631 /* After walking through all the words in the
4632 * "i"th recipe if "p"th word was not found then
4633 * this recipe is not what we are looking for.
4634 * So break out from this loop and try the next
4637 if (q >= recp[i].lkup_exts.n_val_words) {
4642 /* If for "i"th recipe the found was never set to false
4643 * then it means we found our match
4646 return i; /* Return the recipe ID */
/* No existing recipe matched the requested extraction sequence. */
4649 return ICE_MAX_NUM_RECIPES;
4653 * ice_prot_type_to_id - get protocol ID from protocol type
4654 * @type: protocol type
4655 * @id: pointer to variable that will receive the ID
4657 * Returns true if found, false otherwise
 *
 * Linear scan of the sentinel-terminated ice_prot_id_tbl; *id is written
 * only on a successful match.
4659 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4663 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4664 if (ice_prot_id_tbl[i].type == type) {
4665 *id = ice_prot_id_tbl[i].protocol_id;
4672 * ice_fill_valid_words - count valid words
4673 * @rule: advanced rule with lookup information
4674 * @lkup_exts: byte offset extractions of the words that are valid
4676 * calculate valid words in a lookup rule using mask value
 *
 * Appends one entry to lkup_exts->fv_words/field_mask for every non-zero
 * 16-bit word of the rule's mask; returns the number of words added.
4679 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4680 struct ice_prot_lkup_ext *lkup_exts)
/* Reject protocol types with no hardware ID mapping. */
4686 if (!ice_prot_type_to_id(rule->type, &prot_id))
4689 word = lkup_exts->n_val_words;
/* Scan the rule mask one u16 word at a time. */
4691 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4692 if (((u16 *)&rule->m_u)[j] &&
4693 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4694 /* No more space to accommodate */
4695 if (word >= ICE_MAX_CHAIN_WORDS)
4697 lkup_exts->fv_words[word].off =
4698 ice_prot_ext[rule->type].offs[j];
/* NOTE(review): both ice_prot_ext and ice_prot_id_tbl are indexed
 * directly by the enum value here, which is only correct while the
 * table order exactly mirrors enum ice_protocol_type. prot_id from
 * ice_prot_type_to_id() above is the order-independent value —
 * confirm whether it should be used instead.
 */
4699 lkup_exts->fv_words[word].prot_id =
4700 ice_prot_id_tbl[rule->type].protocol_id;
4701 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return count of words added; commit the new running total. */
4705 ret_val = word - lkup_exts->n_val_words;
4706 lkup_exts->n_val_words = word;
4712 * ice_find_prot_off_ind - check for specific ID and offset in rule
4713 * @lkup_exts: an array of protocol header extractions
4714 * @prot_type: protocol type to check
4715 * @off: expected offset of the extraction
4717 * Check if the prot_ext has given protocol ID and offset
 *
 * Returns the index of the first matching word, or ICE_MAX_CHAIN_WORDS
 * when no word matches.
4720 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4725 for (j = 0; j < lkup_exts->n_val_words; j++)
4726 if (lkup_exts->fv_words[j].off == off &&
4727 lkup_exts->fv_words[j].prot_id == prot_type)
/* Sentinel: not found. */
4730 return ICE_MAX_CHAIN_WORDS;
4734 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4735 * @lkup_exts: an array of protocol header extractions
4736 * @r_policy: preferred recipe grouping policy
4738 * Helper function to check if given recipe group is subset we need to check if
4739 * all the words described by the given recipe group exist in the advanced rule
4740 * look up information
4743 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4744 const struct ice_pref_recipe_group *r_policy)
/* Scratch list of word indexes matched so far; only committed to the
 * 'done' bitmap once every policy pair has been matched.
 */
4746 u8 ind[ICE_NUM_WORDS_RECIPE];
4750 /* check if everything in the r_policy is part of the entire rule */
4751 for (i = 0; i < r_policy->n_val_pairs; i++) {
4754 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4755 r_policy->pairs[i].off);
/* Any missing pair means the policy is not a subset. */
4756 if (j >= ICE_MAX_CHAIN_WORDS)
4759 /* store the indexes temporarily found by the find function
4760 * this will be used to mark the words as 'done'
4765 /* If the entire policy recipe was a true match, then mark the fields
4766 * that are covered by the recipe as 'done' meaning that these words
4767 * will be clumped together in one recipe.
4768 * "Done" here means in our searching if certain recipe group
4769 * matches or is subset of the given rule, then we mark all
4770 * the corresponding offsets as found. So the remaining recipes should
4771 * be created with whatever words that were left.
4773 for (i = 0; i < count; i++) {
4776 ice_set_bit(in, lkup_exts->done);
4782 * ice_create_first_fit_recp_def - Create a recipe grouping
4783 * @hw: pointer to the hardware structure
4784 * @lkup_exts: an array of protocol header extractions
4785 * @rg_list: pointer to a list that stores new recipe groups
4786 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4788 * Using first fit algorithm, take all the words that are still not done
4789 * and start grouping them in 4-word groups. Each group makes up one
 * recipe. Returns ICE_ERR_NO_MEMORY if a group entry cannot be allocated.
4792 static enum ice_status
4793 ice_create_first_fit_recp_def(struct ice_hw *hw,
4794 struct ice_prot_lkup_ext *lkup_exts,
4795 struct LIST_HEAD_TYPE *rg_list,
4798 struct ice_pref_recipe_group *grp = NULL;
4803 /* Walk through every word in the rule to check if it is not done. If so
4804 * then this word needs to be part of a new recipe.
4806 for (j = 0; j < lkup_exts->n_val_words; j++)
4807 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a fresh group when none is open or the current one is full
 * (ICE_NUM_WORDS_RECIPE words per recipe).
 */
4809 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4810 struct ice_recp_grp_entry *entry;
4812 entry = (struct ice_recp_grp_entry *)
4813 ice_malloc(hw, sizeof(*entry));
4815 return ICE_ERR_NO_MEMORY;
4816 LIST_ADD(&entry->l_entry, rg_list);
4817 grp = &entry->r_group;
/* Append this word's (prot_id, off, mask) to the open group. */
4821 grp->pairs[grp->n_val_pairs].prot_id =
4822 lkup_exts->fv_words[j].prot_id;
4823 grp->pairs[grp->n_val_pairs].off =
4824 lkup_exts->fv_words[j].off;
4825 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4833 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4834 * @hw: pointer to the hardware structure
4835 * @fv_list: field vector with the extraction sequence information
4836 * @rg_list: recipe groupings with protocol-offset pairs
4838 * Helper function to fill in the field vector indices for protocol-offset
4839 * pairs. These indexes are then ultimately programmed into a recipe.
4842 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4843 struct LIST_HEAD_TYPE *rg_list)
4845 struct ice_sw_fv_list_entry *fv;
4846 struct ice_recp_grp_entry *rg;
4847 struct ice_fv_word *fv_ext;
/* Nothing to index against if no field vectors were found. */
4849 if (LIST_EMPTY(fv_list))
/* Only the first field vector on the list is consulted; all recipe
 * groups take their word indices from it.
 */
4852 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4853 fv_ext = fv->fv_ptr->ew;
4855 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4858 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4859 struct ice_fv_word *pr;
4863 pr = &rg->r_group.pairs[i];
4864 mask = rg->r_group.mask[i];
/* Search the extraction words for the matching (prot_id, off). */
4866 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4867 if (fv_ext[j].prot_id == pr->prot_id &&
4868 fv_ext[j].off == pr->off) {
4869 /* Store index of field vector */
4871 /* Mask is given by caller as big
4872 * endian, but sent to FW as little
 * endian, hence the 16-bit byte swap below.
4875 rg->fv_mask[i] = mask << 8 | mask >> 8;
4883 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4884 * @hw: pointer to hardware structure
4885 * @rm: recipe management list entry
4886 * @match_tun: if field vector index for tunnel needs to be programmed
 *
 * Allocates recipe resources, builds one ice_aqc_recipe_data_elem per
 * recipe group in rm->rg_list (plus a root/chaining recipe when more
 * than one group exists), submits them via ice_aq_add_recipe() under the
 * change lock, then records each created recipe in SW bookkeeping.
4888 static enum ice_status
4889 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4892 struct ice_aqc_recipe_data_elem *tmp;
4893 struct ice_aqc_recipe_data_elem *buf;
4894 struct ice_recp_grp_entry *entry;
4895 enum ice_status status;
4900 /* When more than one recipe are required, another recipe is needed to
4901 * chain them together. Matching a tunnel metadata ID takes up one of
4902 * the match fields in the chaining recipe reducing the number of
4903 * chained recipes by one.
4905 if (rm->n_grp_count > 1)
4907 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
4908 (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
4909 return ICE_ERR_MAX_LIMIT;
/* tmp holds the FW's current recipe images; buf holds what we program. */
4911 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
4912 ICE_MAX_NUM_RECIPES,
4915 return ICE_ERR_NO_MEMORY;
4917 buf = (struct ice_aqc_recipe_data_elem *)
4918 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
4920 status = ICE_ERR_NO_MEMORY;
4924 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4925 recipe_count = ICE_MAX_NUM_RECIPES;
4926 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4928 if (status || recipe_count == 0)
4931 /* Allocate the recipe resources, and configure them according to the
4932 * match fields from protocol headers and extracted field vectors.
 *
 * chain_idx walks the free result indexes top-down from
 * ICE_CHAIN_FV_INDEX_START via the available_result_ids bitmap.
4934 chain_idx = ICE_CHAIN_FV_INDEX_START -
4935 ice_find_first_bit(available_result_ids,
4936 ICE_CHAIN_FV_INDEX_START + 1);
4937 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4940 status = ice_alloc_recipe(hw, &entry->rid);
4944 /* Clear the result index of the located recipe, as this will be
4945 * updated, if needed, later in the recipe creation process.
4947 tmp[0].content.result_indx = 0;
4949 buf[recps] = tmp[0];
4950 buf[recps].recipe_indx = (u8)entry->rid;
4951 /* if the recipe is a non-root recipe RID should be programmed
4952 * as 0 for the rules to be applied correctly.
4954 buf[recps].content.rid = 0;
4955 ice_memset(&buf[recps].content.lkup_indx, 0,
4956 sizeof(buf[recps].content.lkup_indx),
4959 /* All recipes use look-up index 0 to match switch ID. */
4960 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4961 buf[recps].content.mask[0] =
4962 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4963 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
 * to 0; valid indexes are then filled in from the group below.
4966 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4967 buf[recps].content.lkup_indx[i] = 0x80;
4968 buf[recps].content.mask[i] = 0;
/* Program the field-vector indexes/masks resolved earlier by
 * ice_fill_fv_word_index() into lookup slots 1..n.
 */
4971 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4972 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4973 buf[recps].content.mask[i + 1] =
4974 CPU_TO_LE16(entry->fv_mask[i]);
/* Chained case: each sub-recipe publishes its hit into a result
 * word (chain_idx) that the final root recipe will match on.
 */
4977 if (rm->n_grp_count > 1) {
4978 entry->chain_idx = chain_idx;
4979 buf[recps].content.result_indx =
4980 ICE_AQ_RECIPE_RESULT_EN |
4981 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4982 ICE_AQ_RECIPE_RESULT_DATA_M);
4983 ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
4984 available_result_ids);
4985 chain_idx = ICE_CHAIN_FV_INDEX_START -
4986 ice_find_first_bit(available_result_ids,
4987 ICE_CHAIN_FV_INDEX_START +
4991 /* fill recipe dependencies */
4992 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
4993 ICE_MAX_NUM_RECIPES);
4994 ice_set_bit(buf[recps].recipe_indx,
4995 (ice_bitmap_t *)buf[recps].recipe_bitmap);
4996 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is also the root recipe. */
5000 if (rm->n_grp_count == 1) {
5001 rm->root_rid = buf[0].recipe_indx;
5002 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5003 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5004 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5005 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5006 sizeof(buf[0].recipe_bitmap),
5007 ICE_NONDMA_TO_NONDMA);
5009 status = ICE_ERR_BAD_PTR;
5012 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5013 * the recipe which is getting created if specified
5014 * by user. Usually any advanced switch filter, which results
5015 * into new extraction sequence, ended up creating a new recipe
5016 * of type ROOT and usually recipes are associated with profiles
5017 * Switch rule referring newly created recipe, needs to have
5018 * either/or 'fwd' or 'join' priority, otherwise switch rule
5019 * evaluation will not happen correctly. In other words, if
5020 * switch rule to be evaluated on priority basis, then recipe
5021 * needs to have priority, otherwise it will be evaluated last.
5023 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: build one extra root recipe that matches the
 * chain result words of all sub-recipes.
 */
5025 struct ice_recp_grp_entry *last_chain_entry;
5028 /* Allocate the last recipe that will chain the outcomes of the
5029 * other recipes together
5031 status = ice_alloc_recipe(hw, &rid);
5035 buf[recps].recipe_indx = (u8)rid;
5036 buf[recps].content.rid = (u8)rid;
5037 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5038 /* the new entry created should also be part of rg_list to
5039 * make sure we have complete recipe
5041 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5042 sizeof(*last_chain_entry));
5043 if (!last_chain_entry) {
5044 status = ICE_ERR_NO_MEMORY;
5047 last_chain_entry->rid = rid;
5048 ice_memset(&buf[recps].content.lkup_indx, 0,
5049 sizeof(buf[recps].content.lkup_indx),
5051 /* All recipes use look-up index 0 to match switch ID. */
5052 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5053 buf[recps].content.mask[0] =
5054 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5055 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5056 buf[recps].content.lkup_indx[i] =
5057 ICE_AQ_RECIPE_LKUP_IGNORE;
5058 buf[recps].content.mask[i] = 0;
5062 /* update r_bitmap with the recp that is used for chaining */
5063 ice_set_bit(rid, rm->r_bitmap);
5064 /* this is the recipe that chains all the other recipes so it
5065 * should not have a chaining ID to indicate the same
5067 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5068 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5070 last_chain_entry->fv_idx[i] = entry->chain_idx;
5071 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5072 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5073 ice_set_bit(entry->rid, rm->r_bitmap);
5075 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5076 if (sizeof(buf[recps].recipe_bitmap) >=
5077 sizeof(rm->r_bitmap)) {
5078 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5079 sizeof(buf[recps].recipe_bitmap),
5080 ICE_NONDMA_TO_NONDMA);
5082 status = ICE_ERR_BAD_PTR;
5085 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5087 /* To differentiate among different UDP tunnels, a meta data ID
 * flag is matched in the root recipe (match_tun case).
5091 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5092 buf[recps].content.mask[i] =
5093 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5097 rm->root_rid = (u8)rid;
/* Programming recipes requires the change lock around the AQ call. */
5099 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5103 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5104 ice_release_change_lock(hw);
5108 /* Every recipe that just got created add it to the recipe
 * list (SW bookkeeping) so future lookups can match it.
5111 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5112 struct ice_switch_info *sw = hw->switch_info;
5113 struct ice_sw_recipe *recp;
5115 recp = &sw->recp_list[entry->rid];
5116 recp->root_rid = entry->rid;
5117 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5118 entry->r_group.n_val_pairs *
5119 sizeof(struct ice_fv_word),
5120 ICE_NONDMA_TO_NONDMA);
5122 recp->n_ext_words = entry->r_group.n_val_pairs;
5123 recp->chain_idx = entry->chain_idx;
5124 recp->recp_created = true;
5125 recp->big_recp = false;
5139 * ice_create_recipe_group - creates recipe group
5140 * @hw: pointer to hardware structure
5141 * @rm: recipe management list entry
5142 * @lkup_exts: lookup elements
 *
 * Partitions the lookup words into recipe groups on rm->rg_list: first by
 * matching the preferred groupings in ice_recipe_pack, then first-fit for
 * whatever words remain. On failure, frees any groups already added.
5144 static enum ice_status
5145 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5146 struct ice_prot_lkup_ext *lkup_exts)
5148 struct ice_recp_grp_entry *entry;
5149 struct ice_recp_grp_entry *tmp;
5150 enum ice_status status;
5154 rm->n_grp_count = 0;
5156 /* Each switch recipe can match up to 5 words or metadata. One word in
5157 * each recipe is used to match the switch ID. Four words are left for
5158 * matching other values. If the new advanced recipe requires more than
5159 * 4 words, it needs to be split into multiple recipes which are chained
5160 * together using the intermediate result that each produces as input to
5161 * the other recipes in the sequence.
5163 groups = ARRAY_SIZE(ice_recipe_pack);
5165 /* Check if any of the preferred recipes from the grouping policy
 * applies to this lookup.
5168 for (i = 0; i < groups; i++)
5169 /* Check if the recipe from the preferred grouping matches
5170 * or is a subset of the fields that needs to be looked up.
5172 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
5173 /* This recipe can be used by itself or grouped with
 * other recipes; copy the policy group as a new entry.
5176 entry = (struct ice_recp_grp_entry *)
5177 ice_malloc(hw, sizeof(*entry));
5179 status = ICE_ERR_NO_MEMORY;
5182 entry->r_group = ice_recipe_pack[i];
5183 LIST_ADD(&entry->l_entry, &rm->rg_list);
5187 /* Create recipes for words that are marked not done by packing them
 * 4 words at a time (first fit).
5190 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5191 &rm->rg_list, &recp_count);
5193 rm->n_grp_count += recp_count;
5194 rm->n_ext_words = lkup_exts->n_val_words;
5195 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5196 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5197 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5198 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path: release every group entry added to rg_list so far. */
5203 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5205 LIST_DEL(&entry->l_entry);
5206 ice_free(hw, entry);
5214 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5215 * @hw: pointer to hardware structure
5216 * @lkups: lookup elements or match criteria for the advanced recipe, one
5217 * structure per protocol header
5218 * @lkups_cnt: number of protocols
5219 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup's protocol type to its HW protocol ID, then asks
 * ice_get_sw_fv_list() for field vectors covering all of them. Returns
 * ICE_ERR_CFG for an unmapped protocol type.
5221 static enum ice_status
5222 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5223 struct LIST_HEAD_TYPE *fv_list)
5225 enum ice_status status;
5229 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5231 return ICE_ERR_NO_MEMORY;
5233 for (i = 0; i < lkups_cnt; i++)
5234 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5235 status = ICE_ERR_CFG;
5239 /* Find field vectors that include all specified protocol types */
5240 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
/* Temporary prot_ids array is freed on all paths. */
5243 ice_free(hw, prot_ids);
5248 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5249 * @hw: pointer to hardware structure
5250 * @lkups: lookup elements or match criteria for the advanced recipe, one
5251 * structure per protocol header
5252 * @lkups_cnt: number of protocols
5253 * @rinfo: other information regarding the rule e.g. priority and action info
5254 * @rid: return the recipe ID of the recipe created
 *
 * Top-level recipe creation: converts lookups to extraction words, reuses
 * an existing matching recipe when one is found, otherwise builds recipe
 * groups, programs them into HW, and associates the new recipes with all
 * matching profiles.
5256 static enum ice_status
5257 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5258 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5260 struct ice_prot_lkup_ext *lkup_exts;
5261 struct ice_recp_grp_entry *r_entry;
5262 struct ice_sw_fv_list_entry *fvit;
5263 struct ice_recp_grp_entry *r_tmp;
5264 struct ice_sw_fv_list_entry *tmp;
5265 enum ice_status status = ICE_SUCCESS;
5266 struct ice_sw_recipe *rm;
5267 bool match_tun = false;
5271 return ICE_ERR_PARAM;
5273 lkup_exts = (struct ice_prot_lkup_ext *)
5274 ice_malloc(hw, sizeof(*lkup_exts));
5276 return ICE_ERR_NO_MEMORY;
5278 /* Determine the number of words to be matched and if it exceeds a
5279 * recipe's restrictions
5281 for (i = 0; i < lkups_cnt; i++) {
5284 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5285 status = ICE_ERR_CFG;
5286 goto err_free_lkup_exts;
5289 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5291 status = ICE_ERR_CFG;
5292 goto err_free_lkup_exts;
/* Fast path: an existing recipe already matches this extraction. */
5296 *rid = ice_find_recp(hw, lkup_exts);
5297 if (*rid < ICE_MAX_NUM_RECIPES)
5298 /* Success if found a recipe that match the existing criteria */
5299 goto err_free_lkup_exts;
5301 /* Recipe we need does not exist, add a recipe */
5303 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5305 status = ICE_ERR_NO_MEMORY;
5306 goto err_free_lkup_exts;
5309 /* Get field vectors that contain fields extracted from all the protocol
5310 * headers being programmed.
5312 INIT_LIST_HEAD(&rm->fv_list);
5313 INIT_LIST_HEAD(&rm->rg_list);
5315 status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
5319 /* Group match words into recipes using preferred recipe grouping
 * criteria.
5322 status = ice_create_recipe_group(hw, rm, lkup_exts);
5326 /* There is only profile for UDP tunnels. So, it is necessary to use a
5327 * metadata ID flag to differentiate different tunnel types. A separate
5328 * recipe needs to be used for the metadata.
5330 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5331 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5332 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5335 /* set the recipe priority if specified */
5336 rm->priority = rinfo->priority ? rinfo->priority : 0;
5338 /* Find offsets from the field vector. Pick the first one for all the
 * recipe groups (see ice_fill_fv_word_index).
5341 ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5342 status = ice_add_sw_recipe(hw, rm, match_tun);
5346 /* Associate all the recipes created with all the profiles in the
5347 * common field vector.
5349 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5351 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge our new recipes with the profile's existing recipe set
 * before writing the association back.
 */
5353 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5354 (u8 *)r_bitmap, NULL);
5358 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5359 ICE_MAX_NUM_RECIPES);
5360 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5364 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5367 ice_release_change_lock(hw);
/* Remember the root recipe ID and the extraction words it matches. */
5373 *rid = rm->root_rid;
5374 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5375 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: recipe group entries and field-vector list are transient. */
5377 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5378 ice_recp_grp_entry, l_entry) {
5379 LIST_DEL(&r_entry->l_entry);
5380 ice_free(hw, r_entry);
5383 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5385 LIST_DEL(&fvit->list_entry);
5390 ice_free(hw, rm->root_buf);
5395 ice_free(hw, lkup_exts);
5401 * ice_find_dummy_packet - find dummy packet by tunnel type
5403 * @lkups: lookup elements or match criteria for the advanced recipe, one
5404 * structure per protocol header
5405 * @lkups_cnt: number of protocols
5406 * @tun_type: tunnel type from the match criteria
5407 * @pkt: dummy packet to fill according to filter match criteria
5408 * @pkt_len: packet length of dummy packet
5409 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Selects the template packet (and its header-offset table) that best
 * matches the tunnel type and the L3/L4 protocols present in the lookups.
 * Selection order: tunnel-type specific templates first, then plain
 * UDP/TCP/IPv6 combinations.
5412 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5413 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5415 const struct ice_dummy_pkt_offsets **offsets)
5417 bool tcp = false, udp = false, ipv6 = false;
/* Scan the lookups once to learn which protocols the rule matches on. */
5420 for (i = 0; i < lkups_cnt; i++) {
5421 if (lkups[i].type == ICE_UDP_ILOS)
5423 else if (lkups[i].type == ICE_TCP_IL)
5425 else if (lkups[i].type == ICE_IPV6_OFOS)
5429 if (tun_type == ICE_ALL_TUNNELS) {
5430 *pkt = dummy_gre_udp_packet;
5431 *pkt_len = sizeof(dummy_gre_udp_packet);
5432 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: choose GRE+TCP or GRE+UDP template depending on inner L4. */
5436 if (tun_type == ICE_SW_TUN_NVGRE) {
5438 *pkt = dummy_gre_tcp_packet;
5439 *pkt_len = sizeof(dummy_gre_tcp_packet);
5440 *offsets = dummy_gre_tcp_packet_offsets;
5444 *pkt = dummy_gre_udp_packet;
5445 *pkt_len = sizeof(dummy_gre_udp_packet);
5446 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels share the same pair of templates. */
5450 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5451 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5453 *pkt = dummy_udp_tun_tcp_packet;
5454 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5455 *offsets = dummy_udp_tun_tcp_packet_offsets;
5459 *pkt = dummy_udp_tun_udp_packet;
5460 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5461 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled: pick by the L3/L4 flags collected above. */
5466 *pkt = dummy_udp_packet;
5467 *pkt_len = sizeof(dummy_udp_packet);
5468 *offsets = dummy_udp_packet_offsets;
5470 } else if (udp && ipv6) {
5471 *pkt = dummy_udp_ipv6_packet;
5472 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5473 *offsets = dummy_udp_ipv6_packet_offsets;
5475 } else if ((tcp && ipv6) || ipv6) {
5476 *pkt = dummy_tcp_ipv6_packet;
5477 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5478 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default fallback: plain IPv4 TCP template. */
5482 *pkt = dummy_tcp_packet;
5483 *pkt_len = sizeof(dummy_tcp_packet);
5484 *offsets = dummy_tcp_packet_offsets;
5488 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5490 * @lkups: lookup elements or match criteria for the advanced recipe, one
5491 * structure per protocol header
5492 * @lkups_cnt: number of protocols
5493 * @s_rule: stores rule information from the match criteria
5494 * @dummy_pkt: dummy packet to fill according to filter match criteria
5495 * @pkt_len: packet length of dummy packet
5496 * @offsets: offset info for the dummy packet
 *
 * Copies the template packet into the switch rule buffer, then overlays
 * each lookup's header values — but only the bits selected by its mask —
 * at the offset the template's offset table gives for that protocol.
5498 static enum ice_status
5499 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5500 struct ice_aqc_sw_rules_elem *s_rule,
5501 const u8 *dummy_pkt, u16 pkt_len,
5502 const struct ice_dummy_pkt_offsets *offsets)
5507 /* Start with a packet with a pre-defined/dummy content. Then, fill
5508 * in the header values to be looked up or matched.
5510 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5512 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5514 for (i = 0; i < lkups_cnt; i++) {
5515 enum ice_protocol_type type;
5516 u16 offset = 0, len = 0, j;
5519 /* find the start of this layer; it should be found since this
5520 * was already checked when search for the dummy packet
5522 type = lkups[i].type;
5523 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5524 if (type == offsets[j].type) {
5525 offset = offsets[j].offset;
5530 /* this should never happen in a correct calling sequence */
5532 return ICE_ERR_PARAM;
/* Number of bytes to overlay depends on the header type. */
5534 switch (lkups[i].type) {
5537 len = sizeof(struct ice_ether_hdr);
5540 len = sizeof(struct ice_ethtype_hdr);
5544 len = sizeof(struct ice_ipv4_hdr);
5548 len = sizeof(struct ice_ipv6_hdr);
5553 len = sizeof(struct ice_l4_hdr);
5556 len = sizeof(struct ice_sctp_hdr);
5559 len = sizeof(struct ice_nvgre);
5564 len = sizeof(struct ice_udp_tnl_hdr);
5567 return ICE_ERR_PARAM;
5570 /* the length should be a word multiple */
5571 if (len % ICE_BYTES_PER_WORD)
5574 /* We have the offset to the header start, the length, the
5575 * caller's header values and mask. Use this information to
5576 * copy the data into the dummy packet appropriately based on
5577 * the mask. Note that we need to only write the bits as
5578 * indicated by the mask to make sure we don't improperly write
5579 * over any significant packet data.
5581 for (j = 0; j < len / sizeof(u16); j++)
5582 if (((u16 *)&lkups[i].m_u)[j])
5583 ((u16 *)(pkt + offset))[j] =
5584 (((u16 *)(pkt + offset))[j] &
5585 ~((u16 *)&lkups[i].m_u)[j]) |
5586 (((u16 *)&lkups[i].h_u)[j] &
5587 ((u16 *)&lkups[i].m_u)[j]);
/* Record final packet length in the rule (little endian for FW). */
5590 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5596 * ice_find_adv_rule_entry - Search a rule entry
5597 * @hw: pointer to the hardware structure
5598 * @lkups: lookup elements or match criteria for the advanced recipe, one
5599 * structure per protocol header
5600 * @lkups_cnt: number of protocols
5601 * @recp_id: recipe ID for which we are finding the rule
5602 * @rinfo: other information regarding the rule e.g. priority and action info
5604 * Helper function to search for a given advance rule entry
5605 * Returns pointer to entry storing the rule if found
5607 static struct ice_adv_fltr_mgmt_list_entry *
5608 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5609 u16 lkups_cnt, u8 recp_id,
5610 struct ice_adv_rule_info *rinfo)
5612 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5613 struct ice_switch_info *sw = hw->switch_info;
/* Walk all filters on this recipe; a match requires the same lookup
 * count, byte-identical lookups, and matching flag/tunnel rule info.
 */
5616 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5617 ice_adv_fltr_mgmt_list_entry, list_entry) {
5618 bool lkups_matched = true;
5620 if (lkups_cnt != list_itr->lkups_cnt)
5622 for (i = 0; i < list_itr->lkups_cnt; i++)
5623 if (memcmp(&list_itr->lkups[i], &lkups[i],
5625 lkups_matched = false;
5628 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5629 rinfo->tun_type == list_itr->rule_info.tun_type &&
5637 * ice_adv_add_update_vsi_list
5638 * @hw: pointer to the hardware structure
5639 * @m_entry: pointer to current adv filter management list entry
5640 * @cur_fltr: filter information from the book keeping entry
5641 * @new_fltr: filter information with the new VSI to be added
5643 * Call AQ command to add or update previously created VSI list with new VSI.
5645 * Helper function to do book keeping associated with adding filter information
5646 * The algorithm to do the booking keeping is described below :
5647 * When a VSI needs to subscribe to a given advanced filter
5648 * if only one VSI has been added till now
5649 * Allocate a new VSI list and add two VSIs
5650 * to this list using switch rule command
5651 * Update the previously created switch rule with the
5652 * newly created VSI list ID
5653 * if a VSI list was previously created
5654 * Add the new VSI to the previously created VSI list set
5655 * using the update switch rule command
/* NOTE(review): sampled view — several statements (error checks after AQ
 * calls, the else-branch header, final return) are not visible here.
 */
5657 static enum ice_status
5658 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5659 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5660 struct ice_adv_rule_info *cur_fltr,
5661 struct ice_adv_rule_info *new_fltr)
5663 enum ice_status status;
5664 u16 vsi_list_id = 0;
/* Sharing a queue/queue-group rule across VSIs is not implemented */
5666 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5667 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5668 return ICE_ERR_NOT_IMPL;
/* Two identical drop rules would be redundant */
5670 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5671 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5672 return ICE_ERR_ALREADY_EXISTS;
/* Mixing queue forwarding with VSI(-list) forwarding is unsupported */
5674 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5675 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5676 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5677 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5678 return ICE_ERR_NOT_IMPL;
5680 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5681 /* Only one entry existed in the mapping and it was not already
5682 * a part of a VSI list. So, create a VSI list with the old and
5685 struct ice_fltr_info tmp_fltr;
5686 u16 vsi_handle_arr[2];
5688 /* A rule already exists with the new VSI being added */
5689 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5690 new_fltr->sw_act.fwd_id.hw_vsi_id)
5691 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the old and the new VSI handles */
5693 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5694 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5695 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
/* Repoint the existing switch rule at the newly created VSI list.
 * NOTE(review): tmp_fltr appears to be used without a prior memset in
 * this sampled view — confirm against the full source.
 */
5701 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5702 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5703 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5704 /* Update the previous switch rule of "forward to VSI" to
5707 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new forwarding target in the bookkeeping entry */
5711 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5712 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5713 m_entry->vsi_list_info =
5714 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5717 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5719 if (!m_entry->vsi_list_info)
5722 /* A rule already exists with the new VSI being added */
5723 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5726 /* Update the previously created VSI list set with
5727 * the new VSI ID passed in
5729 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5731 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5733 ice_aqc_opc_update_sw_rules,
5735 /* update VSI list mapping info with new VSI ID */
5737 ice_set_bit(vsi_handle,
5738 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter */
5741 m_entry->vsi_count++;
5746 * ice_add_adv_rule - helper function to create an advanced switch rule
5747 * @hw: pointer to the hardware structure
5748 * @lkups: information on the words that needs to be looked up. All words
5749 * together makes one recipe
5750 * @lkups_cnt: num of entries in the lkups array
5751 * @rinfo: other information related to the rule that needs to be programmed
5752 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5753 * ignored is case of error.
5755 * This function can program only 1 rule at a time. The lkups is used to
5756 * describe the all the words that forms the "lookup" portion of the recipe.
5757 * These words can span multiple protocols. Callers to this function need to
5758 * pass in a list of protocol headers with lookup information along and mask
5759 * that determines which words are valid from the given protocol header.
5760 * rinfo describes other information related to this rule such as forwarding
5761 * IDs, priority of this rule, etc.
/* NOTE(review): sampled view — parameter validation, several if-conditions,
 * the `act`/`q_rgn` declarations, break statements in the switch, and the
 * function's closing return are not visible here.
 */
5764 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5765 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5766 struct ice_rule_query_data *added_entry)
5768 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5769 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5770 const struct ice_dummy_pkt_offsets *pkt_offsets;
5771 struct ice_aqc_sw_rules_elem *s_rule = NULL;
5772 struct LIST_HEAD_TYPE *rule_head;
5773 struct ice_switch_info *sw;
5774 enum ice_status status;
5775 const u8 *pkt = NULL;
5781 return ICE_ERR_PARAM;
/* Reject lookups whose match mask contains no set bits at all */
5783 for (i = 0; i < lkups_cnt; i++) {
5786 /* Validate match masks to make sure that there is something
5789 ptr = (u16 *)&lkups[i].m_u;
5790 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5797 return ICE_ERR_PARAM;
5799 /* make sure that we can locate a dummy packet */
5800 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5803 status = ICE_ERR_PARAM;
5804 goto err_ice_add_adv_rule;
/* Only these four filter actions are supported for advanced rules */
5807 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5808 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5809 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5810 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5813 vsi_handle = rinfo->sw_act.vsi_handle;
5814 if (!ice_is_vsi_valid(hw, vsi_handle))
5815 return ICE_ERR_PARAM;
/* Resolve the driver VSI handle to a hardware VSI number */
5817 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5818 rinfo->sw_act.fwd_id.hw_vsi_id =
5819 ice_get_hw_vsi_num(hw, vsi_handle);
/* TX rules use the originating HW VSI as the rule source */
5820 if (rinfo->sw_act.flag & ICE_FLTR_TX)
5821 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Find or create a recipe matching these lookup words; rid is its ID */
5823 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5826 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5828 /* we have to add VSI to VSI_LIST and increment vsi_count.
5829 * Also Update VSI list so that we can change forwarding rule
5830 * if the rule already exists, we will check if it exists with
5831 * same vsi_id, if not then add it to the VSI list if it already
5832 * exists if not then create a VSI list and add the existing VSI
5833 * ID and the new VSI ID to the list
5834 * We will add that VSI to the list
5836 status = ice_adv_add_update_vsi_list(hw, m_entry,
5837 &m_entry->rule_info,
/* Rule already existed: report the existing rule's identifiers */
5840 added_entry->rid = rid;
5841 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5842 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: size the AQ buffer for header + dummy packet payload */
5846 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5847 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5849 return ICE_ERR_NO_MEMORY;
5850 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's filter action into the rule's act word */
5851 switch (rinfo->sw_act.fltr_act) {
5852 case ICE_FWD_TO_VSI:
5853 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5854 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5855 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5858 act |= ICE_SINGLE_ACT_TO_Q;
5859 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5860 ICE_SINGLE_ACT_Q_INDEX_M;
5862 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size */
5863 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5864 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
5865 act |= ICE_SINGLE_ACT_TO_Q;
5866 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5867 ICE_SINGLE_ACT_Q_INDEX_M;
5868 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5869 ICE_SINGLE_ACT_Q_REGION_M;
5871 case ICE_DROP_PACKET:
5872 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5873 ICE_SINGLE_ACT_VALID_BIT;
/* Unreachable if the earlier action check passed; defensive default */
5876 status = ICE_ERR_CFG;
5877 goto err_ice_add_adv_rule;
5880 /* set the rule LOOKUP type based on caller specified 'RX'
5881 * instead of hardcoding it to be either LOOKUP_TX/RX
5883 * for 'RX' set the source to be the port number
5884 * for 'TX' set the source to be the source HW VSI number (determined
5888 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5889 s_rule->pdata.lkup_tx_rx.src =
5890 CPU_TO_LE16(hw->port_info->lport);
5892 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5893 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5896 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5897 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and patch in the lookup values */
5899 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
/* Program the rule into hardware via the admin queue */
5902 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5903 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5906 goto err_ice_add_adv_rule;
/* Allocate and populate the software bookkeeping entry for this rule */
5907 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5908 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5910 status = ICE_ERR_NO_MEMORY;
5911 goto err_ice_add_adv_rule;
5914 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5915 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5916 ICE_NONDMA_TO_NONDMA);
5917 if (!adv_fltr->lkups) {
5918 status = ICE_ERR_NO_MEMORY;
5919 goto err_ice_add_adv_rule;
5922 adv_fltr->lkups_cnt = lkups_cnt;
5923 adv_fltr->rule_info = *rinfo;
/* Hardware returned the assigned rule index in the response buffer */
5924 adv_fltr->rule_info.fltr_rule_id =
5925 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5926 sw = hw->switch_info;
5927 sw->recp_list[rid].adv_rule = true;
5928 rule_head = &sw->recp_list[rid].filt_rules;
5930 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5931 struct ice_fltr_info tmp_fltr;
/* NOTE(review): tmp_fltr appears to lack a memset before use in this
 * sampled view — confirm against the full source.
 */
5933 tmp_fltr.fltr_rule_id =
5934 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5935 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5936 tmp_fltr.fwd_id.hw_vsi_id =
5937 ice_get_hw_vsi_num(hw, vsi_handle);
5938 tmp_fltr.vsi_handle = vsi_handle;
5939 /* Update the previous switch rule of "forward to VSI" to
5942 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5944 goto err_ice_add_adv_rule;
5945 adv_fltr->vsi_count = 1;
5948 /* Add rule entry to book keeping list */
5949 LIST_ADD(&adv_fltr->list_entry, rule_head);
5951 added_entry->rid = rid;
5952 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5953 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error unwind: release bookkeeping memory, then the AQ rule buffer */
5955 err_ice_add_adv_rule:
5956 if (status && adv_fltr) {
5957 ice_free(hw, adv_fltr->lkups);
5958 ice_free(hw, adv_fltr);
5961 ice_free(hw, s_rule);
5967 * ice_adv_rem_update_vsi_list
5968 * @hw: pointer to the hardware structure
5969 * @vsi_handle: VSI handle of the VSI to remove
5970 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes one VSI from an advanced rule's VSI list; when only one VSI
 * remains, converts the rule back to plain "forward to VSI" and deletes
 * the now-unneeded VSI list.
 * NOTE(review): sampled view — error checks after AQ calls, the
 * rem_vsi_handle/vsi_list_id declarations and the final return are not
 * visible here.
 */
5973 static enum ice_status
5974 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5975 struct ice_adv_fltr_mgmt_list_entry *fm_list)
5977 struct ice_vsi_list_map_info *vsi_list_info;
5978 enum ice_sw_lkup_type lkup_type;
5979 enum ice_status status;
/* Only rules currently forwarding to a VSI list can be trimmed */
5982 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5983 fm_list->vsi_count == 0)
5984 return ICE_ERR_PARAM;
5986 /* A rule with the VSI being removed does not exist */
5987 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5988 return ICE_ERR_DOES_NOT_EXIST;
/* Ask hardware to drop this VSI from the list (remove=true) */
5990 lkup_type = ICE_SW_LKUP_LAST;
5991 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
5992 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5993 ice_aqc_opc_update_sw_rules,
/* Mirror the removal in the software bookkeeping */
5998 fm_list->vsi_count--;
5999 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6000 vsi_list_info = fm_list->vsi_list_info;
6001 if (fm_list->vsi_count == 1) {
6002 struct ice_fltr_info tmp_fltr;
/* Identify the single VSI left in the list */
6005 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6007 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6008 return ICE_ERR_OUT_OF_RANGE;
6010 /* Make sure VSI list is empty before removing it below */
6011 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6013 ice_aqc_opc_update_sw_rules,
/* Convert the rule from VSI-list forwarding to direct VSI forwarding.
 * NOTE(review): tmp_fltr appears to be used without a prior memset in
 * this sampled view — confirm against the full source.
 */
6017 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6018 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6019 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6020 tmp_fltr.fwd_id.hw_vsi_id =
6021 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6022 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6023 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6025 /* Update the previous switch rule of "MAC forward to VSI" to
6026 * "MAC fwd to VSI list"
6028 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6030 ice_debug(hw, ICE_DBG_SW,
6031 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6032 tmp_fltr.fwd_id.hw_vsi_id, status);
6037 if (fm_list->vsi_count == 1) {
6038 /* Remove the VSI list since it is no longer used */
6039 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6041 ice_debug(hw, ICE_DBG_SW,
6042 "Failed to remove VSI list %d, error %d\n",
6043 vsi_list_id, status);
/* Drop the list's bookkeeping entry and clear the dangling pointer */
6047 LIST_DEL(&vsi_list_info->list_entry);
6048 ice_free(hw, vsi_list_info);
6049 fm_list->vsi_list_info = NULL;
6056 * ice_rem_adv_rule - removes existing advanced switch rule
6057 * @hw: pointer to the hardware structure
6058 * @lkups: information on the words that needs to be looked up. All words
6059 * together makes one recipe
6060 * @lkups_cnt: num of entries in the lkups array
6061 * @rinfo: Its the pointer to the rule information for the rule
6063 * This function can be used to remove 1 rule at a time. The lkups is
6064 * used to describe all the words that forms the "lookup" portion of the
6065 * rule. These words can span multiple protocols. Callers to this function
6066 * need to pass in a list of protocol headers with lookup information along
6067 * and mask that determines which words are valid from the given protocol
6068 * header. rinfo describes other information related to this rule such as
6069 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): sampled view — the return type line, several error checks,
 * the vsi_handle declaration, the s_rule type/rule-size fields and the
 * final return are not visible here.
 */
6072 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6073 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6075 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6076 const struct ice_dummy_pkt_offsets *offsets;
6077 struct ice_prot_lkup_ext lkup_exts;
6078 u16 rule_buf_sz, pkt_len, i, rid;
6079 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6080 enum ice_status status = ICE_SUCCESS;
6081 bool remove_rule = false;
6082 const u8 *pkt = NULL;
/* Rebuild the lookup-word extraction to locate the owning recipe */
6085 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6086 for (i = 0; i < lkups_cnt; i++) {
6089 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6092 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6096 rid = ice_find_recp(hw, &lkup_exts);
6097 /* If did not find a recipe that match the existing criteria */
6098 if (rid == ICE_MAX_NUM_RECIPES)
6099 return ICE_ERR_PARAM;
6101 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6102 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6103 /* the rule is already removed */
/* Decide under the lock whether the whole rule goes or just one VSI */
6106 ice_acquire_lock(rule_lock);
6107 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6109 } else if (list_elem->vsi_count > 1) {
/* More subscribers remain: shrink the VSI list, keep the rule */
6110 list_elem->vsi_list_info->ref_cnt--;
6111 remove_rule = false;
6112 vsi_handle = rinfo->sw_act.vsi_handle;
6113 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6115 vsi_handle = rinfo->sw_act.vsi_handle;
6116 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6118 ice_release_lock(rule_lock);
/* Last subscriber gone: the hardware rule itself must be removed */
6121 if (list_elem->vsi_count == 0)
6124 ice_release_lock(rule_lock);
6126 struct ice_aqc_sw_rules_elem *s_rule;
/* Recreate the dummy packet only to size the removal AQ buffer */
6128 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
6129 &pkt_len, &offsets);
6130 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6132 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6135 return ICE_ERR_NO_MEMORY;
/* Removal request carries only the rule index; no action or header */
6136 s_rule->pdata.lkup_tx_rx.act = 0;
6137 s_rule->pdata.lkup_tx_rx.index =
6138 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6139 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6140 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6142 ice_aqc_opc_remove_sw_rules, NULL);
6143 if (status == ICE_SUCCESS) {
/* Hardware removal succeeded: drop the bookkeeping entry too */
6144 ice_acquire_lock(rule_lock);
6145 LIST_DEL(&list_elem->list_entry);
6146 ice_free(hw, list_elem->lkups);
6147 ice_free(hw, list_elem);
6148 ice_release_lock(rule_lock);
6150 ice_free(hw, s_rule);
6156 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6157 * @hw: pointer to the hardware structure
6158 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6160 * This function is used to remove 1 rule at a time. The removal is based on
6161 * the remove_entry parameter. This function will remove rule for a given
6162 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* NOTE(review): sampled view — the return type line and the loop's closing
 * brace are not visible here.
 */
6165 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6166 struct ice_rule_query_data *remove_entry)
6168 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6169 struct LIST_HEAD_TYPE *list_head;
6170 struct ice_adv_rule_info rinfo;
6171 struct ice_switch_info *sw;
6173 sw = hw->switch_info;
/* A rule cannot exist under a recipe that was never created */
6174 if (!sw->recp_list[remove_entry->rid].recp_created)
6175 return ICE_ERR_PARAM;
6176 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6177 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
/* Match on the hardware-assigned rule ID */
6179 if (list_itr->rule_info.fltr_rule_id ==
6180 remove_entry->rule_id) {
/* Copy rule info so the caller's VSI handle can be substituted */
6181 rinfo = list_itr->rule_info;
6182 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
/* Delegate the actual removal to the lookup-based path */
6183 return ice_rem_adv_rule(hw, list_itr->lkups,
6184 list_itr->lkups_cnt, &rinfo);
/* No rule with that ID under this recipe */
6187 return ICE_ERR_PARAM;
6191 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6193 * @hw: pointer to the hardware structure
6194 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6196 * This function is used to remove all the rules for a given VSI and as soon
6197 * as removing a rule fails, it will return immediately with the error code,
6198 * else it will return ICE_SUCCESS
/* NOTE(review): sampled view — the return type line, `continue` statements,
 * the vsi_list_id usage and the final return are not visible here.
 */
6201 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6203 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6204 struct ice_vsi_list_map_info *map_info;
6205 struct LIST_HEAD_TYPE *list_head;
6206 struct ice_adv_rule_info rinfo;
6207 struct ice_switch_info *sw;
6208 enum ice_status status;
6209 u16 vsi_list_id = 0;
6212 sw = hw->switch_info;
/* Scan every recipe; only created recipes with advanced rules qualify */
6213 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6214 if (!sw->recp_list[rid].recp_created)
6216 if (!sw->recp_list[rid].adv_rule)
6218 list_head = &sw->recp_list[rid].filt_rules;
6220 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6221 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Only remove rules that actually reference this VSI */
6222 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6226 rinfo = list_itr->rule_info;
6227 rinfo.sw_act.vsi_handle = vsi_handle;
/* Fail fast: bail out on the first removal error */
6228 status = ice_rem_adv_rule(hw, list_itr->lkups,
6229 list_itr->lkups_cnt, &rinfo);
6239 * ice_replay_fltr - Replay all the filters stored by a specific list head
6240 * @hw: pointer to the hardware structure
6241 * @list_head: list for which filters needs to be replayed
6242 * @recp_id: Recipe ID for which rules need to be replayed
/* NOTE(review): sampled view — the vsi_handle declaration, the per-VSI
 * inner loop header, `goto end` targets and the final return are not
 * visible here.
 */
6244 static enum ice_status
6245 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6247 struct ice_fltr_mgmt_list_entry *itr;
6248 struct LIST_HEAD_TYPE l_head;
6249 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty recipe list */
6251 if (LIST_EMPTY(list_head))
6254 /* Move entries from the given list_head to a temporary l_head so that
6255 * they can be replayed. Otherwise when trying to re-add the same
6256 * filter, the function will return already exists
6258 LIST_REPLACE_INIT(list_head, &l_head);
6260 /* Mark the given list_head empty by reinitializing it so filters
6261 * could be added again by *handler
6263 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6265 struct ice_fltr_list_entry f_entry;
6267 f_entry.fltr_info = itr->fltr_info;
/* Simple case: single-VSI, non-VLAN rule is re-added as-is */
6268 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6269 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6270 if (status != ICE_SUCCESS)
6275 /* Add a filter per VSI separately */
/* Multi-VSI (or VLAN) rules: iterate the VSI bitmap one handle
 * at a time and re-add each as a direct forward-to-VSI rule
 */
6280 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6282 if (!ice_is_vsi_valid(hw, vsi_handle))
6285 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6286 f_entry.fltr_info.vsi_handle = vsi_handle;
6287 f_entry.fltr_info.fwd_id.hw_vsi_id =
6288 ice_get_hw_vsi_num(hw, vsi_handle);
6289 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN rules take a dedicated add path */
6290 if (recp_id == ICE_SW_LKUP_VLAN)
6291 status = ice_add_vlan_internal(hw, &f_entry);
6293 status = ice_add_rule_internal(hw, recp_id,
6295 if (status != ICE_SUCCESS)
6300 /* Clear the filter management list */
6301 ice_rem_sw_rule_info(hw, &l_head);
6306 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6307 * @hw: pointer to the hardware structure
6309 * NOTE: This function does not clean up partially added filters on error.
6310 * It is up to caller of the function to issue a reset or fail early.
/* Iterates every recipe's filter list and replays it; stops at the first
 * failure. NOTE(review): sampled view — the loop index declaration,
 * the break/return on error and the final return are not visible here.
 */
6312 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6314 struct ice_switch_info *sw = hw->switch_info;
6315 enum ice_status status = ICE_SUCCESS;
6318 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6319 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
/* Replay this recipe's rules; abort the sweep on first error */
6321 status = ice_replay_fltr(hw, i, head);
6322 if (status != ICE_SUCCESS)
6329 * ice_replay_vsi_fltr - Replay filters for requested VSI
6330 * @hw: pointer to the hardware structure
6331 * @vsi_handle: driver VSI handle
6332 * @recp_id: Recipe ID for which rules need to be replayed
6333 * @list_head: list for which filters need to be replayed
6335 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6336 * It is required to pass valid VSI handle.
/* NOTE(review): sampled view — the hw_vsi_id declaration, `continue`
 * statements and the final return are not visible here.
 */
6338 static enum ice_status
6339 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6340 struct LIST_HEAD_TYPE *list_head)
6342 struct ice_fltr_mgmt_list_entry *itr;
6343 enum ice_status status = ICE_SUCCESS;
6346 if (LIST_EMPTY(list_head))
/* Resolve the handle once; reused for all rules below */
6348 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6350 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6352 struct ice_fltr_list_entry f_entry;
6354 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN rule owned by this VSI: re-add directly */
6355 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6356 itr->fltr_info.vsi_handle == vsi_handle) {
6357 /* update the src in case it is VSI num */
6358 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6359 f_entry.fltr_info.src = hw_vsi_id;
6360 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6361 if (status != ICE_SUCCESS)
/* Rules backed by a VSI list: only replay if this VSI is a member */
6365 if (!itr->vsi_list_info ||
6366 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6368 /* Clearing it so that the logic can add it back */
6369 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6370 f_entry.fltr_info.vsi_handle = vsi_handle;
6371 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6372 /* update the src in case it is VSI num */
6373 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6374 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN rules take a dedicated add path */
6375 if (recp_id == ICE_SW_LKUP_VLAN)
6376 status = ice_add_vlan_internal(hw, &f_entry);
6378 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6379 if (status != ICE_SUCCESS)
6387 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6388 * @hw: pointer to the hardware structure
6389 * @vsi_handle: driver VSI handle
6390 * @list_head: list for which filters need to be replayed
6392 * Replay the advanced rule for the given VSI.
/* NOTE(review): sampled view — `continue` on VSI mismatch, the error break
 * and the final return are not visible here.
 */
6394 static enum ice_status
6395 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6396 struct LIST_HEAD_TYPE *list_head)
6398 struct ice_rule_query_data added_entry = { 0 };
6399 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6400 enum ice_status status = ICE_SUCCESS;
6402 if (LIST_EMPTY(list_head))
6404 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6406 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6407 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only rules belonging to the requested VSI are replayed */
6409 if (vsi_handle != rinfo->sw_act.vsi_handle)
/* Re-program the rule from its saved lookups and rule info */
6411 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6420 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6421 * @hw: pointer to the hardware structure
6422 * @vsi_handle: driver VSI handle
6424 * Replays filters for requested VSI via vsi_handle.
/* NOTE(review): sampled view — the loop index declaration, the else keyword
 * before the adv-rule call and the final return are not visible here.
 */
6426 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6428 struct ice_switch_info *sw = hw->switch_info;
6429 enum ice_status status;
6432 /* Update the recipes that were created */
6433 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6434 struct LIST_HEAD_TYPE *head;
/* Replay from the saved replay list, not the live filter list */
6436 head = &sw->recp_list[i].filt_replay_rules;
/* Legacy recipes go through the basic replay; advanced recipes
 * through the advanced-rule replay
 */
6437 if (!sw->recp_list[i].adv_rule)
6438 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6440 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Abort the sweep on the first failure */
6441 if (status != ICE_SUCCESS)
6449 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6450 * @hw: pointer to the HW struct
6452 * Deletes the filter replay rules.
6454 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6456 struct ice_switch_info *sw = hw->switch_info;
6462 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6463 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6464 struct LIST_HEAD_TYPE *l_head;
6466 l_head = &sw->recp_list[i].filt_replay_rules;
6467 if (!sw->recp_list[i].adv_rule)
6468 ice_rem_sw_rule_info(hw, l_head);
6470 ice_rem_adv_rule_info(hw, l_head);