/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields inside the dummy Ethernet header defined below */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
/* Largest valid 12-bit VLAN identifier */
#define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size of a lookup rx/tx switch rule carrying the dummy Ethernet header.
 * The trailing "- 1" accounts for the one-byte header placeholder already
 * counted inside ice_sw_rule_lkup_rx_tx — presumably a hdr[1] member;
 * TODO(review): confirm against the struct definition.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Size of a lookup rx/tx switch rule with no packet header payload */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action switch rule holding n action entries */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list switch rule holding n VSI numbers */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
162 { ICE_PROTOCOL_LAST, 0 },
166 u8 dummy_udp_tun_tcp_packet[] = {
167 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
168 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
171 0x08, 0x00, /* ICE_ETYPE_OL 12 */
173 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
174 0x00, 0x01, 0x00, 0x00,
175 0x40, 0x11, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
180 0x00, 0x46, 0x00, 0x00,
182 0x04, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
186 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
190 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
191 0x00, 0x01, 0x00, 0x00,
192 0x40, 0x06, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
199 0x50, 0x02, 0x20, 0x00,
200 0x00, 0x00, 0x00, 0x00
204 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
206 { ICE_ETYPE_OL, 12 },
207 { ICE_IPV4_OFOS, 14 },
212 { ICE_UDP_ILOS, 84 },
213 { ICE_PROTOCOL_LAST, 0 },
217 u8 dummy_udp_tun_udp_packet[] = {
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_OL 12 */
224 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
225 0x00, 0x01, 0x00, 0x00,
226 0x00, 0x11, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
231 0x00, 0x3a, 0x00, 0x00,
233 0x0c, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
234 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
237 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00,
241 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
242 0x00, 0x01, 0x00, 0x00,
243 0x00, 0x11, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
248 0x00, 0x08, 0x00, 0x00,
252 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
254 { ICE_ETYPE_OL, 12 },
255 { ICE_IPV4_OFOS, 14 },
256 { ICE_UDP_ILOS, 34 },
257 { ICE_PROTOCOL_LAST, 0 },
261 dummy_udp_packet[] = {
262 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x08, 0x00, /* ICE_ETYPE_OL 12 */
268 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
269 0x00, 0x01, 0x00, 0x00,
270 0x00, 0x11, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
275 0x00, 0x08, 0x00, 0x00,
277 0x00, 0x00, /* 2 bytes for 4 byte alignment */
281 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
283 { ICE_ETYPE_OL, 12 },
284 { ICE_IPV4_OFOS, 14 },
286 { ICE_PROTOCOL_LAST, 0 },
290 dummy_tcp_packet[] = {
291 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
292 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00,
295 0x08, 0x00, /* ICE_ETYPE_OL 12 */
297 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
298 0x00, 0x01, 0x00, 0x00,
299 0x00, 0x06, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
306 0x50, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
313 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
315 { ICE_ETYPE_OL, 12 },
316 { ICE_IPV6_OFOS, 14 },
318 { ICE_PROTOCOL_LAST, 0 },
322 dummy_tcp_ipv6_packet[] = {
323 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
327 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
329 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
330 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
341 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00,
343 0x50, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, /* 2 bytes for 4 byte alignment */
350 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
352 { ICE_ETYPE_OL, 12 },
353 { ICE_IPV6_OFOS, 14 },
354 { ICE_UDP_ILOS, 54 },
355 { ICE_PROTOCOL_LAST, 0 },
359 dummy_udp_ipv6_packet[] = {
360 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
366 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
367 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
378 0x00, 0x08, 0x00, 0x00,
380 0x00, 0x00, /* 2 bytes for 4 byte alignment */
384 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
386 { ICE_IPV4_OFOS, 14 },
389 { ICE_PROTOCOL_LAST, 0 },
393 dummy_udp_gtp_packet[] = {
394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
399 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x11, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
406 0x00, 0x1c, 0x00, 0x00,
408 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
409 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, 0x00, 0x85,
412 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
413 0x00, 0x00, 0x00, 0x00,
417 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
420 { ICE_PROTOCOL_LAST, 0 },
424 dummy_pppoe_packet[] = {
425 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
426 0x00, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
430 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 14 */
431 0x00, 0x4e, 0x00, 0x21,
433 0x45, 0x00, 0x00, 0x30, /* PDU */
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x11, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
/* this is a recipe to profile association bitmap */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration — defined later in this file */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
451 * ice_collect_result_idx - copy result index values
452 * @buf: buffer that contains the result index
453 * @recp: the recipe struct to copy data into
455 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
456 struct ice_sw_recipe *recp)
458 if (buf->content.result_indx & ICE_AQ_RECIPE_ID_IS_ROOT)
459 ice_set_bit(buf->content.result_indx &
460 ~ICE_AQ_RECIPE_ID_IS_ROOT, recp->res_idxs);
464 * ice_collect_result_idx_from_bitmap - copy result index values using bitmap
465 * @hw: pointer to hardware structure
466 * @recp: the recipe struct to copy data into
469 ice_collect_result_idx_from_bitmap(struct ice_hw *hw,
470 struct ice_sw_recipe *recp)
474 while (ICE_MAX_NUM_RECIPES >
475 (bit = ice_find_next_bit(recp->r_bitmap, ICE_MAX_NUM_RECIPES,
477 ice_collect_result_idx(hw->switch_info->recp_list[bit].root_buf,
484 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
485 * @hw: pointer to hardware structure
486 * @recps: struct that we need to populate
487 * @rid: recipe ID that we are populating
488 * @refresh_required: true if we should get recipe to profile mapping from FW
490 * This function is used to populate all the necessary entries into our
491 * bookkeeping so that we have a current list of all the recipes that are
492 * programmed in the firmware.
494 static enum ice_status
495 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
496 bool *refresh_required)
498 u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
499 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
500 u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
501 struct ice_aqc_recipe_data_elem *tmp;
502 u16 num_recps = ICE_MAX_NUM_RECIPES;
503 struct ice_prot_lkup_ext *lkup_exts;
504 enum ice_status status;
507 /* we need a buffer big enough to accommodate all the recipes */
508 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
509 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
511 return ICE_ERR_NO_MEMORY;
513 tmp[0].recipe_indx = rid;
514 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
515 /* non-zero status meaning recipe doesn't exist */
519 /* Get recipe to profile map so that we can get the fv from lkups that
520 * we read for a recipe from FW. Since we want to minimize the number of
521 * times we make this FW call, just make one call and cache the copy
522 * until a new recipe is added. This operation is only required the
523 * first time to get the changes from FW. Then to search existing
524 * entries we don't need to update the cache again until another recipe
527 if (*refresh_required) {
528 ice_get_recp_to_prof_map(hw);
529 *refresh_required = false;
531 lkup_exts = &recps[rid].lkup_exts;
532 /* start populating all the entries for recps[rid] based on lkups from
535 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
536 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
537 struct ice_recp_grp_entry *rg_entry;
538 u8 prof_id, prot = 0;
541 rg_entry = (struct ice_recp_grp_entry *)
542 ice_malloc(hw, sizeof(*rg_entry));
544 status = ICE_ERR_NO_MEMORY;
547 /* When copying, clear the result index enable bit */
548 result_idxs[result_idx] = root_bufs.content.result_indx &
549 ~ICE_AQ_RECIPE_RESULT_EN;
552 recipe_to_profile[tmp[sub_recps].recipe_indx],
553 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
554 /* get the first profile that is associated with rid */
555 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
556 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
557 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
559 rg_entry->fv_idx[i] = lkup_indx;
560 rg_entry->fv_mask[i] =
561 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
563 /* If the recipe is a chained recipe then all its
564 * child recipe's result will have a result index.
565 * To fill fv_words we should not use those result
566 * index, we only need the protocol ids and offsets.
567 * We will skip all the fv_idx which stores result
568 * index in them. We also need to skip any fv_idx which
569 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
570 * valid offset value.
572 if (result_idxs[0] == rg_entry->fv_idx[i] ||
573 result_idxs[1] == rg_entry->fv_idx[i] ||
574 result_idxs[2] == rg_entry->fv_idx[i] ||
575 result_idxs[3] == rg_entry->fv_idx[i] ||
576 result_idxs[4] == rg_entry->fv_idx[i] ||
577 rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
578 rg_entry->fv_idx[i] == 0)
581 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
582 rg_entry->fv_idx[i], &prot, &off);
583 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
584 lkup_exts->fv_words[fv_word_idx].off = off;
587 /* populate rg_list with the data from the child entry of this
590 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
592 lkup_exts->n_val_words = fv_word_idx;
593 recps[rid].n_grp_count = num_recps;
594 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
595 ice_calloc(hw, recps[rid].n_grp_count,
596 sizeof(struct ice_aqc_recipe_data_elem));
597 if (!recps[rid].root_buf)
600 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
601 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
603 ice_memcpy(recps[rid].r_bitmap, tmp->recipe_bitmap,
604 sizeof(recps[rid].r_bitmap), ICE_NONDMA_TO_NONDMA);
606 if (tmp->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
607 recps[rid].chain_idx = tmp->content.result_indx &
608 ~ICE_AQ_RECIPE_RESULT_EN;
610 recps[rid].chain_idx = ICE_INVAL_CHAIN_IND;
612 recps[rid].root_rid = tmp->content.rid & ~ICE_AQ_RECIPE_ID_IS_ROOT;
613 is_root = (tmp->content.rid & ICE_AQ_RECIPE_ID_IS_ROOT) != 0;
614 recps[rid].is_root = is_root;
615 recps[rid].big_recp = (is_root && recps[rid].n_grp_count > 1);
617 /* Copy non-result fv index values to recipe. This call will also update
618 * the result index bitmap appropriately.
620 ice_collect_result_idx_from_bitmap(hw, &recps[rid]);
622 recps[rid].priority = tmp->content.act_ctrl_fwd_priority;
623 recps[rid].recp_created = true;
631 * ice_get_recp_to_prof_map - updates recipe to profile mapping
632 * @hw: pointer to hardware structure
634 * This function is used to populate recipe_to_profile matrix where index to
635 * this array is the recipe ID and the element is the mapping of which profiles
636 * is this recipe mapped to.
639 ice_get_recp_to_prof_map(struct ice_hw *hw)
641 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
644 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
647 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
648 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
649 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
651 ice_memcpy(profile_to_recipe[i], r_bitmap,
652 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
653 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
654 if (ice_is_bit_set(r_bitmap, j))
655 ice_set_bit(i, recipe_to_profile[j]);
660 * ice_init_def_sw_recp - initialize the recipe book keeping tables
661 * @hw: pointer to the HW struct
663 * Allocate memory for the entire recipe table and initialize the structures/
664 * entries corresponding to basic recipes.
666 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
668 struct ice_sw_recipe *recps;
671 recps = (struct ice_sw_recipe *)
672 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
674 return ICE_ERR_NO_MEMORY;
676 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
677 recps[i].root_rid = i;
678 INIT_LIST_HEAD(&recps[i].filt_rules);
679 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
680 INIT_LIST_HEAD(&recps[i].rg_list);
681 ice_init_lock(&recps[i].filt_rule_lock);
684 hw->switch_info->recp_list = recps;
690 * ice_aq_get_sw_cfg - get switch configuration
691 * @hw: pointer to the hardware structure
692 * @buf: pointer to the result buffer
693 * @buf_size: length of the buffer available for response
694 * @req_desc: pointer to requested descriptor
695 * @num_elems: pointer to number of elements
696 * @cd: pointer to command details structure or NULL
698 * Get switch configuration (0x0200) to be placed in 'buff'.
699 * This admin command returns information such as initial VSI/port number
700 * and switch ID it belongs to.
702 * NOTE: *req_desc is both an input/output parameter.
703 * The caller of this function first calls this function with *request_desc set
704 * to 0. If the response from f/w has *req_desc set to 0, all the switch
705 * configuration information has been returned; if non-zero (meaning not all
706 * the information was returned), the caller should call this function again
707 * with *req_desc set to the previous value returned by f/w to get the
708 * next block of switch configuration information.
710 * *num_elems is output only parameter. This reflects the number of elements
711 * in response buffer. The caller of this function to use *num_elems while
712 * parsing the response buffer.
714 static enum ice_status
715 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
716 u16 buf_size, u16 *req_desc, u16 *num_elems,
717 struct ice_sq_cd *cd)
719 struct ice_aqc_get_sw_cfg *cmd;
720 enum ice_status status;
721 struct ice_aq_desc desc;
723 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
724 cmd = &desc.params.get_sw_conf;
725 cmd->element = CPU_TO_LE16(*req_desc);
727 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
729 *req_desc = LE16_TO_CPU(cmd->element);
730 *num_elems = LE16_TO_CPU(cmd->num_elems);
738 * ice_alloc_sw - allocate resources specific to switch
739 * @hw: pointer to the HW struct
740 * @ena_stats: true to turn on VEB stats
741 * @shared_res: true for shared resource, false for dedicated resource
742 * @sw_id: switch ID returned
743 * @counter_id: VEB counter ID returned
745 * allocates switch resources (SWID and VEB counter) (0x0208)
748 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
751 struct ice_aqc_alloc_free_res_elem *sw_buf;
752 struct ice_aqc_res_elem *sw_ele;
753 enum ice_status status;
756 buf_len = sizeof(*sw_buf);
757 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
758 ice_malloc(hw, buf_len);
760 return ICE_ERR_NO_MEMORY;
762 /* Prepare buffer for switch ID.
763 * The number of resource entries in buffer is passed as 1 since only a
764 * single switch/VEB instance is allocated, and hence a single sw_id
767 sw_buf->num_elems = CPU_TO_LE16(1);
769 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
770 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
771 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
773 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
774 ice_aqc_opc_alloc_res, NULL);
777 goto ice_alloc_sw_exit;
779 sw_ele = &sw_buf->elem[0];
780 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
783 /* Prepare buffer for VEB Counter */
784 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
785 struct ice_aqc_alloc_free_res_elem *counter_buf;
786 struct ice_aqc_res_elem *counter_ele;
788 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
789 ice_malloc(hw, buf_len);
791 status = ICE_ERR_NO_MEMORY;
792 goto ice_alloc_sw_exit;
795 /* The number of resource entries in buffer is passed as 1 since
796 * only a single switch/VEB instance is allocated, and hence a
797 * single VEB counter is requested.
799 counter_buf->num_elems = CPU_TO_LE16(1);
800 counter_buf->res_type =
801 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
802 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
803 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
807 ice_free(hw, counter_buf);
808 goto ice_alloc_sw_exit;
810 counter_ele = &counter_buf->elem[0];
811 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
812 ice_free(hw, counter_buf);
816 ice_free(hw, sw_buf);
821 * ice_free_sw - free resources specific to switch
822 * @hw: pointer to the HW struct
823 * @sw_id: switch ID returned
824 * @counter_id: VEB counter ID returned
826 * free switch resources (SWID and VEB counter) (0x0209)
828 * NOTE: This function frees multiple resources. It continues
829 * releasing other resources even after it encounters error.
830 * The error code returned is the last error it encountered.
832 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
834 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
835 enum ice_status status, ret_status;
838 buf_len = sizeof(*sw_buf);
839 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
840 ice_malloc(hw, buf_len);
842 return ICE_ERR_NO_MEMORY;
844 /* Prepare buffer to free for switch ID res.
845 * The number of resource entries in buffer is passed as 1 since only a
846 * single switch/VEB instance is freed, and hence a single sw_id
849 sw_buf->num_elems = CPU_TO_LE16(1);
850 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
851 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
853 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
854 ice_aqc_opc_free_res, NULL);
857 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
859 /* Prepare buffer to free for VEB Counter resource */
860 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
861 ice_malloc(hw, buf_len);
863 ice_free(hw, sw_buf);
864 return ICE_ERR_NO_MEMORY;
867 /* The number of resource entries in buffer is passed as 1 since only a
868 * single switch/VEB instance is freed, and hence a single VEB counter
871 counter_buf->num_elems = CPU_TO_LE16(1);
872 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
873 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
875 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
876 ice_aqc_opc_free_res, NULL);
878 ice_debug(hw, ICE_DBG_SW,
879 "VEB counter resource could not be freed\n");
883 ice_free(hw, counter_buf);
884 ice_free(hw, sw_buf);
890 * @hw: pointer to the HW struct
891 * @vsi_ctx: pointer to a VSI context struct
892 * @cd: pointer to command details structure or NULL
894 * Add a VSI context to the hardware (0x0210)
897 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
898 struct ice_sq_cd *cd)
900 struct ice_aqc_add_update_free_vsi_resp *res;
901 struct ice_aqc_add_get_update_free_vsi *cmd;
902 struct ice_aq_desc desc;
903 enum ice_status status;
905 cmd = &desc.params.vsi_cmd;
906 res = &desc.params.add_update_free_vsi_res;
908 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
910 if (!vsi_ctx->alloc_from_pool)
911 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
912 ICE_AQ_VSI_IS_VALID);
914 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
916 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
918 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
919 sizeof(vsi_ctx->info), cd);
922 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
923 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
924 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
932 * @hw: pointer to the HW struct
933 * @vsi_ctx: pointer to a VSI context struct
934 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
935 * @cd: pointer to command details structure or NULL
937 * Free VSI context info from hardware (0x0213)
940 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
941 bool keep_vsi_alloc, struct ice_sq_cd *cd)
943 struct ice_aqc_add_update_free_vsi_resp *resp;
944 struct ice_aqc_add_get_update_free_vsi *cmd;
945 struct ice_aq_desc desc;
946 enum ice_status status;
948 cmd = &desc.params.vsi_cmd;
949 resp = &desc.params.add_update_free_vsi_res;
951 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
953 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
955 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
957 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
959 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
960 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
968 * @hw: pointer to the HW struct
969 * @vsi_ctx: pointer to a VSI context struct
970 * @cd: pointer to command details structure or NULL
972 * Update VSI context in the hardware (0x0211)
975 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
976 struct ice_sq_cd *cd)
978 struct ice_aqc_add_update_free_vsi_resp *resp;
979 struct ice_aqc_add_get_update_free_vsi *cmd;
980 struct ice_aq_desc desc;
981 enum ice_status status;
983 cmd = &desc.params.vsi_cmd;
984 resp = &desc.params.add_update_free_vsi_res;
986 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
988 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
990 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
992 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
993 sizeof(vsi_ctx->info), cd);
996 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
997 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1004 * ice_is_vsi_valid - check whether the VSI is valid or not
1005 * @hw: pointer to the HW struct
1006 * @vsi_handle: VSI handle
1008 * check whether the VSI is valid or not
1010 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1012 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1016 * ice_get_hw_vsi_num - return the HW VSI number
1017 * @hw: pointer to the HW struct
1018 * @vsi_handle: VSI handle
1020 * return the HW VSI number
1021 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1023 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1025 return hw->vsi_ctx[vsi_handle]->vsi_num;
1029 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1030 * @hw: pointer to the HW struct
1031 * @vsi_handle: VSI handle
1033 * return the VSI context entry for a given VSI handle
1035 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1037 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1041 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1042 * @hw: pointer to the HW struct
1043 * @vsi_handle: VSI handle
1044 * @vsi: VSI context pointer
1046 * save the VSI context entry for a given VSI handle
1049 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1051 hw->vsi_ctx[vsi_handle] = vsi;
1055 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1056 * @hw: pointer to the HW struct
1057 * @vsi_handle: VSI handle
1059 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1061 struct ice_vsi_ctx *vsi;
1064 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1067 ice_for_each_traffic_class(i) {
1068 if (vsi->lan_q_ctx[i]) {
1069 ice_free(hw, vsi->lan_q_ctx[i]);
1070 vsi->lan_q_ctx[i] = NULL;
1076 * ice_clear_vsi_ctx - clear the VSI context entry
1077 * @hw: pointer to the HW struct
1078 * @vsi_handle: VSI handle
1080 * clear the VSI context entry
1082 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1084 struct ice_vsi_ctx *vsi;
1086 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1088 ice_clear_vsi_q_ctx(hw, vsi_handle);
1090 hw->vsi_ctx[vsi_handle] = NULL;
1095 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1096 * @hw: pointer to the HW struct
1098 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1102 for (i = 0; i < ICE_MAX_VSI; i++)
1103 ice_clear_vsi_ctx(hw, i);
1107 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1108 * @hw: pointer to the HW struct
1109 * @vsi_handle: unique VSI handle provided by drivers
1110 * @vsi_ctx: pointer to a VSI context struct
1111 * @cd: pointer to command details structure or NULL
1113 * Add a VSI context to the hardware also add it into the VSI handle list.
1114 * If this function gets called after reset for existing VSIs then update
1115 * with the new HW VSI number in the corresponding VSI handle list entry.
1118 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1119 struct ice_sq_cd *cd)
1121 struct ice_vsi_ctx *tmp_vsi_ctx;
1122 enum ice_status status;
1124 if (vsi_handle >= ICE_MAX_VSI)
1125 return ICE_ERR_PARAM;
1126 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1129 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1131 /* Create a new VSI context */
1132 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1133 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1135 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1136 return ICE_ERR_NO_MEMORY;
1138 *tmp_vsi_ctx = *vsi_ctx;
1140 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1142 /* update with new HW VSI num */
1143 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
1144 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1151 * ice_free_vsi- free VSI context from hardware and VSI handle list
1152 * @hw: pointer to the HW struct
1153 * @vsi_handle: unique VSI handle
1154 * @vsi_ctx: pointer to a VSI context struct
1155 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1156 * @cd: pointer to command details structure or NULL
1158 * Free VSI context info from hardware as well as from VSI handle list
1161 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1162 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1164 enum ice_status status;
1166 if (!ice_is_vsi_valid(hw, vsi_handle))
1167 return ICE_ERR_PARAM;
1168 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1169 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1171 ice_clear_vsi_ctx(hw, vsi_handle);
1177 * @hw: pointer to the HW struct
1178 * @vsi_handle: unique VSI handle
1179 * @vsi_ctx: pointer to a VSI context struct
1180 * @cd: pointer to command details structure or NULL
1182 * Update VSI context in the hardware
1185 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1186 struct ice_sq_cd *cd)
1188 if (!ice_is_vsi_valid(hw, vsi_handle))
1189 return ICE_ERR_PARAM;
1190 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1191 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1195 * ice_aq_get_vsi_params
1196 * @hw: pointer to the HW struct
1197 * @vsi_ctx: pointer to a VSI context struct
1198 * @cd: pointer to command details structure or NULL
1200 * Get VSI context info from hardware (0x0212)
1203 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1204 struct ice_sq_cd *cd)
1206 struct ice_aqc_add_get_update_free_vsi *cmd;
1207 struct ice_aqc_get_vsi_resp *resp;
1208 struct ice_aq_desc desc;
1209 enum ice_status status;
1211 cmd = &desc.params.vsi_cmd;
1212 resp = &desc.params.get_vsi_resp;
1214 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1216 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1218 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1219 sizeof(vsi_ctx->info), cd);
1221 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1223 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1224 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1231 * ice_aq_add_update_mir_rule - add/update a mirror rule
1232 * @hw: pointer to the HW struct
1233 * @rule_type: Rule Type
1234 * @dest_vsi: VSI number to which packets will be mirrored
1235 * @count: length of the list
1236 * @mr_buf: buffer for list of mirrored VSI numbers
1237 * @cd: pointer to command details structure or NULL
1240 * Add/Update Mirror Rule (0x260).
1243 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1244 u16 count, struct ice_mir_rule_buf *mr_buf,
1245 struct ice_sq_cd *cd, u16 *rule_id)
1247 struct ice_aqc_add_update_mir_rule *cmd;
1248 struct ice_aq_desc desc;
1249 enum ice_status status;
1250 __le16 *mr_list = NULL;
1253 switch (rule_type) {
1254 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1255 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1256 /* Make sure count and mr_buf are set for these rule_types */
1257 if (!(count && mr_buf))
1258 return ICE_ERR_PARAM;
1260 buf_size = count * sizeof(__le16);
1261 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1263 return ICE_ERR_NO_MEMORY;
1265 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1266 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1267 /* Make sure count and mr_buf are not set for these
1270 if (count || mr_buf)
1271 return ICE_ERR_PARAM;
1274 ice_debug(hw, ICE_DBG_SW,
1275 "Error due to unsupported rule_type %u\n", rule_type);
1276 return ICE_ERR_OUT_OF_RANGE;
1279 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1281 /* Pre-process 'mr_buf' items for add/update of virtual port
1282 * ingress/egress mirroring (but not physical port ingress/egress
1288 for (i = 0; i < count; i++) {
1291 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1293 /* Validate specified VSI number, make sure it is less
1294 * than ICE_MAX_VSI, if not return with error.
1296 if (id >= ICE_MAX_VSI) {
1297 ice_debug(hw, ICE_DBG_SW,
1298 "Error VSI index (%u) out-of-range\n",
1300 ice_free(hw, mr_list);
1301 return ICE_ERR_OUT_OF_RANGE;
1304 /* add VSI to mirror rule */
1307 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1308 else /* remove VSI from mirror rule */
1309 mr_list[i] = CPU_TO_LE16(id);
1313 cmd = &desc.params.add_update_rule;
1314 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1315 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1316 ICE_AQC_RULE_ID_VALID_M);
1317 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1318 cmd->num_entries = CPU_TO_LE16(count);
1319 cmd->dest = CPU_TO_LE16(dest_vsi);
1321 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1323 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1325 ice_free(hw, mr_list);
1331 * ice_aq_delete_mir_rule - delete a mirror rule
1332 * @hw: pointer to the HW struct
1333 * @rule_id: Mirror rule ID (to be deleted)
1334 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1335 * otherwise it is returned to the shared pool
1336 * @cd: pointer to command details structure or NULL
1338 * Delete Mirror Rule (0x261).
1341 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1342 struct ice_sq_cd *cd)
1344 struct ice_aqc_delete_mir_rule *cmd;
1345 struct ice_aq_desc desc;
1347 /* rule_id should be in the range 0...63 */
1348 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1349 return ICE_ERR_OUT_OF_RANGE;
1351 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1353 cmd = &desc.params.del_rule;
1354 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1355 cmd->rule_id = CPU_TO_LE16(rule_id);
1358 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1360 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1364 * ice_aq_alloc_free_vsi_list
1365 * @hw: pointer to the HW struct
1366 * @vsi_list_id: VSI list ID returned or used for lookup
1367 * @lkup_type: switch rule filter lookup type
1368 * @opc: switch rules population command type - pass in the command opcode
1370 * allocates or free a VSI list resource
1372 static enum ice_status
1373 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1374 enum ice_sw_lkup_type lkup_type,
1375 enum ice_adminq_opc opc)
1377 struct ice_aqc_alloc_free_res_elem *sw_buf;
1378 struct ice_aqc_res_elem *vsi_ele;
1379 enum ice_status status;
1382 buf_len = sizeof(*sw_buf);
1383 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1384 ice_malloc(hw, buf_len);
1386 return ICE_ERR_NO_MEMORY;
1387 sw_buf->num_elems = CPU_TO_LE16(1);
1389 if (lkup_type == ICE_SW_LKUP_MAC ||
1390 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1391 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1392 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1393 lkup_type == ICE_SW_LKUP_PROMISC ||
1394 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1395 lkup_type == ICE_SW_LKUP_LAST) {
1396 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1397 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1399 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1401 status = ICE_ERR_PARAM;
1402 goto ice_aq_alloc_free_vsi_list_exit;
1405 if (opc == ice_aqc_opc_free_res)
1406 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1408 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1410 goto ice_aq_alloc_free_vsi_list_exit;
1412 if (opc == ice_aqc_opc_alloc_res) {
1413 vsi_ele = &sw_buf->elem[0];
1414 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1417 ice_aq_alloc_free_vsi_list_exit:
1418 ice_free(hw, sw_buf);
1423 * ice_aq_set_storm_ctrl - Sets storm control configuration
1424 * @hw: pointer to the HW struct
1425 * @bcast_thresh: represents the upper threshold for broadcast storm control
1426 * @mcast_thresh: represents the upper threshold for multicast storm control
1427 * @ctl_bitmask: storm control control knobs
1429 * Sets the storm control configuration (0x0280)
1432 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1435 struct ice_aqc_storm_cfg *cmd;
1436 struct ice_aq_desc desc;
1438 cmd = &desc.params.storm_conf;
1440 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1442 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1443 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1444 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1446 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1450 * ice_aq_get_storm_ctrl - gets storm control configuration
1451 * @hw: pointer to the HW struct
1452 * @bcast_thresh: represents the upper threshold for broadcast storm control
1453 * @mcast_thresh: represents the upper threshold for multicast storm control
1454 * @ctl_bitmask: storm control control knobs
1456 * Gets the storm control configuration (0x0281)
1459 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1462 enum ice_status status;
1463 struct ice_aq_desc desc;
1465 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1467 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1469 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1472 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1475 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1478 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1485 * ice_aq_sw_rules - add/update/remove switch rules
1486 * @hw: pointer to the HW struct
1487 * @rule_list: pointer to switch rule population list
1488 * @rule_list_sz: total size of the rule list in bytes
1489 * @num_rules: number of switch rules in the rule_list
1490 * @opc: switch rules population command type - pass in the command opcode
1491 * @cd: pointer to command details structure or NULL
1493 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1495 static enum ice_status
1496 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1497 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1499 struct ice_aq_desc desc;
1501 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1503 if (opc != ice_aqc_opc_add_sw_rules &&
1504 opc != ice_aqc_opc_update_sw_rules &&
1505 opc != ice_aqc_opc_remove_sw_rules)
1506 return ICE_ERR_PARAM;
1508 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1510 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1511 desc.params.sw_rules.num_rules_fltr_entry_index =
1512 CPU_TO_LE16(num_rules);
1513 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1517 * ice_aq_add_recipe - add switch recipe
1518 * @hw: pointer to the HW struct
1519 * @s_recipe_list: pointer to switch rule population list
1520 * @num_recipes: number of switch recipes in the list
1521 * @cd: pointer to command details structure or NULL
1526 ice_aq_add_recipe(struct ice_hw *hw,
1527 struct ice_aqc_recipe_data_elem *s_recipe_list,
1528 u16 num_recipes, struct ice_sq_cd *cd)
1530 struct ice_aqc_add_get_recipe *cmd;
1531 struct ice_aq_desc desc;
1534 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1535 cmd = &desc.params.add_get_recipe;
1536 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1538 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1539 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1541 buf_size = num_recipes * sizeof(*s_recipe_list);
1543 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1547 * ice_aq_get_recipe - get switch recipe
1548 * @hw: pointer to the HW struct
1549 * @s_recipe_list: pointer to switch rule population list
1550 * @num_recipes: pointer to the number of recipes (input and output)
1551 * @recipe_root: root recipe number of recipe(s) to retrieve
1552 * @cd: pointer to command details structure or NULL
1556 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1557 * On output, *num_recipes will equal the number of entries returned in
1560 * The caller must supply enough space in s_recipe_list to hold all possible
1561 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1564 ice_aq_get_recipe(struct ice_hw *hw,
1565 struct ice_aqc_recipe_data_elem *s_recipe_list,
1566 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1568 struct ice_aqc_add_get_recipe *cmd;
1569 struct ice_aq_desc desc;
1570 enum ice_status status;
1573 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1574 return ICE_ERR_PARAM;
1576 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1577 cmd = &desc.params.add_get_recipe;
1578 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1580 cmd->return_index = CPU_TO_LE16(recipe_root);
1581 cmd->num_sub_recipes = 0;
1583 buf_size = *num_recipes * sizeof(*s_recipe_list);
1585 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1586 /* cppcheck-suppress constArgument */
1587 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1593 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1594 * @hw: pointer to the HW struct
1595 * @profile_id: package profile ID to associate the recipe with
1596 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1597 * @cd: pointer to command details structure or NULL
1598 * Recipe to profile association (0x0291)
1601 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1602 struct ice_sq_cd *cd)
1604 struct ice_aqc_recipe_to_profile *cmd;
1605 struct ice_aq_desc desc;
1607 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1608 cmd = &desc.params.recipe_to_profile;
1609 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1610 cmd->profile_id = CPU_TO_LE16(profile_id);
1611 /* Set the recipe ID bit in the bitmask to let the device know which
1612 * profile we are associating the recipe to
1614 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1615 ICE_NONDMA_TO_NONDMA);
1617 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1621 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1622 * @hw: pointer to the HW struct
1623 * @profile_id: package profile ID to associate the recipe with
1624 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1625 * @cd: pointer to command details structure or NULL
1626 * Associate profile ID with given recipe (0x0293)
1629 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1630 struct ice_sq_cd *cd)
1632 struct ice_aqc_recipe_to_profile *cmd;
1633 struct ice_aq_desc desc;
1634 enum ice_status status;
1636 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1637 cmd = &desc.params.recipe_to_profile;
1638 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1639 cmd->profile_id = CPU_TO_LE16(profile_id);
1641 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1643 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1644 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1650 * ice_alloc_recipe - add recipe resource
1651 * @hw: pointer to the hardware structure
1652 * @rid: recipe ID returned as response to AQ call
1654 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1656 struct ice_aqc_alloc_free_res_elem *sw_buf;
1657 enum ice_status status;
1660 buf_len = sizeof(*sw_buf);
1661 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1663 return ICE_ERR_NO_MEMORY;
1665 sw_buf->num_elems = CPU_TO_LE16(1);
1666 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1667 ICE_AQC_RES_TYPE_S) |
1668 ICE_AQC_RES_TYPE_FLAG_SHARED);
1669 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1670 ice_aqc_opc_alloc_res, NULL);
1672 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1673 ice_free(hw, sw_buf);
1678 /* ice_init_port_info - Initialize port_info with switch configuration data
1679 * @pi: pointer to port_info
1680 * @vsi_port_num: VSI number or port number
1681 * @type: Type of switch element (port or VSI)
1682 * @swid: switch ID of the switch the element is attached to
1683 * @pf_vf_num: PF or VF number
1684 * @is_vf: true if the element is a VF, false otherwise
1687 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1688 u16 swid, u16 pf_vf_num, bool is_vf)
1691 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1692 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1694 pi->pf_vf_num = pf_vf_num;
1696 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1697 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1700 ice_debug(pi->hw, ICE_DBG_SW,
1701 "incorrect VSI/port type received\n");
1706 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1707 * @hw: pointer to the hardware structure
1709 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1711 struct ice_aqc_get_sw_cfg_resp *rbuf;
1712 enum ice_status status;
1713 u16 num_total_ports;
1719 num_total_ports = 1;
1721 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1722 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1725 return ICE_ERR_NO_MEMORY;
1727 /* Multiple calls to ice_aq_get_sw_cfg may be required
1728 * to get all the switch configuration information. The need
1729 * for additional calls is indicated by ice_aq_get_sw_cfg
1730 * writing a non-zero value in req_desc
1733 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1734 &req_desc, &num_elems, NULL);
1739 for (i = 0; i < num_elems; i++) {
1740 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1741 u16 pf_vf_num, swid, vsi_port_num;
1745 ele = rbuf[i].elements;
1746 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1747 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1749 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1750 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1752 swid = LE16_TO_CPU(ele->swid);
1754 if (LE16_TO_CPU(ele->pf_vf_num) &
1755 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1758 type = LE16_TO_CPU(ele->vsi_port_num) >>
1759 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1762 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1763 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1764 if (j == num_total_ports) {
1765 ice_debug(hw, ICE_DBG_SW,
1766 "more ports than expected\n");
1767 status = ICE_ERR_CFG;
1770 ice_init_port_info(hw->port_info,
1771 vsi_port_num, type, swid,
1779 } while (req_desc && !status);
1783 ice_free(hw, (void *)rbuf);
1789 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1790 * @hw: pointer to the hardware structure
1791 * @fi: filter info structure to fill/update
1793 * This helper function populates the lb_en and lan_en elements of the provided
1794 * ice_fltr_info struct using the switch's type and characteristics of the
1795 * switch rule being configured.
1797 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1801 if ((fi->flag & ICE_FLTR_TX) &&
1802 (fi->fltr_act == ICE_FWD_TO_VSI ||
1803 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1804 fi->fltr_act == ICE_FWD_TO_Q ||
1805 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1806 /* Setting LB for prune actions will result in replicated
1807 * packets to the internal switch that will be dropped.
1809 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1812 /* Set lan_en to TRUE if
1813 * 1. The switch is a VEB AND
1815 * 2.1 The lookup is a directional lookup like ethertype,
1816 * promiscuous, ethertype-MAC, promiscuous-VLAN
1817 * and default-port OR
1818 * 2.2 The lookup is VLAN, OR
1819 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1820 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1824 * The switch is a VEPA.
1826 * In all other cases, the LAN enable has to be set to false.
1829 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1830 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1831 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1832 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1833 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1834 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1835 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1836 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1837 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1838 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1847 * ice_ilog2 - Calculates integer log base 2 of a number
1848 * @n: number on which to perform operation
1850 static int ice_ilog2(u64 n)
1854 for (i = 63; i >= 0; i--)
1855 if (((u64)1 << i) & n)
1862 * ice_fill_sw_rule - Helper function to fill switch rule structure
1863 * @hw: pointer to the hardware structure
1864 * @f_info: entry containing packet forwarding information
1865 * @s_rule: switch rule structure to be filled in based on mac_entry
1866 * @opc: switch rules population command type - pass in the command opcode
1869 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1870 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1872 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1880 if (opc == ice_aqc_opc_remove_sw_rules) {
1881 s_rule->pdata.lkup_tx_rx.act = 0;
1882 s_rule->pdata.lkup_tx_rx.index =
1883 CPU_TO_LE16(f_info->fltr_rule_id);
1884 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1888 eth_hdr_sz = sizeof(dummy_eth_header);
1889 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1891 /* initialize the ether header with a dummy header */
1892 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1893 ice_fill_sw_info(hw, f_info);
1895 switch (f_info->fltr_act) {
1896 case ICE_FWD_TO_VSI:
1897 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1898 ICE_SINGLE_ACT_VSI_ID_M;
1899 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1900 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1901 ICE_SINGLE_ACT_VALID_BIT;
1903 case ICE_FWD_TO_VSI_LIST:
1904 act |= ICE_SINGLE_ACT_VSI_LIST;
1905 act |= (f_info->fwd_id.vsi_list_id <<
1906 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1907 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1908 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1909 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1910 ICE_SINGLE_ACT_VALID_BIT;
1913 act |= ICE_SINGLE_ACT_TO_Q;
1914 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1915 ICE_SINGLE_ACT_Q_INDEX_M;
1917 case ICE_DROP_PACKET:
1918 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1919 ICE_SINGLE_ACT_VALID_BIT;
1921 case ICE_FWD_TO_QGRP:
1922 q_rgn = f_info->qgrp_size > 0 ?
1923 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1924 act |= ICE_SINGLE_ACT_TO_Q;
1925 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1926 ICE_SINGLE_ACT_Q_INDEX_M;
1927 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1928 ICE_SINGLE_ACT_Q_REGION_M;
1935 act |= ICE_SINGLE_ACT_LB_ENABLE;
1937 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1939 switch (f_info->lkup_type) {
1940 case ICE_SW_LKUP_MAC:
1941 daddr = f_info->l_data.mac.mac_addr;
1943 case ICE_SW_LKUP_VLAN:
1944 vlan_id = f_info->l_data.vlan.vlan_id;
1945 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1946 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1947 act |= ICE_SINGLE_ACT_PRUNE;
1948 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1951 case ICE_SW_LKUP_ETHERTYPE_MAC:
1952 daddr = f_info->l_data.ethertype_mac.mac_addr;
1954 case ICE_SW_LKUP_ETHERTYPE:
1955 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1956 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1958 case ICE_SW_LKUP_MAC_VLAN:
1959 daddr = f_info->l_data.mac_vlan.mac_addr;
1960 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1962 case ICE_SW_LKUP_PROMISC_VLAN:
1963 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1965 case ICE_SW_LKUP_PROMISC:
1966 daddr = f_info->l_data.mac_vlan.mac_addr;
1972 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1973 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1974 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1976 /* Recipe set depending on lookup type */
1977 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1978 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1979 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1982 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1983 ICE_NONDMA_TO_NONDMA);
1985 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1986 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1987 *off = CPU_TO_BE16(vlan_id);
1990 /* Create the switch rule with the final dummy Ethernet header */
1991 if (opc != ice_aqc_opc_update_sw_rules)
1992 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1996 * ice_add_marker_act
1997 * @hw: pointer to the hardware structure
1998 * @m_ent: the management entry for which sw marker needs to be added
1999 * @sw_marker: sw marker to tag the Rx descriptor with
2000 * @l_id: large action resource ID
2002 * Create a large action to hold software marker and update the switch rule
2003 * entry pointed by m_ent with newly created large action
2005 static enum ice_status
2006 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2007 u16 sw_marker, u16 l_id)
2009 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2010 /* For software marker we need 3 large actions
2011 * 1. FWD action: FWD TO VSI or VSI LIST
2012 * 2. GENERIC VALUE action to hold the profile ID
2013 * 3. GENERIC VALUE action to hold the software marker ID
2015 const u16 num_lg_acts = 3;
2016 enum ice_status status;
2022 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2023 return ICE_ERR_PARAM;
2025 /* Create two back-to-back switch rules and submit them to the HW using
2026 * one memory buffer:
2030 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2031 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2032 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2034 return ICE_ERR_NO_MEMORY;
2036 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2038 /* Fill in the first switch rule i.e. large action */
2039 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2040 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2041 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2043 /* First action VSI forwarding or VSI list forwarding depending on how
2046 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2047 m_ent->fltr_info.fwd_id.hw_vsi_id;
2049 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2050 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2051 ICE_LG_ACT_VSI_LIST_ID_M;
2052 if (m_ent->vsi_count > 1)
2053 act |= ICE_LG_ACT_VSI_LIST;
2054 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2056 /* Second action descriptor type */
2057 act = ICE_LG_ACT_GENERIC;
2059 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2060 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2062 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2063 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2065 /* Third action Marker value */
2066 act |= ICE_LG_ACT_GENERIC;
2067 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2068 ICE_LG_ACT_GENERIC_VALUE_M;
2070 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2072 /* call the fill switch rule to fill the lookup Tx Rx structure */
2073 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2074 ice_aqc_opc_update_sw_rules);
2076 /* Update the action to point to the large action ID */
2077 rx_tx->pdata.lkup_tx_rx.act =
2078 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2079 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2080 ICE_SINGLE_ACT_PTR_VAL_M));
2082 /* Use the filter rule ID of the previously created rule with single
2083 * act. Once the update happens, hardware will treat this as large
2086 rx_tx->pdata.lkup_tx_rx.index =
2087 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2089 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2090 ice_aqc_opc_update_sw_rules, NULL);
2092 m_ent->lg_act_idx = l_id;
2093 m_ent->sw_marker_id = sw_marker;
2096 ice_free(hw, lg_act);
2101 * ice_add_counter_act - add/update filter rule with counter action
2102 * @hw: pointer to the hardware structure
2103 * @m_ent: the management entry for which counter needs to be added
2104 * @counter_id: VLAN counter ID returned as part of allocate resource
2105 * @l_id: large action resource ID
2107 static enum ice_status
2108 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2109 u16 counter_id, u16 l_id)
2111 struct ice_aqc_sw_rules_elem *lg_act;
2112 struct ice_aqc_sw_rules_elem *rx_tx;
2113 enum ice_status status;
2114 /* 2 actions will be added while adding a large action counter */
2115 const int num_acts = 2;
2122 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2123 return ICE_ERR_PARAM;
2125 /* Create two back-to-back switch rules and submit them to the HW using
2126 * one memory buffer:
2130 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2131 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2132 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2135 return ICE_ERR_NO_MEMORY;
2137 rx_tx = (struct ice_aqc_sw_rules_elem *)
2138 ((u8 *)lg_act + lg_act_size);
2140 /* Fill in the first switch rule i.e. large action */
2141 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2142 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2143 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2145 /* First action VSI forwarding or VSI list forwarding depending on how
2148 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2149 m_ent->fltr_info.fwd_id.hw_vsi_id;
2151 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2152 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2153 ICE_LG_ACT_VSI_LIST_ID_M;
2154 if (m_ent->vsi_count > 1)
2155 act |= ICE_LG_ACT_VSI_LIST;
2156 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2158 /* Second action counter ID */
2159 act = ICE_LG_ACT_STAT_COUNT;
2160 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2161 ICE_LG_ACT_STAT_COUNT_M;
2162 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2164 /* call the fill switch rule to fill the lookup Tx Rx structure */
2165 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2166 ice_aqc_opc_update_sw_rules);
2168 act = ICE_SINGLE_ACT_PTR;
2169 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2170 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2172 /* Use the filter rule ID of the previously created rule with single
2173 * act. Once the update happens, hardware will treat this as large
2176 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2177 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2179 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2180 ice_aqc_opc_update_sw_rules, NULL);
2182 m_ent->lg_act_idx = l_id;
2183 m_ent->counter_index = counter_id;
2186 ice_free(hw, lg_act);
2191 * ice_create_vsi_list_map
2192 * @hw: pointer to the hardware structure
2193 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2194 * @num_vsi: number of VSI handles in the array
2195 * @vsi_list_id: VSI list ID generated as part of allocate resource
2197 * Helper function to create a new entry of VSI list ID to VSI mapping
2198 * using the given VSI list ID
2200 static struct ice_vsi_list_map_info *
2201 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2204 struct ice_switch_info *sw = hw->switch_info;
2205 struct ice_vsi_list_map_info *v_map;
2208 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2213 v_map->vsi_list_id = vsi_list_id;
2215 for (i = 0; i < num_vsi; i++)
2216 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2218 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2223 * ice_update_vsi_list_rule
2224 * @hw: pointer to the hardware structure
2225 * @vsi_handle_arr: array of VSI handles to form a VSI list
2226 * @num_vsi: number of VSI handles in the array
2227 * @vsi_list_id: VSI list ID generated as part of allocate resource
2228 * @remove: Boolean value to indicate if this is a remove action
2229 * @opc: switch rules population command type - pass in the command opcode
2230 * @lkup_type: lookup type of the filter
2232 * Call AQ command to add a new switch rule or update existing switch rule
2233 * using the given VSI list ID
2235 static enum ice_status
2236 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2237 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2238 enum ice_sw_lkup_type lkup_type)
2240 struct ice_aqc_sw_rules_elem *s_rule;
2241 enum ice_status status;
2247 return ICE_ERR_PARAM;
2249 if (lkup_type == ICE_SW_LKUP_MAC ||
2250 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2251 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2252 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2253 lkup_type == ICE_SW_LKUP_PROMISC ||
2254 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2255 lkup_type == ICE_SW_LKUP_LAST)
2256 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2257 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2258 else if (lkup_type == ICE_SW_LKUP_VLAN)
2259 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2260 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2262 return ICE_ERR_PARAM;
2264 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2265 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2267 return ICE_ERR_NO_MEMORY;
2268 for (i = 0; i < num_vsi; i++) {
2269 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2270 status = ICE_ERR_PARAM;
2273 /* AQ call requires hw_vsi_id(s) */
2274 s_rule->pdata.vsi_list.vsi[i] =
2275 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2278 s_rule->type = CPU_TO_LE16(type);
2279 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2280 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2282 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2285 ice_free(hw, s_rule);
2290 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2291 * @hw: pointer to the HW struct
2292 * @vsi_handle_arr: array of VSI handles to form a VSI list
2293 * @num_vsi: number of VSI handles in the array
2294 * @vsi_list_id: stores the ID of the VSI list to be created
2295 * @lkup_type: switch rule filter's lookup type
2297 static enum ice_status
2298 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2299 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2301 enum ice_status status;
/* Step 1: allocate a VSI list resource from firmware; on success the new
 * list ID is returned through *vsi_list_id.
 */
2303 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2304 ice_aqc_opc_alloc_res);
2308 /* Update the newly created VSI list to include the specified VSIs */
2309 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2310 *vsi_list_id, false,
2311 ice_aqc_opc_add_sw_rules, lkup_type);
2315 * ice_create_pkt_fwd_rule
2316 * @hw: pointer to the hardware structure
2317 * @f_entry: entry containing packet forwarding information
2319 * Create switch rule with given filter information and add an entry
2320 * to the corresponding filter management list to track this switch rule
2323 static enum ice_status
2324 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2325 struct ice_fltr_list_entry *f_entry)
2327 struct ice_fltr_mgmt_list_entry *fm_entry;
2328 struct ice_aqc_sw_rules_elem *s_rule;
2329 enum ice_sw_lkup_type l_type;
2330 struct ice_sw_recipe *recp;
2331 enum ice_status status;
/* Two allocations: the AQ rule buffer (always freed at the exit label)
 * and the bookkeeping entry (freed only on AQ failure, otherwise it is
 * linked into the recipe's filter list and outlives this call).
 */
2333 s_rule = (struct ice_aqc_sw_rules_elem *)
2334 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2336 return ICE_ERR_NO_MEMORY;
2337 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2338 ice_malloc(hw, sizeof(*fm_entry));
2340 status = ICE_ERR_NO_MEMORY;
2341 goto ice_create_pkt_fwd_rule_exit;
2344 fm_entry->fltr_info = f_entry->fltr_info;
2346 /* Initialize all the fields for the management entry */
2347 fm_entry->vsi_count = 1;
2348 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2349 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2350 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2352 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2353 ice_aqc_opc_add_sw_rules);
2355 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2356 ice_aqc_opc_add_sw_rules, NULL);
2358 ice_free(hw, fm_entry);
2359 goto ice_create_pkt_fwd_rule_exit;
/* Firmware returns the assigned rule index; record it in both the
 * caller's entry and the bookkeeping copy.
 */
2362 f_entry->fltr_info.fltr_rule_id =
2363 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2364 fm_entry->fltr_info.fltr_rule_id =
2365 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2367 /* The book keeping entries will get removed when base driver
2368 * calls remove filter AQ command
2370 l_type = fm_entry->fltr_info.lkup_type;
2371 recp = &hw->switch_info->recp_list[l_type];
2372 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2374 ice_create_pkt_fwd_rule_exit:
2375 ice_free(hw, s_rule);
2380 * ice_update_pkt_fwd_rule
2381 * @hw: pointer to the hardware structure
2382 * @f_info: filter information for switch rule
2384 * Call AQ command to update a previously created switch rule with a
2387 static enum ice_status
2388 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2390 struct ice_aqc_sw_rules_elem *s_rule;
2391 enum ice_status status;
2393 s_rule = (struct ice_aqc_sw_rules_elem *)
2394 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2396 return ICE_ERR_NO_MEMORY;
2398 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* For an update, the element must carry the rule ID assigned by
 * firmware when the rule was originally added.
 */
2400 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2402 /* Update switch rule with new rule set to forward VSI list */
2403 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2404 ice_aqc_opc_update_sw_rules, NULL);
2406 ice_free(hw, s_rule);
2411 * ice_update_sw_rule_bridge_mode
2412 * @hw: pointer to the HW struct
2414 * Updates unicast switch filter rules based on VEB/VEPA mode
2416 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2418 struct ice_switch_info *sw = hw->switch_info;
2419 struct ice_fltr_mgmt_list_entry *fm_entry;
2420 enum ice_status status = ICE_SUCCESS;
2421 struct LIST_HEAD_TYPE *rule_head;
2422 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC lookup rules are walked; the list is traversed under its
 * dedicated rule lock.
 */
2424 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2425 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2427 ice_acquire_lock(rule_lock);
2428 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2430 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2431 u8 *addr = fi->l_data.mac.mac_addr;
2433 /* Update unicast Tx rules to reflect the selected
/* Only Tx-flagged unicast rules with a forwarding action are
 * re-programmed; multicast/broadcast and Rx rules are untouched.
 */
2436 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2437 (fi->fltr_act == ICE_FWD_TO_VSI ||
2438 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2439 fi->fltr_act == ICE_FWD_TO_Q ||
2440 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2441 status = ice_update_pkt_fwd_rule(hw, fi);
2447 ice_release_lock(rule_lock);
2453 * ice_add_update_vsi_list
2454 * @hw: pointer to the hardware structure
2455 * @m_entry: pointer to current filter management list entry
2456 * @cur_fltr: filter information from the book keeping entry
2457 * @new_fltr: filter information with the new VSI to be added
2459 * Call AQ command to add or update previously created VSI list with new VSI.
2461 * Helper function to do book keeping associated with adding filter information
2462 * The algorithm to do the book keeping is described below :
2463 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2464 * if only one VSI has been added till now
2465 * Allocate a new VSI list and add two VSIs
2466 * to this list using switch rule command
2467 * Update the previously created switch rule with the
2468 * newly created VSI list ID
2469 * if a VSI list was previously created
2470 * Add the new VSI to the previously created VSI list set
2471 * using the update switch rule command
2473 static enum ice_status
2474 ice_add_update_vsi_list(struct ice_hw *hw,
2475 struct ice_fltr_mgmt_list_entry *m_entry,
2476 struct ice_fltr_info *cur_fltr,
2477 struct ice_fltr_info *new_fltr)
2479 enum ice_status status = ICE_SUCCESS;
2480 u16 vsi_list_id = 0;
/* Queue / queue-group forwarding cannot be combined into a VSI list;
 * reject those action combinations up front.
 */
2482 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2483 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2484 return ICE_ERR_NOT_IMPL;
2486 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2487 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2488 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2489 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2490 return ICE_ERR_NOT_IMPL;
2492 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2493 /* Only one entry existed in the mapping and it was not already
2494 * a part of a VSI list. So, create a VSI list with the old and
2497 struct ice_fltr_info tmp_fltr;
2498 u16 vsi_handle_arr[2];
2500 /* A rule already exists with the new VSI being added */
2501 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2502 return ICE_ERR_ALREADY_EXISTS;
2504 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2505 vsi_handle_arr[1] = new_fltr->vsi_handle;
2506 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2508 new_fltr->lkup_type);
2512 tmp_fltr = *new_fltr;
2513 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2514 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2515 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2516 /* Update the previous switch rule of "MAC forward to VSI" to
2517 * "MAC fwd to VSI list"
2519 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the bookkeeping copy in sync with the reprogrammed rule */
2523 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2524 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2525 m_entry->vsi_list_info =
2526 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2529 /* If this entry was large action then the large action needs
2530 * to be updated to point to FWD to VSI list
2532 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2534 ice_add_marker_act(hw, m_entry,
2535 m_entry->sw_marker_id,
2536 m_entry->lg_act_idx);
/* Else branch: a VSI list already exists, just append the new VSI */
2538 u16 vsi_handle = new_fltr->vsi_handle;
2539 enum ice_adminq_opc opcode;
2541 if (!m_entry->vsi_list_info)
2544 /* A rule already exists with the new VSI being added */
2545 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2548 /* Update the previously created VSI list set with
2549 * the new VSI ID passed in
2551 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2552 opcode = ice_aqc_opc_update_sw_rules;
2554 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2555 vsi_list_id, false, opcode,
2556 new_fltr->lkup_type);
2557 /* update VSI list mapping info with new VSI ID */
2559 ice_set_bit(vsi_handle,
2560 m_entry->vsi_list_info->vsi_map);
2563 m_entry->vsi_count++;
2568 * ice_find_rule_entry - Search a rule entry
2569 * @hw: pointer to the hardware structure
2570 * @recp_id: lookup type for which the specified rule needs to be searched
2571 * @f_info: rule information
2573 * Helper function to search for a given rule entry
2574 * Returns pointer to entry storing the rule if found
2576 static struct ice_fltr_mgmt_list_entry *
2577 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2579 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2580 struct ice_switch_info *sw = hw->switch_info;
2581 struct LIST_HEAD_TYPE *list_head;
2583 list_head = &sw->recp_list[recp_id].filt_rules;
2584 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the raw lookup data and the Rx/Tx flag only; the filter
 * action is deliberately not part of the comparison.
 */
2586 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2587 sizeof(f_info->l_data)) &&
2588 f_info->flag == list_itr->fltr_info.flag) {
2597 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2598 * @hw: pointer to the hardware structure
2599 * @recp_id: lookup type for which VSI lists needs to be searched
2600 * @vsi_handle: VSI handle to be found in VSI list
2601 * @vsi_list_id: VSI list ID found containing vsi_handle
2603 * Helper function to search a VSI list with single entry containing given VSI
2604 * handle element. This can be extended further to search VSI list with more
2605 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2607 static struct ice_vsi_list_map_info *
2608 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2611 struct ice_vsi_list_map_info *map_info = NULL;
2612 struct ice_switch_info *sw = hw->switch_info;
2613 struct LIST_HEAD_TYPE *list_head;
2615 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different entry type in the same list
 * head, so the walk is duplicated for both entry layouts.
 */
2616 if (sw->recp_list[recp_id].adv_rule) {
2617 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2619 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2620 ice_adv_fltr_mgmt_list_entry,
2622 if (list_itr->vsi_list_info) {
2623 map_info = list_itr->vsi_list_info;
2624 if (ice_is_bit_set(map_info->vsi_map,
2626 *vsi_list_id = map_info->vsi_list_id;
2632 struct ice_fltr_mgmt_list_entry *list_itr;
2634 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2635 ice_fltr_mgmt_list_entry,
/* Regular rules additionally require vsi_count == 1, per the
 * "single entry" contract in the function comment above.
 */
2637 if (list_itr->vsi_count == 1 &&
2638 list_itr->vsi_list_info) {
2639 map_info = list_itr->vsi_list_info;
2640 if (ice_is_bit_set(map_info->vsi_map,
2642 *vsi_list_id = map_info->vsi_list_id;
2652 * ice_add_rule_internal - add rule for a given lookup type
2653 * @hw: pointer to the hardware structure
2654 * @recp_id: lookup type (recipe ID) for which rule has to be added
2655 * @f_entry: structure containing MAC forwarding information
2657 * Adds or updates the rule lists for a given recipe
2659 static enum ice_status
2660 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2661 struct ice_fltr_list_entry *f_entry)
2663 struct ice_switch_info *sw = hw->switch_info;
2664 struct ice_fltr_info *new_fltr, *cur_fltr;
2665 struct ice_fltr_mgmt_list_entry *m_entry;
2666 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2667 enum ice_status status = ICE_SUCCESS;
2669 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2670 return ICE_ERR_PARAM;
2672 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2673 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2674 f_entry->fltr_info.fwd_id.hw_vsi_id =
2675 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2677 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2679 ice_acquire_lock(rule_lock);
2680 new_fltr = &f_entry->fltr_info;
/* Rule source: Rx rules match on the receiving port, Tx rules on the
 * originating HW VSI number.
 */
2681 if (new_fltr->flag & ICE_FLTR_RX)
2682 new_fltr->src = hw->port_info->lport;
2683 else if (new_fltr->flag & ICE_FLTR_TX)
2685 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* If no matching rule exists yet, create one; otherwise fold the new
 * VSI into the existing rule's VSI list.
 */
2687 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2689 status = ice_create_pkt_fwd_rule(hw, f_entry);
2690 goto exit_add_rule_internal;
2693 cur_fltr = &m_entry->fltr_info;
2694 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2696 exit_add_rule_internal:
2697 ice_release_lock(rule_lock);
2702 * ice_remove_vsi_list_rule
2703 * @hw: pointer to the hardware structure
2704 * @vsi_list_id: VSI list ID generated as part of allocate resource
2705 * @lkup_type: switch rule filter lookup type
2707 * The VSI list should be emptied before this function is called to remove the
2710 static enum ice_status
2711 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2712 enum ice_sw_lkup_type lkup_type)
2714 struct ice_aqc_sw_rules_elem *s_rule;
2715 enum ice_status status;
2718 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2719 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2721 return ICE_ERR_NO_MEMORY;
2723 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2724 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2726 /* Free the vsi_list resource that we allocated. It is assumed that the
2727 * list is empty at this point.
2729 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2730 ice_aqc_opc_free_res);
2732 ice_free(hw, s_rule);
2737 * ice_rem_update_vsi_list
2738 * @hw: pointer to the hardware structure
2739 * @vsi_handle: VSI handle of the VSI to remove
2740 * @fm_list: filter management entry for which the VSI list management needs to
2743 static enum ice_status
2744 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2745 struct ice_fltr_mgmt_list_entry *fm_list)
2747 enum ice_sw_lkup_type lkup_type;
2748 enum ice_status status = ICE_SUCCESS;
2751 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2752 fm_list->vsi_count == 0)
2753 return ICE_ERR_PARAM;
2755 /* A rule with the VSI being removed does not exist */
2756 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2757 return ICE_ERR_DOES_NOT_EXIST;
2759 lkup_type = fm_list->fltr_info.lkup_type;
2760 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove = true: pull this single VSI out of the firmware VSI list */
2761 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2762 ice_aqc_opc_update_sw_rules,
2767 fm_list->vsi_count--;
2768 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* With exactly one VSI left (non-VLAN rules), collapse the rule back
 * from FWD_TO_VSI_LIST to a plain FWD_TO_VSI on the survivor, then the
 * now-unused list is removed in the block below.
 */
2770 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2771 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2772 struct ice_vsi_list_map_info *vsi_list_info =
2773 fm_list->vsi_list_info;
2776 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2778 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2779 return ICE_ERR_OUT_OF_RANGE;
2781 /* Make sure VSI list is empty before removing it below */
2782 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2784 ice_aqc_opc_update_sw_rules,
2789 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2790 tmp_fltr_info.fwd_id.hw_vsi_id =
2791 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2792 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2793 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2795 ice_debug(hw, ICE_DBG_SW,
2796 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2797 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2801 fm_list->fltr_info = tmp_fltr_info;
/* VLAN rules keep their list until it is completely empty; others
 * free it once the single-survivor conversion above has happened.
 */
2804 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2805 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2806 struct ice_vsi_list_map_info *vsi_list_info =
2807 fm_list->vsi_list_info;
2809 /* Remove the VSI list since it is no longer used */
2810 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2812 ice_debug(hw, ICE_DBG_SW,
2813 "Failed to remove VSI list %d, error %d\n",
2814 vsi_list_id, status);
2818 LIST_DEL(&vsi_list_info->list_entry);
2819 ice_free(hw, vsi_list_info);
2820 fm_list->vsi_list_info = NULL;
2827 * ice_remove_rule_internal - Remove a filter rule of a given type
2829 * @hw: pointer to the hardware structure
2830 * @recp_id: recipe ID for which the rule needs to removed
2831 * @f_entry: rule entry containing filter information
2833 static enum ice_status
2834 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2835 struct ice_fltr_list_entry *f_entry)
2837 struct ice_switch_info *sw = hw->switch_info;
2838 struct ice_fltr_mgmt_list_entry *list_elem;
2839 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2840 enum ice_status status = ICE_SUCCESS;
2841 bool remove_rule = false;
2844 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2845 return ICE_ERR_PARAM;
2846 f_entry->fltr_info.fwd_id.hw_vsi_id =
2847 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2849 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2850 ice_acquire_lock(rule_lock);
2851 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2853 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be deleted, based on the
 * action type and the VSI list reference count.
 */
2857 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2859 } else if (!list_elem->vsi_list_info) {
2860 status = ICE_ERR_DOES_NOT_EXIST;
2862 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2863 /* a ref_cnt > 1 indicates that the vsi_list is being
2864 * shared by multiple rules. Decrement the ref_cnt and
2865 * remove this rule, but do not modify the list, as it
2866 * is in-use by other rules.
2868 list_elem->vsi_list_info->ref_cnt--;
2871 /* a ref_cnt of 1 indicates the vsi_list is only used
2872 * by one rule. However, the original removal request is only
2873 * for a single VSI. Update the vsi_list first, and only
2874 * remove the rule if there are no further VSIs in this list.
2876 vsi_handle = f_entry->fltr_info.vsi_handle;
2877 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2880 /* if VSI count goes to zero after updating the VSI list */
2881 if (list_elem->vsi_count == 0)
2886 /* Remove the lookup rule */
2887 struct ice_aqc_sw_rules_elem *s_rule;
/* NO_HDR size: a remove request needs no dummy packet header */
2889 s_rule = (struct ice_aqc_sw_rules_elem *)
2890 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2892 status = ICE_ERR_NO_MEMORY;
2896 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2897 ice_aqc_opc_remove_sw_rules);
2899 status = ice_aq_sw_rules(hw, s_rule,
2900 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2901 ice_aqc_opc_remove_sw_rules, NULL);
2903 /* Remove a book keeping from the list */
2904 ice_free(hw, s_rule);
2909 LIST_DEL(&list_elem->list_entry);
2910 ice_free(hw, list_elem);
2913 ice_release_lock(rule_lock);
2918 * ice_aq_get_res_alloc - get allocated resources
2919 * @hw: pointer to the HW struct
2920 * @num_entries: pointer to u16 to store the number of resource entries returned
2921 * @buf: pointer to user-supplied buffer
2922 * @buf_size: size of buff
2923 * @cd: pointer to command details structure or NULL
2925 * The user-supplied buffer must be large enough to store the resource
2926 * information for all resource types. Each resource type is an
2927 * ice_aqc_get_res_resp_data_elem structure.
2930 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2931 u16 buf_size, struct ice_sq_cd *cd)
2933 struct ice_aqc_get_res_alloc *resp;
2934 enum ice_status status;
2935 struct ice_aq_desc desc;
2938 return ICE_ERR_BAD_PTR;
2940 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2941 return ICE_ERR_INVAL_SIZE;
2943 resp = &desc.params.get_res;
2945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2946 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on command success */
2948 if (!status && num_entries)
2949 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2955 * ice_aq_get_res_descs - get allocated resource descriptors
2956 * @hw: pointer to the hardware structure
2957 * @num_entries: number of resource entries in buffer
2958 * @buf: Indirect buffer to hold data parameters and response
2959 * @buf_size: size of buffer for indirect commands
2960 * @res_type: resource type
2961 * @res_shared: is resource shared
2962 * @desc_id: input - first desc ID to start; output - next desc ID
2963 * @cd: pointer to command details structure or NULL
2966 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2967 struct ice_aqc_get_allocd_res_desc_resp *buf,
2968 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2969 struct ice_sq_cd *cd)
2971 struct ice_aqc_get_allocd_res_desc *cmd;
2972 struct ice_aq_desc desc;
2973 enum ice_status status;
2975 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2977 cmd = &desc.params.get_res_desc;
2980 return ICE_ERR_PARAM;
/* The buffer must hold exactly num_entries response elements */
2982 if (buf_size != (num_entries * sizeof(*buf)))
2983 return ICE_ERR_PARAM;
2985 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Pack the resource type and shared flag into one LE16 field */
2987 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2988 ICE_AQC_RES_TYPE_M) | (res_shared ?
2989 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2990 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2992 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2994 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Report where the next query should resume from */
2996 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3002 * ice_add_mac - Add a MAC address based filter rule
3003 * @hw: pointer to the hardware structure
3004 * @m_list: list of MAC addresses and forwarding information
3006 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3007 * multiple unicast addresses, the function assumes that all the
3008 * addresses are unique in a given add_mac call. It doesn't
3009 * check for duplicates in this case, removing duplicates from a given
3010 * list should be taken care of in the caller of this function.
3013 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3015 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3016 struct ice_fltr_list_entry *m_list_itr;
3017 struct LIST_HEAD_TYPE *rule_head;
3018 u16 elem_sent, total_elem_left;
3019 struct ice_switch_info *sw;
3020 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3021 enum ice_status status = ICE_SUCCESS;
3022 u16 num_unicast = 0;
3026 return ICE_ERR_PARAM;
3028 sw = hw->switch_info;
3029 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; multicast (and shared unicast) entries
 * are added one at a time via ice_add_rule_internal, exclusive unicast
 * entries are counted for the bulk path below.
 */
3030 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3032 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3036 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3037 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3038 if (!ice_is_vsi_valid(hw, vsi_handle))
3039 return ICE_ERR_PARAM;
3040 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3041 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3042 /* update the src in case it is VSI num */
3043 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3044 return ICE_ERR_PARAM;
3045 m_list_itr->fltr_info.src = hw_vsi_id;
3046 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3047 IS_ZERO_ETHER_ADDR(add))
3048 return ICE_ERR_PARAM;
3049 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3050 /* Don't overwrite the unicast address */
3051 ice_acquire_lock(rule_lock);
3052 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3053 &m_list_itr->fltr_info)) {
3054 ice_release_lock(rule_lock);
3055 return ICE_ERR_ALREADY_EXISTS;
3057 ice_release_lock(rule_lock);
3059 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3060 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3061 m_list_itr->status =
3062 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3064 if (m_list_itr->status)
3065 return m_list_itr->status;
3069 ice_acquire_lock(rule_lock);
3070 /* Exit if no suitable entries were found for adding bulk switch rule */
3072 status = ICE_SUCCESS;
3073 goto ice_add_mac_exit;
3076 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3078 /* Allocate switch rule buffer for the bulk update for unicast */
3079 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3080 s_rule = (struct ice_aqc_sw_rules_elem *)
3081 ice_calloc(hw, num_unicast, s_rule_size);
3083 status = ICE_ERR_NO_MEMORY;
3084 goto ice_add_mac_exit;
/* Pass 2: fill one rule element per unicast entry into the buffer */
3088 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3090 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3091 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3093 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3094 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3095 ice_aqc_opc_add_sw_rules);
3096 r_iter = (struct ice_aqc_sw_rules_elem *)
3097 ((u8 *)r_iter + s_rule_size);
3101 /* Call AQ bulk switch rule update for all unicast addresses */
3103 /* Call AQ switch rule in AQ_MAX chunk */
3104 for (total_elem_left = num_unicast; total_elem_left > 0;
3105 total_elem_left -= elem_sent) {
3106 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ submission at the max buffer length */
3108 elem_sent = min(total_elem_left,
3109 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3110 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3111 elem_sent, ice_aqc_opc_add_sw_rules,
3114 goto ice_add_mac_exit;
3115 r_iter = (struct ice_aqc_sw_rules_elem *)
3116 ((u8 *)r_iter + (elem_sent * s_rule_size));
3119 /* Fill up rule ID based on the value returned from FW */
3121 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3123 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3124 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3125 struct ice_fltr_mgmt_list_entry *fm_entry;
3127 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3128 f_info->fltr_rule_id =
3129 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3130 f_info->fltr_act = ICE_FWD_TO_VSI;
3131 /* Create an entry to track this MAC address */
3132 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3133 ice_malloc(hw, sizeof(*fm_entry));
3135 status = ICE_ERR_NO_MEMORY;
3136 goto ice_add_mac_exit;
3138 fm_entry->fltr_info = *f_info;
3139 fm_entry->vsi_count = 1;
3140 /* The book keeping entries will get removed when
3141 * base driver calls remove filter AQ command
3144 LIST_ADD(&fm_entry->list_entry, rule_head);
3145 r_iter = (struct ice_aqc_sw_rules_elem *)
3146 ((u8 *)r_iter + s_rule_size);
3151 ice_release_lock(rule_lock);
3153 ice_free(hw, s_rule);
3158 * ice_add_vlan_internal - Add one VLAN based filter rule
3159 * @hw: pointer to the hardware structure
3160 * @f_entry: filter entry containing one VLAN information
3162 static enum ice_status
3163 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3165 struct ice_switch_info *sw = hw->switch_info;
3166 struct ice_fltr_mgmt_list_entry *v_list_itr;
3167 struct ice_fltr_info *new_fltr, *cur_fltr;
3168 enum ice_sw_lkup_type lkup_type;
3169 u16 vsi_list_id = 0, vsi_handle;
3170 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3171 enum ice_status status = ICE_SUCCESS;
3173 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3174 return ICE_ERR_PARAM;
3176 f_entry->fltr_info.fwd_id.hw_vsi_id =
3177 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3178 new_fltr = &f_entry->fltr_info;
3180 /* VLAN ID should only be 12 bits */
3181 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3182 return ICE_ERR_PARAM;
3184 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3185 return ICE_ERR_PARAM;
3187 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3188 lkup_type = new_fltr->lkup_type;
3189 vsi_handle = new_fltr->vsi_handle;
3190 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3191 ice_acquire_lock(rule_lock);
3192 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* Case 1: no rule exists yet for this VLAN ID */
3194 struct ice_vsi_list_map_info *map_info = NULL;
3196 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3197 /* All VLAN pruning rules use a VSI list. Check if
3198 * there is already a VSI list containing VSI that we
3199 * want to add. If found, use the same vsi_list_id for
3200 * this new VLAN rule or else create a new list.
3202 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3206 status = ice_create_vsi_list_rule(hw,
3214 /* Convert the action to forwarding to a VSI list. */
3215 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3216 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3219 status = ice_create_pkt_fwd_rule(hw, f_entry);
3221 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3224 status = ICE_ERR_DOES_NOT_EXIST;
3227 /* reuse VSI list for new rule and increment ref_cnt */
3229 v_list_itr->vsi_list_info = map_info;
3230 map_info->ref_cnt++;
3232 v_list_itr->vsi_list_info =
3233 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is not shared */
3237 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3238 /* Update existing VSI list to add new VSI ID only if it used
3241 cur_fltr = &v_list_itr->fltr_info;
3242 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3245 /* If VLAN rule exists and VSI list being used by this rule is
3246 * referenced by more than 1 VLAN rule. Then create a new VSI
3247 * list appending previous VSI with new VSI and update existing
3248 * VLAN rule to point to new VSI list ID
3250 struct ice_fltr_info tmp_fltr;
3251 u16 vsi_handle_arr[2];
3254 /* Current implementation only supports reusing VSI list with
3255 * one VSI count. We should never hit below condition
3257 if (v_list_itr->vsi_count > 1 &&
3258 v_list_itr->vsi_list_info->ref_cnt > 1) {
3259 ice_debug(hw, ICE_DBG_SW,
3260 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3261 status = ICE_ERR_CFG;
3266 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3269 /* A rule already exists with the new VSI being added */
3270 if (cur_handle == vsi_handle) {
3271 status = ICE_ERR_ALREADY_EXISTS;
3275 vsi_handle_arr[0] = cur_handle;
3276 vsi_handle_arr[1] = vsi_handle;
3277 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3278 &vsi_list_id, lkup_type);
3282 tmp_fltr = v_list_itr->fltr_info;
3283 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3284 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3285 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3286 /* Update the previous switch rule to a new VSI list which
3287 * includes current VSI that is requested
3289 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3293 /* before overriding VSI list map info. decrement ref_cnt of
3296 v_list_itr->vsi_list_info->ref_cnt--;
3298 /* now update to newly created list */
3299 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3300 v_list_itr->vsi_list_info =
3301 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3303 v_list_itr->vsi_count++;
3307 ice_release_lock(rule_lock);
3312 * ice_add_vlan - Add VLAN based filter rule
3313 * @hw: pointer to the hardware structure
3314 * @v_list: list of VLAN entries and forwarding information
3317 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3319 struct ice_fltr_list_entry *v_list_itr;
3322 return ICE_ERR_PARAM;
/* Each entry's per-item result is stored in its ->status field; the
 * first failure aborts the walk and is also returned to the caller.
 */
3324 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3326 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3327 return ICE_ERR_PARAM;
3328 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3329 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3330 if (v_list_itr->status)
3331 return v_list_itr->status;
3337 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3338 * @hw: pointer to the hardware structure
3339 * @mv_list: list of MAC and VLAN filters
3341 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3342 * pruning bits enabled, then it is the responsibility of the caller to make
3343 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3344 * VLAN won't be received on that VSI otherwise.
3347 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3349 struct ice_fltr_list_entry *mv_list_itr;
3351 if (!mv_list || !hw)
3352 return ICE_ERR_PARAM;
/* Per-entry results go into ->status; first failure stops the loop */
3354 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3356 enum ice_sw_lkup_type l_type =
3357 mv_list_itr->fltr_info.lkup_type;
3359 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3360 return ICE_ERR_PARAM;
3361 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3362 mv_list_itr->status =
3363 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3365 if (mv_list_itr->status)
3366 return mv_list_itr->status;
3372 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3373 * @hw: pointer to the hardware structure
3374 * @em_list: list of ether type MAC filter, MAC is optional
3376 * This function requires the caller to populate the entries in
3377 * the filter list with the necessary fields (including flags to
3378 * indicate Tx or Rx rules).
3381 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3383 struct ice_fltr_list_entry *em_list_itr;
3385 if (!em_list || !hw)
3386 return ICE_ERR_PARAM;
3388 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3390 enum ice_sw_lkup_type l_type =
3391 em_list_itr->fltr_info.lkup_type;
/* Accept either pure ethertype or ethertype+MAC lookups; unlike
 * the MAC/VLAN adders, the Rx/Tx flag is taken from the caller.
 */
3393 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3394 l_type != ICE_SW_LKUP_ETHERTYPE)
3395 return ICE_ERR_PARAM;
3397 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3399 if (em_list_itr->status)
3400 return em_list_itr->status;
3406 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3407 * @hw: pointer to the hardware structure
3408 * @em_list: list of ethertype or ethertype MAC entries
/* Remove ethertype / ethertype+MAC filters previously added via
 * ice_add_eth_mac(). Uses the _SAFE iterator because
 * ice_remove_rule_internal() may unlink the current entry's rule state.
 */
3411 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3413 struct ice_fltr_list_entry *em_list_itr, *tmp;
3415 if (!em_list || !hw)
3416 return ICE_ERR_PARAM;
3418 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3420 enum ice_sw_lkup_type l_type =
3421 em_list_itr->fltr_info.lkup_type;
3423 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3424 l_type != ICE_SW_LKUP_ETHERTYPE)
3425 return ICE_ERR_PARAM;
3427 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3429 if (em_list_itr->status)
3430 return em_list_itr->status;
3437 * ice_rem_sw_rule_info
3438 * @hw: pointer to the hardware structure
3439 * @rule_head: pointer to the switch list structure that we want to delete
/* Free all book-keeping entries on a (non-advanced) rule list: unlink each
 * entry then release it. Safe iterator is required since entries are freed
 * while walking.
 */
3442 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3444 if (!LIST_EMPTY(rule_head)) {
3445 struct ice_fltr_mgmt_list_entry *entry;
3446 struct ice_fltr_mgmt_list_entry *tmp;
3448 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3449 ice_fltr_mgmt_list_entry, list_entry) {
3450 LIST_DEL(&entry->list_entry);
3451 ice_free(hw, entry);
3457 * ice_rem_adv_rule_info
3458 * @hw: pointer to the hardware structure
3459 * @rule_head: pointer to the switch list structure that we want to delete
/* Advanced-rule counterpart of ice_rem_sw_rule_info(): each entry also owns
 * a separately-allocated lookup array (lst_itr->lkups), which must be freed
 * before the entry itself to avoid leaking it.
 */
3462 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3464 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3465 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3467 if (LIST_EMPTY(rule_head))
3470 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3471 ice_adv_fltr_mgmt_list_entry, list_entry) {
3472 LIST_DEL(&lst_itr->list_entry);
/* free the per-rule lookup array first, then the entry */
3473 ice_free(hw, lst_itr->lkups);
3474 ice_free(hw, lst_itr);
3479 * ice_rem_all_sw_rules_info
3480 * @hw: pointer to the hardware structure
/* Drop the software book-keeping for every recipe's filter rule list,
 * dispatching to the plain or advanced cleanup routine depending on
 * whether the recipe holds advanced rules.
 */
3482 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3484 struct ice_switch_info *sw = hw->switch_info;
3487 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3488 struct LIST_HEAD_TYPE *rule_head;
3490 rule_head = &sw->recp_list[i].filt_rules;
/* advanced recipes carry extra per-entry allocations (see
 * ice_rem_adv_rule_info) and need the dedicated cleanup path
 */
3491 if (!sw->recp_list[i].adv_rule)
3492 ice_rem_sw_rule_info(hw, rule_head);
3494 ice_rem_adv_rule_info(hw, rule_head);
3499 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3500 * @pi: pointer to the port_info structure
3501 * @vsi_handle: VSI handle to set as default
3502 * @set: true to add the above mentioned switch rule, false to remove it
3503 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3505 * add filter rule to set/unset given VSI as default VSI for the switch
3506 * (represented by swid)
/* Set or clear the given VSI as the switch's default VSI in the requested
 * direction. Builds a single switch rule (with dummy ethernet header when
 * adding, header-less when removing), sends it via the admin queue, then
 * updates the port_info default-VSI book-keeping on success.
 */
3509 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3512 struct ice_aqc_sw_rules_elem *s_rule;
3513 struct ice_fltr_info f_info;
3514 struct ice_hw *hw = pi->hw;
3515 enum ice_adminq_opc opcode;
3516 enum ice_status status;
3520 if (!ice_is_vsi_valid(hw, vsi_handle))
3521 return ICE_ERR_PARAM;
3522 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* add-rule needs room for the dummy ETH header; remove does not */
3524 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3525 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3526 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3528 return ICE_ERR_NO_MEMORY;
3530 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3532 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3533 f_info.flag = direction;
3534 f_info.fltr_act = ICE_FWD_TO_VSI;
3535 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx rules are sourced from the physical port; Tx rules from the VSI */
3537 if (f_info.flag & ICE_FLTR_RX) {
3538 f_info.src = pi->lport;
3539 f_info.src_id = ICE_SRC_ID_LPORT;
3541 f_info.fltr_rule_id =
3542 pi->dflt_rx_vsi_rule_id;
3543 } else if (f_info.flag & ICE_FLTR_TX) {
3544 f_info.src_id = ICE_SRC_ID_VSI;
3545 f_info.src = hw_vsi_id;
3547 f_info.fltr_rule_id =
3548 pi->dflt_tx_vsi_rule_id;
3552 opcode = ice_aqc_opc_add_sw_rules;
3554 opcode = ice_aqc_opc_remove_sw_rules;
3556 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3558 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3559 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* FW-assigned rule index, needed later to remove the rule */
3562 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3564 if (f_info.flag & ICE_FLTR_TX) {
3565 pi->dflt_tx_vsi_num = hw_vsi_id;
3566 pi->dflt_tx_vsi_rule_id = index;
3567 } else if (f_info.flag & ICE_FLTR_RX) {
3568 pi->dflt_rx_vsi_num = hw_vsi_id;
3569 pi->dflt_rx_vsi_rule_id = index;
/* clearing: invalidate the cached default-VSI state for this direction */
3572 if (f_info.flag & ICE_FLTR_TX) {
3573 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3574 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3575 } else if (f_info.flag & ICE_FLTR_RX) {
3576 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3577 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3582 ice_free(hw, s_rule);
3587 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3588 * @hw: pointer to the hardware structure
3589 * @recp_id: lookup type for which the specified rule needs to be searched
3590 * @f_info: rule information
3592 * Helper function to search for a unicast rule entry - this is to be used
3593 * to remove unicast MAC filter that is not shared with other VSIs on the
3596 * Returns pointer to entry storing the rule if found
/* Search the recipe's rule list for an entry matching the given filter's
 * lookup data, destination hw VSI id, and direction flag. Per the kdoc this
 * is used when removing a unicast MAC that is NOT shared with other VSIs.
 * NOTE(review): caller is expected to hold the recipe's filt_rule_lock —
 * visible callers (ice_remove_mac) acquire it around this call.
 */
3598 static struct ice_fltr_mgmt_list_entry *
3599 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3600 struct ice_fltr_info *f_info)
3602 struct ice_switch_info *sw = hw->switch_info;
3603 struct ice_fltr_mgmt_list_entry *list_itr;
3604 struct LIST_HEAD_TYPE *list_head;
3606 list_head = &sw->recp_list[recp_id].filt_rules;
3607 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* match on lookup data + forwarding VSI + direction flag */
3609 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3610 sizeof(f_info->l_data)) &&
3611 f_info->fwd_id.hw_vsi_id ==
3612 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3613 f_info->flag == list_itr->fltr_info.flag)
3620 * ice_remove_mac - remove a MAC address based filter rule
3621 * @hw: pointer to the hardware structure
3622 * @m_list: list of MAC addresses and forwarding information
3624 * This function removes either a MAC filter rule or a specific VSI from a
3625 * VSI list for a multicast MAC address.
3627 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3628 * ice_add_mac. Caller should be aware that this call will only work if all
3629 * the entries passed into m_list were added previously. It will not attempt to
3630 * do a partial remove of entries that were found.
/* Remove MAC filters listed in m_list. For a non-shared unicast address the
 * rule must belong to this VSI (checked under the MAC recipe lock) so we
 * never tear down a unicast rule owned by another VSI. Per the kdoc, no
 * partial-remove recovery is attempted: the first failure is returned.
 */
3633 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3635 struct ice_fltr_list_entry *list_itr, *tmp;
3636 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3639 return ICE_ERR_PARAM;
3641 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3642 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3644 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3645 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3648 if (l_type != ICE_SW_LKUP_MAC)
3649 return ICE_ERR_PARAM;
3651 vsi_handle = list_itr->fltr_info.vsi_handle;
3652 if (!ice_is_vsi_valid(hw, vsi_handle))
3653 return ICE_ERR_PARAM;
3655 list_itr->fltr_info.fwd_id.hw_vsi_id =
3656 ice_get_hw_vsi_num(hw, vsi_handle);
3657 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3658 /* Don't remove the unicast address that belongs to
3659 * another VSI on the switch, since it is not being
3662 ice_acquire_lock(rule_lock);
3663 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3664 &list_itr->fltr_info)) {
3665 ice_release_lock(rule_lock);
3666 return ICE_ERR_DOES_NOT_EXIST;
3668 ice_release_lock(rule_lock);
3670 list_itr->status = ice_remove_rule_internal(hw,
3673 if (list_itr->status)
3674 return list_itr->status;
3680 * ice_remove_vlan - Remove VLAN based filter rule
3681 * @hw: pointer to the hardware structure
3682 * @v_list: list of VLAN entries and forwarding information
/* Remove VLAN filters previously installed via ice_add_vlan(). Each entry
 * must be ICE_SW_LKUP_VLAN; the first removal failure aborts the walk.
 */
3685 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3687 struct ice_fltr_list_entry *v_list_itr, *tmp;
3690 return ICE_ERR_PARAM;
3692 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3694 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3696 if (l_type != ICE_SW_LKUP_VLAN)
3697 return ICE_ERR_PARAM;
3698 v_list_itr->status = ice_remove_rule_internal(hw,
3701 if (v_list_itr->status)
3702 return v_list_itr->status;
3708 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3709 * @hw: pointer to the hardware structure
3710 * @v_list: list of MAC VLAN entries and forwarding information
/* Remove MAC+VLAN pair filters; mirrors ice_remove_vlan() but for the
 * ICE_SW_LKUP_MAC_VLAN lookup type.
 */
3713 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3715 struct ice_fltr_list_entry *v_list_itr, *tmp;
3718 return ICE_ERR_PARAM;
3720 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3722 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3724 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3725 return ICE_ERR_PARAM;
3726 v_list_itr->status =
3727 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3729 if (v_list_itr->status)
3730 return v_list_itr->status;
3736 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3737 * @fm_entry: filter entry to inspect
3738 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter targets this VSI: either it forwards directly to the
 * VSI (handle match) or it forwards to a VSI list whose bitmap contains the
 * handle.
 */
3741 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3743 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3744 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3745 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3746 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3751 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3752 * @hw: pointer to the hardware structure
3753 * @vsi_handle: VSI handle to remove filters from
3754 * @vsi_list_head: pointer to the list to add entry to
3755 * @fi: pointer to fltr_info of filter entry to copy & add
3757 * Helper function, used when creating a list of filters to remove from
3758 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3759 * original filter entry, with the exception of fltr_info.fltr_act and
3760 * fltr_info.fwd_id fields. These are set such that later logic can
3761 * extract which VSI to remove the fltr from, and pass on that information.
/* Allocate a COPY of the given filter info and append it to vsi_list_head,
 * rewriting fltr_act/vsi_handle/fwd_id so later remove logic knows exactly
 * which VSI to detach. Ownership of the allocation passes to the caller,
 * which frees it after the removals are done (see kdoc above).
 */
3763 static enum ice_status
3764 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3765 struct LIST_HEAD_TYPE *vsi_list_head,
3766 struct ice_fltr_info *fi)
3768 struct ice_fltr_list_entry *tmp;
3770 /* this memory is freed up in the caller function
3771 * once filters for this VSI are removed
3773 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3775 return ICE_ERR_NO_MEMORY;
3777 tmp->fltr_info = *fi;
3779 /* Overwrite these fields to indicate which VSI to remove filter from,
3780 * so find and remove logic can extract the information from the
3781 * list entries. Note that original entries will still have proper
3784 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3785 tmp->fltr_info.vsi_handle = vsi_handle;
3786 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3788 LIST_ADD(&tmp->list_entry, vsi_list_head);
3794 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3795 * @hw: pointer to the hardware structure
3796 * @vsi_handle: VSI handle to remove filters from
3797 * @lkup_list_head: pointer to the list that has certain lookup type filters
3798 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3800 * Locates all filters in lkup_list_head that are used by the given VSI,
3801 * and adds COPIES of those entries to vsi_list_head (intended to be used
3802 * to remove the listed filters).
3803 * Note that this means all entries in vsi_list_head must be explicitly
3804 * deallocated by the caller when done with list.
/* Scan lkup_list_head for filters used by vsi_handle and append copies of
 * each to vsi_list_head via ice_add_entry_to_vsi_fltr_list(). Per the kdoc,
 * the caller owns (and must free) every copied entry.
 * NOTE(review): callers visible in this file hold the recipe lock around
 * this scan — confirm for any new call site.
 */
3806 static enum ice_status
3807 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3808 struct LIST_HEAD_TYPE *lkup_list_head,
3809 struct LIST_HEAD_TYPE *vsi_list_head)
3811 struct ice_fltr_mgmt_list_entry *fm_entry;
3812 enum ice_status status = ICE_SUCCESS;
3814 /* check to make sure VSI ID is valid and within boundary */
3815 if (!ice_is_vsi_valid(hw, vsi_handle))
3816 return ICE_ERR_PARAM;
3818 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3819 ice_fltr_mgmt_list_entry, list_entry) {
3820 struct ice_fltr_info *fi;
3822 fi = &fm_entry->fltr_info;
/* skip filters this VSI does not use */
3823 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3826 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3836 * ice_determine_promisc_mask
3837 * @fi: filter info to parse
3839 * Helper function to determine which ICE_PROMISC_ mask corresponds
3840 * to given filter into.
/* Map a filter's MAC address class (broadcast / multicast / unicast /
 * otherwise VLAN) and direction flag onto the corresponding ICE_PROMISC_*
 * bit. Tx vs Rx variant is chosen from fi->flag.
 */
3842 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3844 u16 vid = fi->l_data.mac_vlan.vlan_id;
3845 u8 *macaddr = fi->l_data.mac.mac_addr;
3846 bool is_tx_fltr = false;
3847 u8 promisc_mask = 0;
3849 if (fi->flag == ICE_FLTR_TX)
3852 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3853 promisc_mask |= is_tx_fltr ?
3854 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3855 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3856 promisc_mask |= is_tx_fltr ?
3857 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3858 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3859 promisc_mask |= is_tx_fltr ?
3860 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* fallthrough case: treated as a VLAN promiscuous filter */
3862 promisc_mask |= is_tx_fltr ?
3863 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3865 return promisc_mask;
3869 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3870 * @hw: pointer to the hardware structure
3871 * @vsi_handle: VSI handle to retrieve info from
3872 * @promisc_mask: pointer to mask to be filled in
3873 * @vid: VLAN ID of promisc VLAN VSI
/* Accumulate into *promisc_mask the ICE_PROMISC_* bits of every
 * ICE_SW_LKUP_PROMISC rule that applies to vsi_handle. The recipe's rule
 * list is walked under its filt_rule_lock.
 */
3876 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3879 struct ice_switch_info *sw = hw->switch_info;
3880 struct ice_fltr_mgmt_list_entry *itr;
3881 struct LIST_HEAD_TYPE *rule_head;
3882 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3884 if (!ice_is_vsi_valid(hw, vsi_handle))
3885 return ICE_ERR_PARAM;
3889 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3890 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3892 ice_acquire_lock(rule_lock);
3893 LIST_FOR_EACH_ENTRY(itr, rule_head,
3894 ice_fltr_mgmt_list_entry, list_entry) {
3895 /* Continue if this filter doesn't apply to this VSI or the
3896 * VSI ID is not in the VSI map for this filter
3898 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3901 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3903 ice_release_lock(rule_lock);
3909 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3910 * @hw: pointer to the hardware structure
3911 * @vsi_handle: VSI handle to retrieve info from
3912 * @promisc_mask: pointer to mask to be filled in
3913 * @vid: VLAN ID of promisc VLAN VSI
/* Same as ice_get_vsi_promisc() but scans the ICE_SW_LKUP_PROMISC_VLAN
 * recipe, i.e. reports the VLAN-promiscuous state of the VSI.
 */
3916 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3919 struct ice_switch_info *sw = hw->switch_info;
3920 struct ice_fltr_mgmt_list_entry *itr;
3921 struct LIST_HEAD_TYPE *rule_head;
3922 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3924 if (!ice_is_vsi_valid(hw, vsi_handle))
3925 return ICE_ERR_PARAM;
3929 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3930 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3932 ice_acquire_lock(rule_lock);
3933 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3935 /* Continue if this filter doesn't apply to this VSI or the
3936 * VSI ID is not in the VSI map for this filter
3938 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3941 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3943 ice_release_lock(rule_lock);
3949 * ice_remove_promisc - Remove promisc based filter rules
3950 * @hw: pointer to the hardware structure
3951 * @recp_id: recipe ID for which the rule needs to removed
3952 * @v_list: list of promisc entries
/* Remove each promiscuous rule in v_list against the given recipe; stop and
 * return on the first per-entry failure. No lookup-type validation here —
 * callers build the list from the matching recipe themselves.
 */
3954 static enum ice_status
3955 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3956 struct LIST_HEAD_TYPE *v_list)
3958 struct ice_fltr_list_entry *v_list_itr, *tmp;
3960 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3962 v_list_itr->status =
3963 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3964 if (v_list_itr->status)
3965 return v_list_itr->status;
3971 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3972 * @hw: pointer to the hardware structure
3973 * @vsi_handle: VSI handle to clear mode
3974 * @promisc_mask: mask of promiscuous config bits to clear
3975 * @vid: VLAN ID to clear VLAN promiscuous
/* Clear the requested promiscuous mode(s) on a VSI. Under the recipe lock,
 * collect copies of every matching rule into a local remove list, drop the
 * lock, remove the rules, then free the copies unconditionally.
 */
3978 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3981 struct ice_switch_info *sw = hw->switch_info;
3982 struct ice_fltr_list_entry *fm_entry, *tmp;
3983 struct LIST_HEAD_TYPE remove_list_head;
3984 struct ice_fltr_mgmt_list_entry *itr;
3985 struct LIST_HEAD_TYPE *rule_head;
3986 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3987 enum ice_status status = ICE_SUCCESS;
3990 if (!ice_is_vsi_valid(hw, vsi_handle))
3991 return ICE_ERR_PARAM;
/* VLAN-promisc bits select the PROMISC_VLAN recipe, else plain PROMISC */
3994 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3996 recipe_id = ICE_SW_LKUP_PROMISC;
3998 rule_head = &sw->recp_list[recipe_id].filt_rules;
3999 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4001 INIT_LIST_HEAD(&remove_list_head);
4003 ice_acquire_lock(rule_lock);
4004 LIST_FOR_EACH_ENTRY(itr, rule_head,
4005 ice_fltr_mgmt_list_entry, list_entry) {
4006 u8 fltr_promisc_mask = 0;
4008 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4011 fltr_promisc_mask |=
4012 ice_determine_promisc_mask(&itr->fltr_info);
4014 /* Skip if filter is not completely specified by given mask */
4015 if (fltr_promisc_mask & ~promisc_mask)
4018 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4022 ice_release_lock(rule_lock);
4023 goto free_fltr_list;
4026 ice_release_lock(rule_lock);
4028 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* free the copied entries regardless of removal outcome */
4031 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4032 ice_fltr_list_entry, list_entry) {
4033 LIST_DEL(&fm_entry->list_entry);
4034 ice_free(hw, fm_entry);
4041 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4042 * @hw: pointer to the hardware structure
4043 * @vsi_handle: VSI handle to configure
4044 * @promisc_mask: mask of promiscuous config bits
4045 * @vid: VLAN ID to set VLAN promiscuous
/* Enable the requested promiscuous mode(s) on a VSI. One switch rule is
 * needed per direction/packet-type combination, so the mask is consumed
 * bit-by-bit in a loop: each iteration picks one UCAST/MCAST/BCAST Tx or Rx
 * bit (plus any pending VLAN bit), builds a filter with the matching dummy
 * DA, and installs it via ice_add_rule_internal().
 */
4048 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4050 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4051 struct ice_fltr_list_entry f_list_entry;
4052 struct ice_fltr_info new_fltr;
4053 enum ice_status status = ICE_SUCCESS;
4059 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4061 if (!ice_is_vsi_valid(hw, vsi_handle))
4062 return ICE_ERR_PARAM;
4063 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4065 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN-promisc requests use the PROMISC_VLAN recipe and carry the VID */
4067 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4068 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4069 new_fltr.l_data.mac_vlan.vlan_id = vid;
4070 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4072 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4073 recipe_id = ICE_SW_LKUP_PROMISC;
4076 /* Separate filters must be set for each direction/packet type
4077 * combination, so we will loop over the mask value, store the
4078 * individual type, and clear it out in the input mask as it
4081 while (promisc_mask) {
4087 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4088 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4089 pkt_type = UCAST_FLTR;
4090 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4091 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4092 pkt_type = UCAST_FLTR;
4094 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4095 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4096 pkt_type = MCAST_FLTR;
4097 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4098 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4099 pkt_type = MCAST_FLTR;
4101 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4102 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4103 pkt_type = BCAST_FLTR;
4104 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4105 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4106 pkt_type = BCAST_FLTR;
4110 /* Check for VLAN promiscuous flag */
4111 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4112 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4113 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4114 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4118 /* Set filter DA based on packet type */
4119 mac_addr = new_fltr.l_data.mac.mac_addr;
4120 if (pkt_type == BCAST_FLTR) {
4121 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4122 } else if (pkt_type == MCAST_FLTR ||
4123 pkt_type == UCAST_FLTR) {
4124 /* Use the dummy ether header DA */
4125 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4126 ICE_NONDMA_TO_NONDMA);
4127 if (pkt_type == MCAST_FLTR)
4128 mac_addr[0] |= 0x1; /* Set multicast bit */
4131 /* Need to reset this to zero for all iterations */
/* Tx rules source from the VSI; Rx rules from the physical port */
4134 new_fltr.flag |= ICE_FLTR_TX;
4135 new_fltr.src = hw_vsi_id;
4137 new_fltr.flag |= ICE_FLTR_RX;
4138 new_fltr.src = hw->port_info->lport;
4141 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4142 new_fltr.vsi_handle = vsi_handle;
4143 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4144 f_list_entry.fltr_info = new_fltr;
4146 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4147 if (status != ICE_SUCCESS)
4148 goto set_promisc_exit;
4156 * ice_set_vlan_vsi_promisc
4157 * @hw: pointer to the hardware structure
4158 * @vsi_handle: VSI handle to configure
4159 * @promisc_mask: mask of promiscuous config bits
4160 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4162 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Apply (or clear, when rm_vlan_promisc) the given promiscuous mask for
 * every VLAN configured on the VSI: snapshot the VSI's VLAN filters under
 * the VLAN recipe lock, then set/clear promisc per VLAN ID, and finally
 * free the snapshot entries.
 */
4165 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4166 bool rm_vlan_promisc)
4168 struct ice_switch_info *sw = hw->switch_info;
4169 struct ice_fltr_list_entry *list_itr, *tmp;
4170 struct LIST_HEAD_TYPE vsi_list_head;
4171 struct LIST_HEAD_TYPE *vlan_head;
4172 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4173 enum ice_status status;
4176 INIT_LIST_HEAD(&vsi_list_head);
4177 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4178 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4179 ice_acquire_lock(vlan_lock);
4180 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4182 ice_release_lock(vlan_lock);
4184 goto free_fltr_list;
4186 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4188 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4189 if (rm_vlan_promisc)
4190 status = ice_clear_vsi_promisc(hw, vsi_handle,
4191 promisc_mask, vlan_id);
4193 status = ice_set_vsi_promisc(hw, vsi_handle,
4194 promisc_mask, vlan_id);
/* snapshot entries were allocated by ice_add_to_vsi_fltr_list(); free them */
4200 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4201 ice_fltr_list_entry, list_entry) {
4202 LIST_DEL(&list_itr->list_entry);
4203 ice_free(hw, list_itr);
4209 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4210 * @hw: pointer to the hardware structure
4211 * @vsi_handle: VSI handle to remove filters from
4212 * @lkup: switch rule filter lookup type
/* Remove every filter of one lookup type used by the VSI: copy matching
 * rules into a local list under the recipe lock, dispatch to the matching
 * remove routine by lookup type, then free the copies.
 */
4215 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4216 enum ice_sw_lkup_type lkup)
4218 struct ice_switch_info *sw = hw->switch_info;
4219 struct ice_fltr_list_entry *fm_entry;
4220 struct LIST_HEAD_TYPE remove_list_head;
4221 struct LIST_HEAD_TYPE *rule_head;
4222 struct ice_fltr_list_entry *tmp;
4223 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4224 enum ice_status status;
4226 INIT_LIST_HEAD(&remove_list_head);
4227 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4228 rule_head = &sw->recp_list[lkup].filt_rules;
4229 ice_acquire_lock(rule_lock);
4230 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4232 ice_release_lock(rule_lock);
4237 case ICE_SW_LKUP_MAC:
4238 ice_remove_mac(hw, &remove_list_head);
4240 case ICE_SW_LKUP_VLAN:
4241 ice_remove_vlan(hw, &remove_list_head);
4243 case ICE_SW_LKUP_PROMISC:
4244 case ICE_SW_LKUP_PROMISC_VLAN:
4245 ice_remove_promisc(hw, lkup, &remove_list_head);
4247 case ICE_SW_LKUP_MAC_VLAN:
4248 ice_remove_mac_vlan(hw, &remove_list_head);
4250 case ICE_SW_LKUP_ETHERTYPE:
4251 case ICE_SW_LKUP_ETHERTYPE_MAC:
4252 ice_remove_eth_mac(hw, &remove_list_head);
4254 case ICE_SW_LKUP_DFLT:
4255 ice_debug(hw, ICE_DBG_SW,
4256 "Remove filters for this lookup type hasn't been implemented yet\n");
4258 case ICE_SW_LKUP_LAST:
4259 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* always free the copied list entries, even for unhandled types */
4263 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4264 ice_fltr_list_entry, list_entry) {
4265 LIST_DEL(&fm_entry->list_entry);
4266 ice_free(hw, fm_entry);
4271 * ice_remove_vsi_fltr - Remove all filters for a VSI
4272 * @hw: pointer to the hardware structure
4273 * @vsi_handle: VSI handle to remove filters from
/* Remove every filter of every supported lookup type for the given VSI by
 * invoking ice_remove_vsi_lkup_fltr() once per lookup type.
 */
4275 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4277 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4279 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4280 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4281 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4282 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4283 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4284 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4285 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4286 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4290 * ice_alloc_res_cntr - allocating resource counter
4291 * @hw: pointer to the hardware structure
4292 * @type: type of resource
4293 * @alloc_shared: if set it is shared else dedicated
4294 * @num_items: number of entries requested for FD resource type
4295 * @counter_id: counter index returned by AQ call
/* Allocate a resource counter through the alloc-res admin queue command.
 * On success the firmware-assigned index is returned via *counter_id.
 */
4298 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4301 struct ice_aqc_alloc_free_res_elem *buf;
4302 enum ice_status status;
4305 /* Allocate resource */
4306 buf_len = sizeof(*buf);
4307 buf = (struct ice_aqc_alloc_free_res_elem *)
4308 ice_malloc(hw, buf_len);
4310 return ICE_ERR_NO_MEMORY;
4312 buf->num_elems = CPU_TO_LE16(num_items);
/* encode resource type plus shared/dedicated flag for the AQ command */
4313 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4314 ICE_AQC_RES_TYPE_M) | alloc_shared);
4316 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4317 ice_aqc_opc_alloc_res, NULL);
4321 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4329 * ice_free_res_cntr - free resource counter
4330 * @hw: pointer to the hardware structure
4331 * @type: type of resource
4332 * @alloc_shared: if set it is shared else dedicated
4333 * @num_items: number of entries to be freed for FD resource type
4334 * @counter_id: counter ID resource which needs to be freed
/* Release a previously allocated resource counter via the free-res admin
 * queue command. A failure is only logged — the status is still returned.
 */
4337 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4340 struct ice_aqc_alloc_free_res_elem *buf;
4341 enum ice_status status;
4345 buf_len = sizeof(*buf);
4346 buf = (struct ice_aqc_alloc_free_res_elem *)
4347 ice_malloc(hw, buf_len);
4349 return ICE_ERR_NO_MEMORY;
4351 buf->num_elems = CPU_TO_LE16(num_items);
4352 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4353 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* identify which counter to free */
4354 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4356 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4357 ice_aqc_opc_free_res, NULL);
4359 ice_debug(hw, ICE_DBG_SW,
4360 "counter resource could not be freed\n");
4367 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4368 * @hw: pointer to the hardware structure
4369 * @counter_id: returns counter index
/* Convenience wrapper: allocate one dedicated VLAN-type counter. */
4371 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4373 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4374 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4379 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4380 * @hw: pointer to the hardware structure
4381 * @counter_id: counter index to be freed
/* Convenience wrapper: free one dedicated VLAN-type counter. */
4383 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4385 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4386 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4391 * ice_alloc_res_lg_act - add large action resource
4392 * @hw: pointer to the hardware structure
4393 * @l_id: large action ID to fill it in
4394 * @num_acts: number of actions to hold with a large action entry
/* Allocate a wide-table entry for a large action holding num_acts actions
 * (1..ICE_MAX_LG_ACT). The wide-table resource type is chosen by action
 * count; the firmware-assigned index is returned via *l_id on success.
 */
4396 static enum ice_status
4397 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4399 struct ice_aqc_alloc_free_res_elem *sw_buf;
4400 enum ice_status status;
4403 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4404 return ICE_ERR_PARAM;
4406 /* Allocate resource for large action */
4407 buf_len = sizeof(*sw_buf);
4408 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4409 ice_malloc(hw, buf_len);
4411 return ICE_ERR_NO_MEMORY;
4413 sw_buf->num_elems = CPU_TO_LE16(1);
4415 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4416 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4417 * If num_acts is greater than 2, then use
4418 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4419 * The num_acts cannot exceed 4. This was ensured at the
4420 * beginning of the function.
4423 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4424 else if (num_acts == 2)
4425 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4427 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4429 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4430 ice_aqc_opc_alloc_res, NULL);
4432 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4434 ice_free(hw, sw_buf);
4439 * ice_add_mac_with_sw_marker - add filter with sw marker
4440 * @hw: pointer to the hardware structure
4441 * @f_info: filter info structure containing the MAC filter information
4442 * @sw_marker: sw marker to tag the Rx descriptor with
/* Install a MAC forward-to-VSI filter and attach a software marker to it
 * via a 3-action large action. The filter is added first (so the large-
 * action step is always an update); if the marker attach fails and the
 * filter did not pre-exist, the freshly-added filter is rolled back.
 */
4445 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4448 struct ice_switch_info *sw = hw->switch_info;
4449 struct ice_fltr_mgmt_list_entry *m_entry;
4450 struct ice_fltr_list_entry fl_info;
4451 struct LIST_HEAD_TYPE l_head;
4452 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4453 enum ice_status ret;
/* validate action, lookup type, marker id, and VSI handle up front */
4457 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4458 return ICE_ERR_PARAM;
4460 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4461 return ICE_ERR_PARAM;
4463 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4464 return ICE_ERR_PARAM;
4466 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4467 return ICE_ERR_PARAM;
4468 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4470 /* Add filter if it doesn't exist so then the adding of large
4471 * action always results in update
4474 INIT_LIST_HEAD(&l_head);
4475 fl_info.fltr_info = *f_info;
4476 LIST_ADD(&fl_info.list_entry, &l_head);
4478 entry_exists = false;
4479 ret = ice_add_mac(hw, &l_head);
4480 if (ret == ICE_ERR_ALREADY_EXISTS)
4481 entry_exists = true;
4485 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4486 ice_acquire_lock(rule_lock);
4487 /* Get the book keeping entry for the filter */
4488 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4492 /* If counter action was enabled for this rule then don't enable
4493 * sw marker large action
4495 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4496 ret = ICE_ERR_PARAM;
4500 /* if same marker was added before */
4501 if (m_entry->sw_marker_id == sw_marker) {
4502 ret = ICE_ERR_ALREADY_EXISTS;
4506 /* Allocate a hardware table entry to hold large act. Three actions
4507 * for marker based large action
4509 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4513 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4516 /* Update the switch rule to add the marker action */
4517 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4519 ice_release_lock(rule_lock);
4524 ice_release_lock(rule_lock);
4525 /* only remove entry if it did not exist previously */
4527 ret = ice_remove_mac(hw, &l_head);
4533 * ice_add_mac_with_counter - add filter with counter enabled
4534 * @hw: pointer to the hardware structure
4535 * @f_info: pointer to filter info structure containing the MAC filter
 * NOTE(review): several interior lines (labels, braces, comment delimiters)
 * are missing from this extraction; the comments below annotate only the
 * code that is visible.
4539 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4541 struct ice_switch_info *sw = hw->switch_info;
4542 struct ice_fltr_mgmt_list_entry *m_entry;
4543 struct ice_fltr_list_entry fl_info;
4544 struct LIST_HEAD_TYPE l_head;
4545 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4546 enum ice_status ret;
/* Only a MAC lookup filter that forwards to a valid VSI is accepted. */
4551 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4552 return ICE_ERR_PARAM;
4554 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4555 return ICE_ERR_PARAM;
4557 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4558 return ICE_ERR_PARAM;
/* Resolve the software VSI handle into the hardware VSI number. */
4559 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4561 entry_exist = false;
4563 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4565 /* Add filter if it doesn't exist so then the adding of large
4566 * action always results in update
4568 INIT_LIST_HEAD(&l_head);
4570 fl_info.fltr_info = *f_info;
4571 LIST_ADD(&fl_info.list_entry, &l_head);
4573 ret = ice_add_mac(hw, &l_head);
4574 if (ret == ICE_ERR_ALREADY_EXISTS)
4579 ice_acquire_lock(rule_lock);
/* Look up the bookkeeping entry for the MAC rule just added/updated. */
4580 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4582 ret = ICE_ERR_BAD_PTR;
4586 /* Don't enable counter for a filter for which sw marker was enabled */
4587 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4588 ret = ICE_ERR_PARAM;
4592 /* If a counter was already enabled then don't need to add again */
4593 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4594 ret = ICE_ERR_ALREADY_EXISTS;
4598 /* Allocate a hardware table entry to VLAN counter */
4599 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4603 /* Allocate a hardware table entry to hold large act. Two actions for
4604 * counter based large action
4606 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4610 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4613 /* Update the switch rule to add the counter action */
4614 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4616 ice_release_lock(rule_lock);
4621 ice_release_lock(rule_lock);
4622 /* only remove entry if it did not exist previously */
4624 ret = ice_remove_mac(hw, &l_head);
4629 /* This is mapping table entry that maps every word within a given protocol
4630 * structure to the real byte offset as per the specification of that
4632 * for example dst address is 3 words in ethertype header and corresponding
4633 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4634 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4635 * matching entry describing its field. This needs to be updated if new
4636 * structure is added to that union.
4638 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
/* Byte offsets of each extractable 16-bit match word within the given
 * protocol header.
 * NOTE(review): ice_fill_valid_words() indexes this table directly by the
 * enum ice_protocol_type value, so entry order must track that enum —
 * verify when adding entries.
 */
4639 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4640 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4641 { ICE_ETYPE_OL, { 0 } },
4642 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4643 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4644 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4645 26, 28, 30, 32, 34, 36, 38 } },
4646 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4647 26, 28, 30, 32, 34, 36, 38 } },
4648 { ICE_TCP_IL, { 0, 2 } },
4649 { ICE_UDP_OF, { 0, 2 } },
4650 { ICE_UDP_ILOS, { 0, 2 } },
4651 { ICE_SCTP_IL, { 0, 2 } },
4652 { ICE_VXLAN, { 8, 10, 12, 14 } },
4653 { ICE_GENEVE, { 8, 10, 12, 14 } },
4654 { ICE_VXLAN_GPE, { 0, 2, 4 } },
4655 { ICE_NVGRE, { 0, 2, 4, 6 } },
4656 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4657 { ICE_PPPOE, { 0, 2, 4, 6 } },
4658 { ICE_PROTOCOL_LAST, { 0 } }
4661 /* The following table describes preferred grouping of recipes.
4662 * If a recipe that needs to be programmed is a superset or matches one of the
4663 * following combinations, then the recipe needs to be chained as per the
4666 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
/* Each entry: number of valid pairs, up to four HW protocol/offset pairs,
 * and per-word match masks (0xffff = match the full 16-bit word).
 */
4667 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4668 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
4669 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4670 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4671 { 0xffff, 0xffff, 0xffff, 0xffff } },
4672 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4673 { 0xffff, 0xffff, 0xffff, 0xffff } },
4674 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4675 { 0xffff, 0xffff, 0xffff, 0xffff } },
4678 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
/* Software protocol type -> hardware protocol ID. Note that the UDP
 * tunnel types (VXLAN, GENEVE, VXLAN_GPE, GTP) all map to ICE_UDP_OF_HW.
 */
4679 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4680 { ICE_MAC_IL, ICE_MAC_IL_HW },
4681 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4682 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4683 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4684 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4685 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4686 { ICE_TCP_IL, ICE_TCP_IL_HW },
4687 { ICE_UDP_OF, ICE_UDP_OF_HW },
4688 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4689 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4690 { ICE_VXLAN, ICE_UDP_OF_HW },
4691 { ICE_GENEVE, ICE_UDP_OF_HW },
4692 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4693 { ICE_NVGRE, ICE_GRE_OF_HW },
4694 { ICE_GTP, ICE_UDP_OF_HW },
4695 { ICE_PPPOE, ICE_PPPOE_HW },
4696 { ICE_PROTOCOL_LAST, 0 }
4700 * ice_find_recp - find a recipe
4701 * @hw: pointer to the hardware structure
4702 * @lkup_exts: extension sequence to match
4704 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4706 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4708 bool refresh_required = true;
4709 struct ice_sw_recipe *recp;
4712 /* Walk through existing recipes to find a match */
4713 recp = hw->switch_info->recp_list;
4714 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4715 /* If recipe was not created for this ID, in SW bookkeeping,
4716 * check if FW has an entry for this recipe. If the FW has an
4717 * entry update it in our SW bookkeeping and continue with the
4720 if (!recp[i].recp_created)
4721 if (ice_get_recp_frm_fw(hw,
4722 hw->switch_info->recp_list, i,
4726 /* if number of words we are looking for match */
4727 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4728 struct ice_fv_word *a = lkup_exts->fv_words;
4729 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* O(n^2) set comparison: each requested (prot_id, off) word must
 * appear somewhere in the candidate recipe's word list.
 */
4733 for (p = 0; p < lkup_exts->n_val_words; p++) {
4734 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4736 if (a[p].off == b[q].off &&
4737 a[p].prot_id == b[q].prot_id)
4738 /* Found the "p"th word in the
4743 /* After walking through all the words in the
4744 * "i"th recipe if "p"th word was not found then
4745 * this recipe is not what we are looking for.
4746 * So break out from this loop and try the next
4749 if (q >= recp[i].lkup_exts.n_val_words) {
4754 /* If for "i"th recipe the found was never set to false
4755 * then it means we found our match
4758 return i; /* Return the recipe ID */
4761 return ICE_MAX_NUM_RECIPES;
4765 * ice_prot_type_to_id - get protocol ID from protocol type
4766 * @type: protocol type
4767 * @id: pointer to variable that will receive the ID
4769 * Returns true if found, false otherwise
4771 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of the SW-type -> HW-protocol-ID mapping table. */
4775 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4776 if (ice_prot_id_tbl[i].type == type) {
4777 *id = ice_prot_id_tbl[i].protocol_id;
4784 * ice_fill_valid_words - fill in the valid lookup words from a rule's mask
4785 * @rule: advanced rule with lookup information
4786 * @lkup_exts: byte offset extractions of the words that are valid
4788 * calculate valid words in a lookup rule using mask value
4791 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4792 struct ice_prot_lkup_ext *lkup_exts)
4798 if (!ice_prot_type_to_id(rule->type, &prot_id))
4801 word = lkup_exts->n_val_words;
/* Scan the rule's mask 16 bits at a time; every non-zero mask word adds
 * one extraction word (protocol ID + byte offset + mask) to lkup_exts.
 * NOTE(review): line 4812 indexes ice_prot_id_tbl[] by the raw enum value
 * instead of using the prot_id already looked up above — this relies on
 * the table order matching enum ice_protocol_type; verify.
 */
4803 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4804 if (((u16 *)&rule->m_u)[j] &&
4805 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4806 /* No more space to accommodate */
4807 if (word >= ICE_MAX_CHAIN_WORDS)
4809 lkup_exts->fv_words[word].off =
4810 ice_prot_ext[rule->type].offs[j];
4811 lkup_exts->fv_words[word].prot_id =
4812 ice_prot_id_tbl[rule->type].protocol_id;
4813 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Number of words contributed by this rule; n_val_words is cumulative. */
4817 ret_val = word - lkup_exts->n_val_words;
4818 lkup_exts->n_val_words = word;
4824 * ice_find_prot_off_ind - check for specific ID and offset in rule
4825 * @lkup_exts: an array of protocol header extractions
4826 * @prot_type: protocol type to check
4827 * @off: expected offset of the extraction
4829 * Check if the prot_ext has given protocol ID and offset
4832 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4837 for (j = 0; j < lkup_exts->n_val_words; j++)
4838 if (lkup_exts->fv_words[j].off == off &&
4839 lkup_exts->fv_words[j].prot_id == prot_type)
/* Not found: ICE_MAX_CHAIN_WORDS doubles as the "no index" sentinel. */
4842 return ICE_MAX_CHAIN_WORDS;
4846 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4847 * @lkup_exts: an array of protocol header extractions
4848 * @r_policy: preferred recipe grouping policy
4850 * Helper function to check if given recipe group is subset we need to check if
4851 * all the words described by the given recipe group exist in the advanced rule
4852 * look up information
4855 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4856 const struct ice_pref_recipe_group *r_policy)
4858 u8 ind[ICE_NUM_WORDS_RECIPE];
/* ind[] records, per matched policy word, its index within lkup_exts so
 * the words can be marked 'done' after a full match.
 */
4862 /* check if everything in the r_policy is part of the entire rule */
4863 for (i = 0; i < r_policy->n_val_pairs; i++) {
4866 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4867 r_policy->pairs[i].off);
4868 if (j >= ICE_MAX_CHAIN_WORDS)
4871 /* store the indexes temporarily found by the find function
4872 * this will be used to mark the words as 'done'
4877 /* If the entire policy recipe was a true match, then mark the fields
4878 * that are covered by the recipe as 'done' meaning that these words
4879 * will be clumped together in one recipe.
4880 * "Done" here means in our searching if certain recipe group
4881 * matches or is subset of the given rule, then we mark all
4882 * the corresponding offsets as found. So the remaining recipes should
4883 * be created with whatever words that were left.
4885 for (i = 0; i < count; i++) {
4888 ice_set_bit(in, lkup_exts->done);
4894 * ice_create_first_fit_recp_def - Create a recipe grouping
4895 * @hw: pointer to the hardware structure
4896 * @lkup_exts: an array of protocol header extractions
4897 * @rg_list: pointer to a list that stores new recipe groups
4898 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4900 * Using first fit algorithm, take all the words that are still not done
4901 * and start grouping them in 4-word groups. Each group makes up one
4904 static enum ice_status
4905 ice_create_first_fit_recp_def(struct ice_hw *hw,
4906 struct ice_prot_lkup_ext *lkup_exts,
4907 struct LIST_HEAD_TYPE *rg_list,
4910 struct ice_pref_recipe_group *grp = NULL;
4915 /* Walk through every word in the rule to check if it is not done. If so
4916 * then this word needs to be part of a new recipe.
4918 for (j = 0; j < lkup_exts->n_val_words; j++)
4919 if (!ice_is_bit_set(lkup_exts->done, j)) {
4921 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4922 struct ice_recp_grp_entry *entry;
4924 entry = (struct ice_recp_grp_entry *)
4925 ice_malloc(hw, sizeof(*entry));
4927 return ICE_ERR_NO_MEMORY;
4928 LIST_ADD(&entry->l_entry, rg_list);
4929 grp = &entry->r_group;
/* Append this word's (prot_id, off, mask) triple to the open group. */
4933 grp->pairs[grp->n_val_pairs].prot_id =
4934 lkup_exts->fv_words[j].prot_id;
4935 grp->pairs[grp->n_val_pairs].off =
4936 lkup_exts->fv_words[j].off;
4937 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4945 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4946 * @hw: pointer to the hardware structure
4947 * @fv_list: field vector with the extraction sequence information
4948 * @rg_list: recipe groupings with protocol-offset pairs
4950 * Helper function to fill in the field vector indices for protocol-offset
4951 * pairs. These indexes are then ultimately programmed into a recipe.
4954 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4955 struct LIST_HEAD_TYPE *rg_list)
4957 struct ice_sw_fv_list_entry *fv;
4958 struct ice_recp_grp_entry *rg;
4959 struct ice_fv_word *fv_ext;
4961 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted. */
4964 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4965 fv_ext = fv->fv_ptr->ew;
4967 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4970 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4971 struct ice_fv_word *pr;
4975 pr = &rg->r_group.pairs[i];
4976 mask = rg->r_group.mask[i];
/* Scan the extraction words for a matching (prot_id, off) pair. */
4978 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4979 if (fv_ext[j].prot_id == pr->prot_id &&
4980 fv_ext[j].off == pr->off) {
4981 /* Store index of field vector */
4983 /* Mask is given by caller as big
4984 * endian, but sent to FW as little
4987 rg->fv_mask[i] = mask << 8 | mask >> 8;
4995 * ice_find_free_recp_res_idx - find free result indexes for recipe
4996 * @hw: pointer to hardware structure
4997 * @profiles: bitmap of profiles that will be associated with the new recipe
4998 * @free_idx: pointer to variable to receive the free index bitmap
5000 * The algorithm used here is:
5001 * 1. When creating a new recipe, create a set P which contains all
5002 * Profiles that will be associated with our new recipe
5004 * 2. For each Profile p in set P:
5005 * a. Add all recipes associated with Profile p into set R
5006 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5007 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5008 * i. Or just assume they all have the same possible indexes:
5010 * i.e., PossibleIndexes = 0x0000F00000000000
5012 * 3. For each Recipe r in set R:
5013 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5014 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5016 * FreeIndexes will contain the bits indicating the indexes free for use,
5017 * then the code needs to update the recipe[r].used_result_idx_bits to
5018 * indicate which indexes were selected for use by this recipe.
5021 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5022 ice_bitmap_t *free_idx)
5024 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5025 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5026 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5030 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5031 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5032 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5033 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
/* Seed the "possible" set from the compile-time ICE_POSSIBLE_RES_IDX mask. */
5035 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
5036 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
5037 ice_set_bit(bit, possible_idx);
5039 /* For each profile we are going to associate the recipe with, add the
5040 * recipes that are associated with that profile. This will give us
5041 * the set of recipes that our recipe may collide with.
5044 while (ICE_MAX_NUM_PROFILES >
5045 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5046 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5047 ICE_MAX_NUM_RECIPES);
5052 /* For each recipe that our new recipe may collide with, determine
5053 * which indexes have been used.
5055 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5056 if (ice_is_bit_set(recipes, bit))
5057 ice_or_bitmap(used_idx, used_idx,
5058 hw->switch_info->recp_list[bit].res_idxs,
/* FreeIndexes = UsedIndexes ^ PossibleIndexes (step 3b of the algorithm). */
5061 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5063 /* return number of free indexes */
5065 while (ICE_MAX_FV_WORDS >
5066 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5075 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5076 * @hw: pointer to hardware structure
5077 * @rm: recipe management list entry
5078 * @match_tun: if field vector index for tunnel needs to be programmed
5079 * @profiles: bitmap of profiles that will be associated.
5081 static enum ice_status
5082 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5083 bool match_tun, ice_bitmap_t *profiles)
5085 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5086 struct ice_aqc_recipe_data_elem *tmp;
5087 struct ice_aqc_recipe_data_elem *buf;
5088 struct ice_recp_grp_entry *entry;
5089 enum ice_status status;
5095 /* When more than one recipe are required, another recipe is needed to
5096 * chain them together. Matching a tunnel metadata ID takes up one of
5097 * the match fields in the chaining recipe reducing the number of
5098 * chained recipes by one.
5100 /* check number of free result indices */
5101 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5102 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
/* A chained (multi-group) recipe needs one free result index per group. */
5104 if (rm->n_grp_count > 1) {
5105 if (rm->n_grp_count > free_res_idx)
5106 return ICE_ERR_MAX_LIMIT;
5111 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5112 ICE_MAX_NUM_RECIPES,
5115 return ICE_ERR_NO_MEMORY;
5117 buf = (struct ice_aqc_recipe_data_elem *)
5118 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5120 status = ICE_ERR_NO_MEMORY;
5124 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5125 recipe_count = ICE_MAX_NUM_RECIPES;
5126 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5128 if (status || recipe_count == 0)
5131 /* Allocate the recipe resources, and configure them according to the
5132 * match fields from protocol headers and extracted field vectors.
5134 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
/* Build one AQ recipe element (buf[recps]) per recipe group. */
5135 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5138 status = ice_alloc_recipe(hw, &entry->rid);
5142 /* Clear the result index of the located recipe, as this will be
5143 * updated, if needed, later in the recipe creation process.
5145 tmp[0].content.result_indx = 0;
5147 buf[recps] = tmp[0];
5148 buf[recps].recipe_indx = (u8)entry->rid;
5149 /* if the recipe is a non-root recipe RID should be programmed
5150 * as 0 for the rules to be applied correctly.
5152 buf[recps].content.rid = 0;
5153 ice_memset(&buf[recps].content.lkup_indx, 0,
5154 sizeof(buf[recps].content.lkup_indx),
5157 /* All recipes use look-up index 0 to match switch ID. */
5158 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5159 buf[recps].content.mask[0] =
5160 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5161 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5164 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5165 buf[recps].content.lkup_indx[i] = 0x80;
5166 buf[recps].content.mask[i] = 0;
5169 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5170 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5171 buf[recps].content.mask[i + 1] =
5172 CPU_TO_LE16(entry->fv_mask[i]);
5175 if (rm->n_grp_count > 1) {
5176 /* Checks to see if there really is a valid result index
5179 if (chain_idx >= ICE_MAX_FV_WORDS) {
5180 ice_debug(hw, ICE_DBG_SW,
5181 "No chain index available\n");
5182 status = ICE_ERR_MAX_LIMIT;
5186 entry->chain_idx = chain_idx;
5187 buf[recps].content.result_indx =
5188 ICE_AQ_RECIPE_RESULT_EN |
5189 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5190 ICE_AQ_RECIPE_RESULT_DATA_M);
5191 ice_clear_bit(chain_idx, result_idx_bm);
5192 chain_idx = ice_find_first_bit(result_idx_bm,
5196 /* fill recipe dependencies */
5197 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5198 ICE_MAX_NUM_RECIPES);
5199 ice_set_bit(buf[recps].recipe_indx,
5200 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5201 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5205 if (rm->n_grp_count == 1) {
5206 rm->root_rid = buf[0].recipe_indx;
5207 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5208 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5209 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5210 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5211 sizeof(buf[0].recipe_bitmap),
5212 ICE_NONDMA_TO_NONDMA);
5214 status = ICE_ERR_BAD_PTR;
5217 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5218 * the recipe which is getting created if specified
5219 * by user. Usually any advanced switch filter, which results
5220 * into new extraction sequence, ended up creating a new recipe
5221 * of type ROOT and usually recipes are associated with profiles
5222 * Switch rule referring newly created recipe, needs to have
5223 * either/or 'fwd' or 'join' priority, otherwise switch rule
5224 * evaluation will not happen correctly. In other words, if
5225 * switch rule to be evaluated on priority basis, then recipe
5226 * needs to have priority, otherwise it will be evaluated last.
5228 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5230 struct ice_recp_grp_entry *last_chain_entry;
5233 /* Allocate the last recipe that will chain the outcomes of the
5234 * other recipes together
5236 status = ice_alloc_recipe(hw, &rid);
5240 buf[recps].recipe_indx = (u8)rid;
5241 buf[recps].content.rid = (u8)rid;
5242 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5243 /* the new entry created should also be part of rg_list to
5244 * make sure we have complete recipe
5246 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5247 sizeof(*last_chain_entry));
5248 if (!last_chain_entry) {
5249 status = ICE_ERR_NO_MEMORY;
5252 last_chain_entry->rid = rid;
5253 ice_memset(&buf[recps].content.lkup_indx, 0,
5254 sizeof(buf[recps].content.lkup_indx),
5256 /* All recipes use look-up index 0 to match switch ID. */
5257 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5258 buf[recps].content.mask[0] =
5259 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5260 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5261 buf[recps].content.lkup_indx[i] =
5262 ICE_AQ_RECIPE_LKUP_IGNORE;
5263 buf[recps].content.mask[i] = 0;
5267 /* update r_bitmap with the recp that is used for chaining */
5268 ice_set_bit(rid, rm->r_bitmap);
5269 /* this is the recipe that chains all the other recipes so it
5270 * should not have a chaining ID to indicate the same
5272 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5273 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5275 last_chain_entry->fv_idx[i] = entry->chain_idx;
5276 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5277 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5278 ice_set_bit(entry->rid, rm->r_bitmap);
5280 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5281 if (sizeof(buf[recps].recipe_bitmap) >=
5282 sizeof(rm->r_bitmap)) {
5283 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5284 sizeof(buf[recps].recipe_bitmap),
5285 ICE_NONDMA_TO_NONDMA);
5287 status = ICE_ERR_BAD_PTR;
5290 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5292 /* To differentiate among different UDP tunnels, a meta data ID
5296 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5297 buf[recps].content.mask[i] =
5298 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5302 rm->root_rid = (u8)rid;
5304 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5308 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5309 ice_release_change_lock(hw);
5313 /* Every recipe that just got created add it to the recipe
5316 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5317 struct ice_switch_info *sw = hw->switch_info;
5318 bool is_root, idx_found = false;
5319 struct ice_sw_recipe *recp;
5320 u16 idx, buf_idx = 0;
5322 /* find buffer index for copying some data */
5323 for (idx = 0; idx < rm->n_grp_count; idx++)
5324 if (buf[idx].recipe_indx == entry->rid) {
5330 status = ICE_ERR_OUT_OF_RANGE;
5334 recp = &sw->recp_list[entry->rid];
5335 is_root = (rm->root_rid == entry->rid);
5336 recp->is_root = is_root;
5338 recp->root_rid = entry->rid;
5339 recp->big_recp = (is_root && rm->n_grp_count > 1);
5341 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5342 entry->r_group.n_val_pairs *
5343 sizeof(struct ice_fv_word),
5344 ICE_NONDMA_TO_NONDMA);
5346 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5347 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5349 /* Copy non-result fv index values and masks to recipe. This
5350 * call will also update the result recipe bitmask.
5352 ice_collect_result_idx(&buf[buf_idx], recp);
5354 /* for non-root recipes, also copy to the root, this allows
5355 * easier matching of a complete chained recipe
5358 ice_collect_result_idx(&buf[buf_idx],
5359 &sw->recp_list[rm->root_rid]);
5361 recp->n_ext_words = entry->r_group.n_val_pairs;
5362 recp->chain_idx = entry->chain_idx;
5363 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5364 recp->tun_type = rm->tun_type;
5365 recp->recp_created = true;
5380 * ice_create_recipe_group - creates recipe group
5381 * @hw: pointer to hardware structure
5382 * @rm: recipe management list entry
5383 * @lkup_exts: lookup elements
5385 static enum ice_status
5386 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5387 struct ice_prot_lkup_ext *lkup_exts)
5389 struct ice_recp_grp_entry *entry;
5390 struct ice_recp_grp_entry *tmp;
5391 enum ice_status status;
/* Start from zero groups; counts are accumulated as groups are created. */
5395 rm->n_grp_count = 0;
5398 if (lkup_exts->n_val_words > ICE_NUM_WORDS_RECIPE) {
5399 /* Each switch recipe can match up to 5 words or metadata. One
5400 * word in each recipe is used to match the switch ID. Four
5401 * words are left for matching other values. If the new advanced
5402 * recipe requires more than 4 words, it needs to be split into
5403 * multiple recipes which are chained together using the
5404 * intermediate result that each produces as input to the other
5405 * recipes in the sequence.
5407 groups = ARRAY_SIZE(ice_recipe_pack);
5409 /* Check if any of the preferred recipes from the grouping
5412 for (i = 0; i < groups; i++)
5413 /* Check if the recipe from the preferred grouping
5414 * matches or is a subset of the fields that needs to be
5417 if (ice_is_recipe_subset(lkup_exts,
5418 &ice_recipe_pack[i])) {
5419 /* This recipe can be used by itself or grouped
5420 * with other recipes.
5422 entry = (struct ice_recp_grp_entry *)
5423 ice_malloc(hw, sizeof(*entry));
5425 status = ICE_ERR_NO_MEMORY;
5428 entry->r_group = ice_recipe_pack[i];
5429 LIST_ADD(&entry->l_entry, &rm->rg_list);
5434 /* Create recipes for words that are marked not done by packing them
5437 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5438 &rm->rg_list, &recp_count);
5440 rm->n_grp_count += recp_count;
5441 rm->n_ext_words = lkup_exts->n_val_words;
5442 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5443 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5444 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5445 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path: free every recipe-group entry queued on rg_list. */
5450 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5452 LIST_DEL(&entry->l_entry);
5453 ice_free(hw, entry);
5461 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5462 * @hw: pointer to hardware structure
5463 * @lkups: lookup elements or match criteria for the advanced recipe, one
5464 * structure per protocol header
5465 * @lkups_cnt: number of protocols
5466 * @fv_list: pointer to a list that holds the returned field vectors
5468 static enum ice_status
5469 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5470 struct LIST_HEAD_TYPE *fv_list)
5472 enum ice_status status;
5476 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5478 return ICE_ERR_NO_MEMORY;
/* Translate each lookup element's type into its HW protocol ID. */
5480 for (i = 0; i < lkups_cnt; i++)
5481 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5482 status = ICE_ERR_CFG;
5486 /* Find field vectors that include all specified protocol types */
5487 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
5490 ice_free(hw, prot_ids);
5495 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5496 * @hw: pointer to hardware structure
5497 * @lkups: lookup elements or match criteria for the advanced recipe, one
5498 * structure per protocol header
5499 * @lkups_cnt: number of protocols
5500 * @rinfo: other information regarding the rule e.g. priority and action info
5501 * @rid: return the recipe ID of the recipe created
 *
 * Builds the extraction words from the lookups, groups them into recipes,
 * reuses an existing matching recipe when one is found, otherwise creates
 * and programs a new (possibly chained) recipe and associates it with the
 * relevant profiles.
5503 static enum ice_status
5504 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5505 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5507 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5508 struct ice_prot_lkup_ext *lkup_exts;
5509 struct ice_recp_grp_entry *r_entry;
5510 struct ice_sw_fv_list_entry *fvit;
5511 struct ice_recp_grp_entry *r_tmp;
5512 struct ice_sw_fv_list_entry *tmp;
5513 enum ice_status status = ICE_SUCCESS;
5514 struct ice_sw_recipe *rm;
5515 bool match_tun = false;
5519 return ICE_ERR_PARAM;
5521 lkup_exts = (struct ice_prot_lkup_ext *)
5522 ice_malloc(hw, sizeof(*lkup_exts));
5524 return ICE_ERR_NO_MEMORY;
5526 /* Determine the number of words to be matched and if it exceeds a
5527 * recipe's restrictions
5529 for (i = 0; i < lkups_cnt; i++) {
5532 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5533 status = ICE_ERR_CFG;
5534 goto err_free_lkup_exts;
5537 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5539 status = ICE_ERR_CFG;
5540 goto err_free_lkup_exts;
5544 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5546 status = ICE_ERR_NO_MEMORY;
5547 goto err_free_lkup_exts;
5550 /* Get field vectors that contain fields extracted from all the protocol
5551 * headers being programmed.
5553 INIT_LIST_HEAD(&rm->fv_list);
5554 INIT_LIST_HEAD(&rm->rg_list);
5556 status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
5560 /* Group match words into recipes using preferred recipe grouping
5563 status = ice_create_recipe_group(hw, rm, lkup_exts);
5567 /* There is only profile for UDP tunnels. So, it is necessary to use a
5568 * metadata ID flag to differentiate different tunnel types. A separate
5569 * recipe needs to be used for the metadata.
5571 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5572 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5573 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5576 /* set the recipe priority if specified */
5577 rm->priority = rinfo->priority ? rinfo->priority : 0;
5579 /* Find offsets from the field vector. Pick the first one for all the
5582 ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5584 /* get bitmap of all profiles the recipe will be associated with */
5585 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5586 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5588 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5589 ice_set_bit((u16)fvit->profile_id, profiles);
5592 /* Look for a recipe which matches our requested fv / mask list */
5593 *rid = ice_find_recp(hw, lkup_exts);
5594 if (*rid < ICE_MAX_NUM_RECIPES)
5595 /* Success if found a recipe that match the existing criteria */
5598 /* Recipe we need does not exist, add a recipe */
5599 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5603 /* Associate all the recipes created with all the profiles in the
5604 * common field vector.
5606 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5608 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5610 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5611 (u8 *)r_bitmap, NULL);
5615 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5616 ICE_MAX_NUM_RECIPES);
5617 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5621 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5624 ice_release_change_lock(hw);
5630 *rid = rm->root_rid;
5631 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5632 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release the recipe-group list, field-vector list and buffers. */
5634 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5635 ice_recp_grp_entry, l_entry) {
5636 LIST_DEL(&r_entry->l_entry);
5637 ice_free(hw, r_entry);
5640 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5642 LIST_DEL(&fvit->list_entry);
5647 ice_free(hw, rm->root_buf);
5652 ice_free(hw, lkup_exts);
/* NOTE(review): the embedded original line numbers jump (e.g. 5680 -> 5683),
 * so `return;` statements and closing braces between the template-selection
 * branches are elided from this excerpt; comments describe only what is
 * visible here.
 */
5658 * ice_find_dummy_packet - find dummy packet by tunnel type
5660 * @lkups: lookup elements or match criteria for the advanced recipe, one
5661 * structure per protocol header
5662 * @lkups_cnt: number of protocols
5663 * @tun_type: tunnel type from the match criteria
5664 * @pkt: dummy packet to fill according to filter match criteria
5665 * @pkt_len: packet length of dummy packet
5666 * @offsets: pointer to receive the pointer to the offsets for the packet
5669 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5670 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5672 const struct ice_dummy_pkt_offsets **offsets)
5674 bool tcp = false, udp = false, ipv6 = false;
/* GTP and PPPoE templates are selected purely by tunnel type; the inner
 * protocol scan below is not needed for them (presumably each branch
 * returns early — the elided lines would confirm).
 */
5677 if (tun_type == ICE_SW_TUN_GTP) {
5678 *pkt = dummy_udp_gtp_packet;
5679 *pkt_len = sizeof(dummy_udp_gtp_packet);
5680 *offsets = dummy_udp_gtp_packet_offsets;
5683 if (tun_type == ICE_SW_TUN_PPPOE) {
5684 *pkt = dummy_pppoe_packet;
5685 *pkt_len = sizeof(dummy_pppoe_packet);
5686 *offsets = dummy_pppoe_packet_offsets;
/* Scan the lookups to learn which inner L3/L4 headers the caller is
 * matching on; the flag assignments themselves are elided in this excerpt.
 */
5689 for (i = 0; i < lkups_cnt; i++) {
5690 if (lkups[i].type == ICE_UDP_ILOS)
5692 else if (lkups[i].type == ICE_TCP_IL)
5694 else if (lkups[i].type == ICE_IPV6_OFOS)
5698 if (tun_type == ICE_ALL_TUNNELS) {
5699 *pkt = dummy_gre_udp_packet;
5700 *pkt_len = sizeof(dummy_gre_udp_packet);
5701 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: TCP inner template vs UDP inner template (the selecting
 * condition between 5705 and 5713 is elided).
 */
5705 if (tun_type == ICE_SW_TUN_NVGRE) {
5707 *pkt = dummy_gre_tcp_packet;
5708 *pkt_len = sizeof(dummy_gre_tcp_packet);
5709 *offsets = dummy_gre_tcp_packet_offsets;
5713 *pkt = dummy_gre_udp_packet;
5714 *pkt_len = sizeof(dummy_gre_udp_packet);
5715 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels (VXLAN/GENEVE/VXLAN-GPE/generic UDP): pick the
 * TCP-inner or UDP-inner tunnel template.
 */
5719 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5720 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5722 *pkt = dummy_udp_tun_tcp_packet;
5723 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5724 *offsets = dummy_udp_tun_tcp_packet_offsets;
5728 *pkt = dummy_udp_tun_udp_packet;
5729 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5730 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled fallbacks, keyed on the udp/tcp/ipv6 flags gathered above;
 * the final branch defaults to the plain IPv4 TCP template.
 */
5735 *pkt = dummy_udp_packet;
5736 *pkt_len = sizeof(dummy_udp_packet);
5737 *offsets = dummy_udp_packet_offsets;
5739 } else if (udp && ipv6) {
5740 *pkt = dummy_udp_ipv6_packet;
5741 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5742 *offsets = dummy_udp_ipv6_packet_offsets;
5744 } else if ((tcp && ipv6) || ipv6) {
5745 *pkt = dummy_tcp_ipv6_packet;
5746 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5747 *offsets = dummy_tcp_ipv6_packet_offsets;
5751 *pkt = dummy_tcp_packet;
5752 *pkt_len = sizeof(dummy_tcp_packet);
5753 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): original line numbers jump in this excerpt; several case
 * labels, `break`s and error branches are elided. Comments below only state
 * what the visible lines establish.
 */
5757 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5759 * @lkups: lookup elements or match criteria for the advanced recipe, one
5760 * structure per protocol header
5761 * @lkups_cnt: number of protocols
5762 * @s_rule: stores rule information from the match criteria
5763 * @dummy_pkt: dummy packet to fill according to filter match criteria
5764 * @pkt_len: packet length of dummy packet
5765 * @offsets: offset info for the dummy packet
5767 static enum ice_status
5768 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5769 struct ice_aqc_sw_rules_elem *s_rule,
5770 const u8 *dummy_pkt, u16 pkt_len,
5771 const struct ice_dummy_pkt_offsets *offsets)
5776 /* Start with a packet with a pre-defined/dummy content. Then, fill
5777 * in the header values to be looked up or matched.
5779 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5781 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5783 for (i = 0; i < lkups_cnt; i++) {
5784 enum ice_protocol_type type;
5785 u16 offset = 0, len = 0, j;
5788 /* find the start of this layer; it should be found since this
5789 * was already checked when search for the dummy packet
5791 type = lkups[i].type;
5792 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5793 if (type == offsets[j].type) {
5794 offset = offsets[j].offset;
5799 /* this should never happen in a correct calling sequence */
5801 return ICE_ERR_PARAM;
/* Map each protocol type to the byte length of its header struct; the
 * case labels themselves are elided here — only the `len =` lines and the
 * unknown-type error path are visible.
 */
5803 switch (lkups[i].type) {
5806 len = sizeof(struct ice_ether_hdr);
5809 len = sizeof(struct ice_ethtype_hdr);
5813 len = sizeof(struct ice_ipv4_hdr);
5817 len = sizeof(struct ice_ipv6_hdr);
5822 len = sizeof(struct ice_l4_hdr);
5825 len = sizeof(struct ice_sctp_hdr);
5828 len = sizeof(struct ice_nvgre);
5833 len = sizeof(struct ice_udp_tnl_hdr);
5837 len = sizeof(struct ice_udp_gtp_hdr);
5840 return ICE_ERR_PARAM;
5843 /* the length should be a word multiple */
5844 if (len % ICE_BYTES_PER_WORD)
5847 /* We have the offset to the header start, the length, the
5848 * caller's header values and mask. Use this information to
5849 * copy the data into the dummy packet appropriately based on
5850 * the mask. Note that we need to only write the bits as
5851 * indicated by the mask to make sure we don't improperly write
5852 * over any significant packet data.
/* Masked 16-bit merge: for each word with a non-zero mask, keep the
 * template bits outside the mask and overlay the caller's header bits
 * inside it.
 */
5854 for (j = 0; j < len / sizeof(u16); j++)
5855 if (((u16 *)&lkups[i].m_u)[j])
5856 ((u16 *)(pkt + offset))[j] =
5857 (((u16 *)(pkt + offset))[j] &
5858 ~((u16 *)&lkups[i].m_u)[j]) |
5859 (((u16 *)&lkups[i].h_u)[j] &
5860 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the switch rule (little-endian). */
5863 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): this excerpt elides interior lines (the `continue`s, loop
 * braces, and the tail of the final condition/return are missing).
 */
5869 * ice_find_adv_rule_entry - Search a rule entry
5870 * @hw: pointer to the hardware structure
5871 * @lkups: lookup elements or match criteria for the advanced recipe, one
5872 * structure per protocol header
5873 * @lkups_cnt: number of protocols
5874 * @recp_id: recipe ID for which we are finding the rule
5875 * @rinfo: other information regarding the rule e.g. priority and action info
5877 * Helper function to search for a given advance rule entry
5878 * Returns pointer to entry storing the rule if found
5880 static struct ice_adv_fltr_mgmt_list_entry *
5881 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5882 u16 lkups_cnt, u8 recp_id,
5883 struct ice_adv_rule_info *rinfo)
5885 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5886 struct ice_switch_info *sw = hw->switch_info;
/* Walk the filter list of the given recipe; an entry matches only if it
 * has the same lookup count, byte-identical lookups (memcmp), and — per
 * the visible condition — the same switch-action flag and tunnel type.
 */
5889 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5890 ice_adv_fltr_mgmt_list_entry, list_entry) {
5891 bool lkups_matched = true;
5893 if (lkups_cnt != list_itr->lkups_cnt)
5895 for (i = 0; i < list_itr->lkups_cnt; i++)
5896 if (memcmp(&list_itr->lkups[i], &lkups[i],
5898 lkups_matched = false;
5901 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5902 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): original line numbers jump; several error-checking and
 * status-handling lines are elided from this excerpt.
 */
5910 * ice_adv_add_update_vsi_list
5911 * @hw: pointer to the hardware structure
5912 * @m_entry: pointer to current adv filter management list entry
5913 * @cur_fltr: filter information from the book keeping entry
5914 * @new_fltr: filter information with the new VSI to be added
5916 * Call AQ command to add or update previously created VSI list with new VSI.
5918 * Helper function to do book keeping associated with adding filter information
5919 * The algorithm to do the booking keeping is described below :
5920 * When a VSI needs to subscribe to a given advanced filter
5921 * if only one VSI has been added till now
5922 * Allocate a new VSI list and add two VSIs
5923 * to this list using switch rule command
5924 * Update the previously created switch rule with the
5925 * newly created VSI list ID
5926 * if a VSI list was previously created
5927 * Add the new VSI to the previously created VSI list set
5928 * using the update switch rule command
5930 static enum ice_status
5931 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5932 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5933 struct ice_adv_rule_info *cur_fltr,
5934 struct ice_adv_rule_info *new_fltr)
5936 enum ice_status status;
5937 u16 vsi_list_id = 0;
/* Guard clauses: queue/queue-group forwards cannot be merged into a VSI
 * list; duplicate DROP rules already exist; mixing queue forwards with
 * VSI forwards is not implemented.
 */
5939 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5940 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5941 return ICE_ERR_NOT_IMPL;
5943 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5944 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5945 return ICE_ERR_ALREADY_EXISTS;
5947 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5948 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5949 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5950 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5951 return ICE_ERR_NOT_IMPL;
5953 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5954 /* Only one entry existed in the mapping and it was not already
5955 * a part of a VSI list. So, create a VSI list with the old and
5958 struct ice_fltr_info tmp_fltr;
5959 u16 vsi_handle_arr[2];
5961 /* A rule already exists with the new VSI being added */
5962 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5963 new_fltr->sw_act.fwd_id.hw_vsi_id)
5964 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list (old + new) via a switch rule command. */
5966 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5967 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5968 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5974 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5975 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5976 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5977 /* Update the previous switch rule of "forward to VSI" to
5980 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Retarget the bookkeeping entry at the newly created VSI list. */
5984 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5985 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5986 m_entry->vsi_list_info =
5987 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5990 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5992 if (!m_entry->vsi_list_info)
5995 /* A rule already exists with the new VSI being added */
5996 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5999 /* Update the previously created VSI list set with
6000 * the new VSI ID passed in
6002 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6004 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6006 ice_aqc_opc_update_sw_rules,
6008 /* update VSI list mapping info with new VSI ID */
6010 ice_set_bit(vsi_handle,
6011 m_entry->vsi_list_info->vsi_map);
/* Track how many VSIs subscribe to this filter. */
6014 m_entry->vsi_count++;
/* NOTE(review): interior lines are elided throughout this excerpt (word
 * counting, some `break`s, error checks). Comments describe only the
 * visible flow.
 */
6019 * ice_add_adv_rule - helper function to create an advanced switch rule
6020 * @hw: pointer to the hardware structure
6021 * @lkups: information on the words that needs to be looked up. All words
6022 * together makes one recipe
6023 * @lkups_cnt: num of entries in the lkups array
6024 * @rinfo: other information related to the rule that needs to be programmed
6025 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6026 * ignored is case of error.
6028 * This function can program only 1 rule at a time. The lkups is used to
6029 * describe the all the words that forms the "lookup" portion of the recipe.
6030 * These words can span multiple protocols. Callers to this function need to
6031 * pass in a list of protocol headers with lookup information along and mask
6032 * that determines which words are valid from the given protocol header.
6033 * rinfo describes other information related to this rule such as forwarding
6034 * IDs, priority of this rule, etc.
6037 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6038 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6039 struct ice_rule_query_data *added_entry)
6041 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6042 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6043 const struct ice_dummy_pkt_offsets *pkt_offsets;
6044 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6045 struct LIST_HEAD_TYPE *rule_head;
6046 struct ice_switch_info *sw;
6047 enum ice_status status;
6048 const u8 *pkt = NULL;
6054 return ICE_ERR_PARAM;
6056 /* get # of words we need to match */
/* Count non-zero 16-bit mask words across all lookups; zero words or more
 * than ICE_MAX_CHAIN_WORDS is a parameter error.
 */
6058 for (i = 0; i < lkups_cnt; i++) {
6061 ptr = (u16 *)&lkups[i].m_u;
6062 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6066 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6067 return ICE_ERR_PARAM;
6069 /* make sure that we can locate a dummy packet */
6070 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6073 status = ICE_ERR_PARAM;
6074 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported for advanced rules. */
6077 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6078 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6079 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6080 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6083 vsi_handle = rinfo->sw_act.vsi_handle;
6084 if (!ice_is_vsi_valid(hw, vsi_handle))
6085 return ICE_ERR_PARAM;
6087 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6088 rinfo->sw_act.fwd_id.hw_vsi_id =
6089 ice_get_hw_vsi_num(hw, vsi_handle);
6090 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6091 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or reuse) the recipe that matches this set of lookups. */
6093 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6096 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6098 /* we have to add VSI to VSI_LIST and increment vsi_count.
6099 * Also Update VSI list so that we can change forwarding rule
6100 * if the rule already exists, we will check if it exists with
6101 * same vsi_id, if not then add it to the VSI list if it already
6102 * exists if not then create a VSI list and add the existing VSI
6103 * ID and the new VSI ID to the list
6104 * We will add that VSI to the list
6106 status = ice_adv_add_update_vsi_list(hw, m_entry,
6107 &m_entry->rule_info,
6110 added_entry->rid = rid;
6111 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6112 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule matched: allocate an AQ switch-rule buffer sized for
 * the rule header plus the dummy packet.
 */
6116 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6117 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6119 return ICE_ERR_NO_MEMORY;
6120 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the single-action word for the requested forwarding behavior. */
6121 switch (rinfo->sw_act.fltr_act) {
6122 case ICE_FWD_TO_VSI:
6123 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6124 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6125 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6128 act |= ICE_SINGLE_ACT_TO_Q;
6129 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6130 ICE_SINGLE_ACT_Q_INDEX_M;
6132 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size. */
6133 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6134 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6135 act |= ICE_SINGLE_ACT_TO_Q;
6136 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6137 ICE_SINGLE_ACT_Q_INDEX_M;
6138 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6139 ICE_SINGLE_ACT_Q_REGION_M;
6141 case ICE_DROP_PACKET:
6142 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6143 ICE_SINGLE_ACT_VALID_BIT;
6146 status = ICE_ERR_CFG;
6147 goto err_ice_add_adv_rule;
6150 /* set the rule LOOKUP type based on caller specified 'RX'
6151 * instead of hardcoding it to be either LOOKUP_TX/RX
6153 * for 'RX' set the source to be the port number
6154 * for 'TX' set the source to be the source HW VSI number (determined
6158 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6159 s_rule->pdata.lkup_tx_rx.src =
6160 CPU_TO_LE16(hw->port_info->lport);
6162 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6163 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6166 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6167 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Fill the rule's packet header with the caller's match values. */
6169 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
/* Program the rule into hardware via the admin queue. */
6172 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6173 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6176 goto err_ice_add_adv_rule;
/* Build the bookkeeping entry: copy the lookups and rule info so the
 * rule can later be found, replayed, or removed.
 */
6177 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6178 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6180 status = ICE_ERR_NO_MEMORY;
6181 goto err_ice_add_adv_rule;
6184 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6185 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6186 ICE_NONDMA_TO_NONDMA);
6187 if (!adv_fltr->lkups) {
6188 status = ICE_ERR_NO_MEMORY;
6189 goto err_ice_add_adv_rule;
6192 adv_fltr->lkups_cnt = lkups_cnt;
6193 adv_fltr->rule_info = *rinfo;
6194 adv_fltr->rule_info.fltr_rule_id =
6195 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6196 sw = hw->switch_info;
6197 sw->recp_list[rid].adv_rule = true;
6198 rule_head = &sw->recp_list[rid].filt_rules;
6200 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6201 struct ice_fltr_info tmp_fltr;
6203 tmp_fltr.fltr_rule_id =
6204 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6205 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6206 tmp_fltr.fwd_id.hw_vsi_id =
6207 ice_get_hw_vsi_num(hw, vsi_handle);
6208 tmp_fltr.vsi_handle = vsi_handle;
6209 /* Update the previous switch rule of "forward to VSI" to
6212 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6214 goto err_ice_add_adv_rule;
6215 adv_fltr->vsi_count = 1;
6218 /* Add rule entry to book keeping list */
6219 LIST_ADD(&adv_fltr->list_entry, rule_head);
6221 added_entry->rid = rid;
6222 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6223 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error path: free partially built bookkeeping and the AQ buffer. */
6225 err_ice_add_adv_rule:
6226 if (status && adv_fltr) {
6227 ice_free(hw, adv_fltr->lkups);
6228 ice_free(hw, adv_fltr);
6231 ice_free(hw, s_rule);
/* NOTE(review): interior lines (some status checks and declarations) are
 * elided from this excerpt.
 */
6237 * ice_adv_rem_update_vsi_list
6238 * @hw: pointer to the hardware structure
6239 * @vsi_handle: VSI handle of the VSI to remove
6240 * @fm_list: filter management entry for which the VSI list management needs to
6243 static enum ice_status
6244 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6245 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6247 struct ice_vsi_list_map_info *vsi_list_info;
6248 enum ice_sw_lkup_type lkup_type;
6249 enum ice_status status;
/* Only entries currently forwarding to a VSI list with at least one
 * subscribed VSI can be updated here.
 */
6252 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6253 fm_list->vsi_count == 0)
6254 return ICE_ERR_PARAM;
6256 /* A rule with the VSI being removed does not exist */
6257 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6258 return ICE_ERR_DOES_NOT_EXIST;
6260 lkup_type = ICE_SW_LKUP_LAST;
6261 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove this VSI from the hardware VSI list (third-to-last arg true =
 * remove), then mirror the change in the software map.
 */
6262 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6263 ice_aqc_opc_update_sw_rules,
6268 fm_list->vsi_count--;
6269 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6270 vsi_list_info = fm_list->vsi_list_info;
6271 if (fm_list->vsi_count == 1) {
/* One subscriber left: collapse the VSI-list rule back into a plain
 * "forward to VSI" rule targeting the remaining VSI.
 */
6272 struct ice_fltr_info tmp_fltr;
6275 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6277 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6278 return ICE_ERR_OUT_OF_RANGE;
6280 /* Make sure VSI list is empty before removing it below */
6281 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6283 ice_aqc_opc_update_sw_rules,
6287 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6288 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6289 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6290 tmp_fltr.fwd_id.hw_vsi_id =
6291 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6292 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6293 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6295 /* Update the previous switch rule of "MAC forward to VSI" to
6296 * "MAC fwd to VSI list"
6298 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6300 ice_debug(hw, ICE_DBG_SW,
6301 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6302 tmp_fltr.fwd_id.hw_vsi_id, status);
6307 if (fm_list->vsi_count == 1) {
6308 /* Remove the VSI list since it is no longer used */
6309 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6311 ice_debug(hw, ICE_DBG_SW,
6312 "Failed to remove VSI list %d, error %d\n",
6313 vsi_list_id, status);
/* Drop the software VSI-list map and detach it from the entry. */
6317 LIST_DEL(&vsi_list_info->list_entry);
6318 ice_free(hw, vsi_list_info);
6319 fm_list->vsi_list_info = NULL;
/* NOTE(review): interior lines (declarations, `continue`s, some status
 * checks) are elided from this excerpt.
 */
6326 * ice_rem_adv_rule - removes existing advanced switch rule
6327 * @hw: pointer to the hardware structure
6328 * @lkups: information on the words that needs to be looked up. All words
6329 * together makes one recipe
6330 * @lkups_cnt: num of entries in the lkups array
6331 * @rinfo: Its the pointer to the rule information for the rule
6333 * This function can be used to remove 1 rule at a time. The lkups is
6334 * used to describe all the words that forms the "lookup" portion of the
6335 * rule. These words can span multiple protocols. Callers to this function
6336 * need to pass in a list of protocol headers with lookup information along
6337 * and mask that determines which words are valid from the given protocol
6338 * header. rinfo describes other information related to this rule such as
6339 * forwarding IDs, priority of this rule, etc.
6342 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6343 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6345 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6346 const struct ice_dummy_pkt_offsets *offsets;
6347 struct ice_prot_lkup_ext lkup_exts;
6348 u16 rule_buf_sz, pkt_len, i, rid;
6349 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6350 enum ice_status status = ICE_SUCCESS;
6351 bool remove_rule = false;
6352 const u8 *pkt = NULL;
/* Rebuild the lookup-extraction words from the caller's lookups so the
 * owning recipe can be located by ice_find_recp().
 */
6355 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6356 for (i = 0; i < lkups_cnt; i++) {
6359 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6362 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6366 rid = ice_find_recp(hw, &lkup_exts);
6367 /* If did not find a recipe that match the existing criteria */
6368 if (rid == ICE_MAX_NUM_RECIPES)
6369 return ICE_ERR_PARAM;
6371 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6372 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6373 /* the rule is already removed */
/* Under the rule-list lock, decide whether the hardware rule itself must
 * be deleted or only the VSI-list membership updated.
 */
6376 ice_acquire_lock(rule_lock);
6377 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6379 } else if (list_elem->vsi_count > 1) {
/* Other VSIs still subscribe: shrink the VSI list, keep the rule. */
6380 list_elem->vsi_list_info->ref_cnt--;
6381 remove_rule = false;
6382 vsi_handle = rinfo->sw_act.vsi_handle;
6383 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6385 vsi_handle = rinfo->sw_act.vsi_handle;
6386 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6388 ice_release_lock(rule_lock);
6391 if (list_elem->vsi_count == 0)
6394 ice_release_lock(rule_lock);
6396 struct ice_aqc_sw_rules_elem *s_rule;
/* Delete the rule from hardware: the remove command needs a buffer
 * sized for the rule plus its dummy packet.
 */
6398 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
6399 &pkt_len, &offsets);
6400 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6402 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6405 return ICE_ERR_NO_MEMORY;
6406 s_rule->pdata.lkup_tx_rx.act = 0;
6407 s_rule->pdata.lkup_tx_rx.index =
6408 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6409 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6410 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6412 ice_aqc_opc_remove_sw_rules, NULL);
6413 if (status == ICE_SUCCESS) {
/* Hardware delete succeeded: drop the bookkeeping entry too. */
6414 ice_acquire_lock(rule_lock);
6415 LIST_DEL(&list_elem->list_entry);
6416 ice_free(hw, list_elem->lkups);
6417 ice_free(hw, list_elem);
6418 ice_release_lock(rule_lock);
6420 ice_free(hw, s_rule);
6426 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6427 * @hw: pointer to the hardware structure
6428 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6430 * This function is used to remove 1 rule at a time. The removal is based on
6431 * the remove_entry parameter. This function will remove rule for a given
6432 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6435 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6436 struct ice_rule_query_data *remove_entry)
6438 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6439 struct LIST_HEAD_TYPE *list_head;
6440 struct ice_adv_rule_info rinfo;
6441 struct ice_switch_info *sw;
6443 sw = hw->switch_info;
/* The recipe must exist before its rule list can be searched. */
6444 if (!sw->recp_list[remove_entry->rid].recp_created)
6445 return ICE_ERR_PARAM;
6446 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Find the bookkeeping entry by rule ID and delegate the actual removal
 * to ice_rem_adv_rule() with this entry's lookups and rule info, with
 * the caller's VSI handle substituted in.
 */
6447 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6449 if (list_itr->rule_info.fltr_rule_id ==
6450 remove_entry->rule_id) {
6451 rinfo = list_itr->rule_info;
6452 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6453 return ice_rem_adv_rule(hw, list_itr->lkups,
6454 list_itr->lkups_cnt, &rinfo);
/* No rule with the requested ID was found under this recipe. */
6457 return ICE_ERR_PARAM;
/* NOTE(review): interior lines (the `continue`s and the status check at the
 * end of the inner loop) are elided from this excerpt.
 */
6461 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6463 * @hw: pointer to the hardware structure
6464 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6466 * This function is used to remove all the rules for a given VSI and as soon
6467 * as removing a rule fails, it will return immediately with the error code,
6468 * else it will return ICE_SUCCESS
6471 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6473 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6474 struct ice_vsi_list_map_info *map_info;
6475 struct LIST_HEAD_TYPE *list_head;
6476 struct ice_adv_rule_info rinfo;
6477 struct ice_switch_info *sw;
6478 enum ice_status status;
6479 u16 vsi_list_id = 0;
6482 sw = hw->switch_info;
/* Scan every created recipe that carries advanced rules and remove each
 * rule whose VSI list contains this VSI.
 */
6483 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6484 if (!sw->recp_list[rid].recp_created)
6486 if (!sw->recp_list[rid].adv_rule)
6488 list_head = &sw->recp_list[rid].filt_rules;
6490 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6491 ice_adv_fltr_mgmt_list_entry, list_entry) {
6492 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6496 rinfo = list_itr->rule_info;
6497 rinfo.sw_act.vsi_handle = vsi_handle;
6498 status = ice_rem_adv_rule(hw, list_itr->lkups,
6499 list_itr->lkups_cnt, &rinfo);
/* NOTE(review): interior lines (the per-VSI while-loop header and some
 * break/continue statements) are elided from this excerpt.
 */
6509 * ice_replay_fltr - Replay all the filters stored by a specific list head
6510 * @hw: pointer to the hardware structure
6511 * @list_head: list for which filters needs to be replayed
6512 * @recp_id: Recipe ID for which rules need to be replayed
6514 static enum ice_status
6515 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6517 struct ice_fltr_mgmt_list_entry *itr;
6518 struct LIST_HEAD_TYPE l_head;
6519 enum ice_status status = ICE_SUCCESS;
6521 if (LIST_EMPTY(list_head))
6524 /* Move entries from the given list_head to a temporary l_head so that
6525 * they can be replayed. Otherwise when trying to re-add the same
6526 * filter, the function will return already exists
6528 LIST_REPLACE_INIT(list_head, &l_head)
6530 /* Mark the given list_head empty by reinitializing it so filters
6531 * could be added again by *handler
6533 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6535 struct ice_fltr_list_entry f_entry;
6537 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly; otherwise replay once
 * per VSI recorded in the entry's VSI-list map.
 */
6538 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6539 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6540 if (status != ICE_SUCCESS)
6545 /* Add a filter per VSI separately */
6550 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6552 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit so the next iteration picks the next VSI. */
6555 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6556 f_entry.fltr_info.vsi_handle = vsi_handle;
6557 f_entry.fltr_info.fwd_id.hw_vsi_id =
6558 ice_get_hw_vsi_num(hw, vsi_handle);
6559 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6560 if (recp_id == ICE_SW_LKUP_VLAN)
6561 status = ice_add_vlan_internal(hw, &f_entry);
6563 status = ice_add_rule_internal(hw, recp_id,
6565 if (status != ICE_SUCCESS)
6570 /* Clear the filter management list */
6571 ice_rem_sw_rule_info(hw, &l_head);
6576 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6577 * @hw: pointer to the hardware structure
6579 * NOTE: This function does not clean up partially added filters on error.
6580 * It is up to caller of the function to issue a reset or fail early.
6582 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6584 struct ice_switch_info *sw = hw->switch_info;
6585 enum ice_status status = ICE_SUCCESS;
/* Replay the filter list of every recipe, stopping at the first failure. */
6588 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6589 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6591 status = ice_replay_fltr(hw, i, head);
6592 if (status != ICE_SUCCESS)
/* NOTE(review): interior lines (some `continue`s and a `break`) are elided
 * from this excerpt.
 */
6599 * ice_replay_vsi_fltr - Replay filters for requested VSI
6600 * @hw: pointer to the hardware structure
6601 * @vsi_handle: driver VSI handle
6602 * @recp_id: Recipe ID for which rules need to be replayed
6603 * @list_head: list for which filters need to be replayed
6605 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6606 * It is required to pass valid VSI handle.
6608 static enum ice_status
6609 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6610 struct LIST_HEAD_TYPE *list_head)
6612 struct ice_fltr_mgmt_list_entry *itr;
6613 enum ice_status status = ICE_SUCCESS;
6616 if (LIST_EMPTY(list_head))
6618 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6620 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6622 struct ice_fltr_list_entry f_entry;
6624 f_entry.fltr_info = itr->fltr_info;
/* Direct replay for single-VSI, non-VLAN entries owned by this VSI. */
6625 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6626 itr->fltr_info.vsi_handle == vsi_handle) {
6627 /* update the src in case it is VSI num */
6628 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6629 f_entry.fltr_info.src = hw_vsi_id;
6630 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6631 if (status != ICE_SUCCESS)
/* VSI-list entries: only replay if this VSI is a member of the map. */
6635 if (!itr->vsi_list_info ||
6636 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6638 /* Clearing it so that the logic can add it back */
6639 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6640 f_entry.fltr_info.vsi_handle = vsi_handle;
6641 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6642 /* update the src in case it is VSI num */
6643 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6644 f_entry.fltr_info.src = hw_vsi_id;
6645 if (recp_id == ICE_SW_LKUP_VLAN)
6646 status = ice_add_vlan_internal(hw, &f_entry);
6648 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6649 if (status != ICE_SUCCESS)
6657 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6658 * @hw: pointer to the hardware structure
6659 * @vsi_handle: driver VSI handle
6660 * @list_head: list for which filters need to be replayed
6662 * Replay the advanced rule for the given VSI.
6664 static enum ice_status
6665 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6666 struct LIST_HEAD_TYPE *list_head)
6668 struct ice_rule_query_data added_entry = { 0 };
6669 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6670 enum ice_status status = ICE_SUCCESS;
6672 if (LIST_EMPTY(list_head))
/* Re-program each stored advanced rule whose action targets this VSI by
 * calling ice_add_adv_rule() with the saved lookups and rule info.
 */
6674 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6676 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6677 u16 lk_cnt = adv_fltr->lkups_cnt;
6679 if (vsi_handle != rinfo->sw_act.vsi_handle)
6681 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6690 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6691 * @hw: pointer to the hardware structure
6692 * @vsi_handle: driver VSI handle
6694 * Replays filters for requested VSI via vsi_handle.
6696 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6698 struct ice_switch_info *sw = hw->switch_info;
6699 enum ice_status status;
6702 /* Update the recipes that were created */
6703 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6704 struct LIST_HEAD_TYPE *head;
6706 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes replay through ice_replay_vsi_fltr(); recipes flagged
 * adv_rule use the advanced-rule replay path instead.
 */
6707 if (!sw->recp_list[i].adv_rule)
6708 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6710 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6711 if (status != ICE_SUCCESS)
6719 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6720 * @hw: pointer to the HW struct
6722 * Deletes the filter replay rules.
6724 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6726 struct ice_switch_info *sw = hw->switch_info;
6732 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6733 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6734 struct LIST_HEAD_TYPE *l_head;
6736 l_head = &sw->recp_list[i].filt_replay_rules;
6737 if (!sw->recp_list[i].adv_rule)
6738 ice_rem_sw_rule_info(hw, l_head);
6740 ice_rem_adv_rule_info(hw, l_head);