/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019 Intel Corporation
 */
#include "ice_switch.h"
#include "ice_flex_type.h"
/* Byte offsets into the dummy ethernet header below */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
/* Largest valid 12-bit VLAN ID (0 and 0xFFF are reserved on the wire) */
#define ICE_MAX_VLAN_ID			0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Sizes (in bytes) of the AQ switch-rule buffers.  Each starts from the
 * ice_aqc_sw_rules_elem header minus its variable "pdata" union, then adds
 * the specific rule layout; the trailing "- 1" accounts for the one-byte
 * flexible tail already counted inside the rule struct.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule carrying (n) actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule carrying (n) VSI numbers */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
162 { ICE_PROTOCOL_LAST, 0 },
166 u8 dummy_udp_tun_tcp_packet[] = {
167 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
168 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
171 0x08, 0x00, /* ICE_ETYPE_OL 12 */
173 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
174 0x00, 0x01, 0x00, 0x00,
175 0x40, 0x11, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
180 0x00, 0x46, 0x00, 0x00,
182 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
186 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
190 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
191 0x00, 0x01, 0x00, 0x00,
192 0x40, 0x06, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
199 0x50, 0x02, 0x20, 0x00,
200 0x00, 0x00, 0x00, 0x00
204 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
206 { ICE_ETYPE_OL, 12 },
207 { ICE_IPV4_OFOS, 14 },
212 { ICE_UDP_ILOS, 84 },
213 { ICE_PROTOCOL_LAST, 0 },
217 u8 dummy_udp_tun_udp_packet[] = {
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_OL 12 */
224 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
225 0x00, 0x01, 0x00, 0x00,
226 0x00, 0x11, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
231 0x00, 0x3a, 0x00, 0x00,
233 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
234 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
237 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00,
241 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
242 0x00, 0x01, 0x00, 0x00,
243 0x00, 0x11, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
248 0x00, 0x08, 0x00, 0x00,
252 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
254 { ICE_ETYPE_OL, 12 },
255 { ICE_IPV4_OFOS, 14 },
256 { ICE_UDP_ILOS, 34 },
257 { ICE_PROTOCOL_LAST, 0 },
261 dummy_udp_packet[] = {
262 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x08, 0x00, /* ICE_ETYPE_OL 12 */
268 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
269 0x00, 0x01, 0x00, 0x00,
270 0x00, 0x11, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
275 0x00, 0x08, 0x00, 0x00,
277 0x00, 0x00, /* 2 bytes for 4 byte alignment */
281 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
283 { ICE_ETYPE_OL, 12 },
284 { ICE_IPV4_OFOS, 14 },
286 { ICE_PROTOCOL_LAST, 0 },
290 dummy_tcp_packet[] = {
291 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
292 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00,
295 0x08, 0x00, /* ICE_ETYPE_OL 12 */
297 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
298 0x00, 0x01, 0x00, 0x00,
299 0x00, 0x06, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
306 0x50, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
313 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
315 { ICE_ETYPE_OL, 12 },
316 { ICE_IPV6_OFOS, 14 },
318 { ICE_PROTOCOL_LAST, 0 },
322 dummy_tcp_ipv6_packet[] = {
323 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
327 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
329 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
330 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
341 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00,
343 0x50, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, /* 2 bytes for 4 byte alignment */
350 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
352 { ICE_ETYPE_OL, 12 },
353 { ICE_IPV6_OFOS, 14 },
354 { ICE_UDP_ILOS, 54 },
355 { ICE_PROTOCOL_LAST, 0 },
359 dummy_udp_ipv6_packet[] = {
360 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
366 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
367 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
378 0x00, 0x08, 0x00, 0x00,
380 0x00, 0x00, /* 2 bytes for 4 byte alignment */
384 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
386 { ICE_IPV4_OFOS, 14 },
389 { ICE_PROTOCOL_LAST, 0 },
393 dummy_udp_gtp_packet[] = {
394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
399 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x11, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
406 0x00, 0x1c, 0x00, 0x00,
408 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
409 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, 0x00, 0x85,
412 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
413 0x00, 0x00, 0x00, 0x00,
417 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
420 { ICE_PROTOCOL_LAST, 0 },
424 dummy_pppoe_packet[] = {
425 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
426 0x00, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
430 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 14 */
431 0x00, 0x4e, 0x00, 0x21,
433 0x45, 0x00, 0x00, 0x30, /* PDU */
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x11, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
440 /* this is a recipe to profile association bitmap */
441 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
442 ICE_MAX_NUM_PROFILES);
444 /* this is a profile to recipe association bitmap */
445 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
446 ICE_MAX_NUM_RECIPES);
448 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
451 * ice_collect_result_idx - copy result index values
452 * @buf: buffer that contains the result index
453 * @recp: the recipe struct to copy data into
455 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
456 struct ice_sw_recipe *recp)
458 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
459 ice_set_bit(buf->content.result_indx &
460 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
464 * ice_init_possible_res_bm - initialize possible result bitmap
465 * @pos_result_bm: pointer to the bitmap to initialize
467 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
471 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
473 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
474 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
475 ice_set_bit(bit, pos_result_bm);
479 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
480 * @hw: pointer to hardware structure
481 * @recps: struct that we need to populate
482 * @rid: recipe ID that we are populating
483 * @refresh_required: true if we should get recipe to profile mapping from FW
485 * This function is used to populate all the necessary entries into our
486 * bookkeeping so that we have a current list of all the recipes that are
487 * programmed in the firmware.
489 static enum ice_status
490 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
491 bool *refresh_required)
493 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
494 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
495 struct ice_aqc_recipe_data_elem *tmp;
496 u16 num_recps = ICE_MAX_NUM_RECIPES;
497 struct ice_prot_lkup_ext *lkup_exts;
498 u16 i, sub_recps, fv_word_idx = 0;
499 enum ice_status status;
501 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
502 ice_init_possible_res_bm(possible_idx);
504 /* we need a buffer big enough to accommodate all the recipes */
505 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
506 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
508 return ICE_ERR_NO_MEMORY;
510 tmp[0].recipe_indx = rid;
511 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
512 /* non-zero status meaning recipe doesn't exist */
516 /* Get recipe to profile map so that we can get the fv from lkups that
517 * we read for a recipe from FW. Since we want to minimize the number of
518 * times we make this FW call, just make one call and cache the copy
519 * until a new recipe is added. This operation is only required the
520 * first time to get the changes from FW. Then to search existing
521 * entries we don't need to update the cache again until another recipe
524 if (*refresh_required) {
525 ice_get_recp_to_prof_map(hw);
526 *refresh_required = false;
529 /* Start populating all the entries for recps[rid] based on lkups from
530 * firmware. Note that we are only creating the root recipe in our
533 lkup_exts = &recps[rid].lkup_exts;
535 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
536 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
537 struct ice_recp_grp_entry *rg_entry;
538 u8 prof_id, idx, prot = 0;
542 rg_entry = (struct ice_recp_grp_entry *)
543 ice_malloc(hw, sizeof(*rg_entry));
545 status = ICE_ERR_NO_MEMORY;
549 idx = root_bufs.recipe_indx;
550 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
552 /* Mark all result indices in this chain */
553 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
554 ice_set_bit(root_bufs.content.result_indx &
555 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
557 /* get the first profile that is associated with rid */
558 prof_id = ice_find_first_bit(recipe_to_profile[idx],
559 ICE_MAX_NUM_PROFILES);
560 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
561 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
563 rg_entry->fv_idx[i] = lkup_indx;
564 rg_entry->fv_mask[i] =
565 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
567 /* If the recipe is a chained recipe then all its
568 * child recipe's result will have a result index.
569 * To fill fv_words we should not use those result
570 * index, we only need the protocol ids and offsets.
571 * We will skip all the fv_idx which stores result
572 * index in them. We also need to skip any fv_idx which
573 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
574 * valid offset value.
576 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
577 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
578 rg_entry->fv_idx[i] == 0)
581 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
582 rg_entry->fv_idx[i], &prot, &off);
583 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
584 lkup_exts->fv_words[fv_word_idx].off = off;
587 /* populate rg_list with the data from the child entry of this
590 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
592 /* Propagate some data to the recipe database */
593 recps[idx].is_root = is_root;
594 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
595 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
596 recps[idx].chain_idx = root_bufs.content.result_indx &
597 ~ICE_AQ_RECIPE_RESULT_EN;
599 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
604 /* Only do the following for root recipes entries */
605 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
606 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
607 recps[idx].root_rid = root_bufs.content.rid &
608 ~ICE_AQ_RECIPE_ID_IS_ROOT;
609 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
610 recps[idx].big_recp = (recps[rid].n_grp_count > 1);
613 /* Complete initialization of the root recipe entry */
614 lkup_exts->n_val_words = fv_word_idx;
615 recps[rid].n_grp_count = num_recps;
616 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
617 ice_calloc(hw, recps[rid].n_grp_count,
618 sizeof(struct ice_aqc_recipe_data_elem));
619 if (!recps[rid].root_buf)
622 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
623 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
625 /* Copy result indexes */
626 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
627 ICE_NONDMA_TO_NONDMA);
628 recps[rid].recp_created = true;
636 * ice_get_recp_to_prof_map - updates recipe to profile mapping
637 * @hw: pointer to hardware structure
639 * This function is used to populate recipe_to_profile matrix where index to
640 * this array is the recipe ID and the element is the mapping of which profiles
641 * is this recipe mapped to.
644 ice_get_recp_to_prof_map(struct ice_hw *hw)
646 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
649 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
652 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
653 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
654 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
656 ice_memcpy(profile_to_recipe[i], r_bitmap,
657 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
658 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
659 if (ice_is_bit_set(r_bitmap, j))
660 ice_set_bit(i, recipe_to_profile[j]);
665 * ice_init_def_sw_recp - initialize the recipe book keeping tables
666 * @hw: pointer to the HW struct
668 * Allocate memory for the entire recipe table and initialize the structures/
669 * entries corresponding to basic recipes.
671 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
673 struct ice_sw_recipe *recps;
676 recps = (struct ice_sw_recipe *)
677 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
679 return ICE_ERR_NO_MEMORY;
681 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
682 recps[i].root_rid = i;
683 INIT_LIST_HEAD(&recps[i].filt_rules);
684 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
685 INIT_LIST_HEAD(&recps[i].rg_list);
686 ice_init_lock(&recps[i].filt_rule_lock);
689 hw->switch_info->recp_list = recps;
695 * ice_aq_get_sw_cfg - get switch configuration
696 * @hw: pointer to the hardware structure
697 * @buf: pointer to the result buffer
698 * @buf_size: length of the buffer available for response
699 * @req_desc: pointer to requested descriptor
700 * @num_elems: pointer to number of elements
701 * @cd: pointer to command details structure or NULL
703 * Get switch configuration (0x0200) to be placed in 'buff'.
704 * This admin command returns information such as initial VSI/port number
705 * and switch ID it belongs to.
707 * NOTE: *req_desc is both an input/output parameter.
708 * The caller of this function first calls this function with *request_desc set
709 * to 0. If the response from f/w has *req_desc set to 0, all the switch
710 * configuration information has been returned; if non-zero (meaning not all
711 * the information was returned), the caller should call this function again
712 * with *req_desc set to the previous value returned by f/w to get the
713 * next block of switch configuration information.
715 * *num_elems is output only parameter. This reflects the number of elements
716 * in response buffer. The caller of this function to use *num_elems while
717 * parsing the response buffer.
719 static enum ice_status
720 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
721 u16 buf_size, u16 *req_desc, u16 *num_elems,
722 struct ice_sq_cd *cd)
724 struct ice_aqc_get_sw_cfg *cmd;
725 enum ice_status status;
726 struct ice_aq_desc desc;
728 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
729 cmd = &desc.params.get_sw_conf;
730 cmd->element = CPU_TO_LE16(*req_desc);
732 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
734 *req_desc = LE16_TO_CPU(cmd->element);
735 *num_elems = LE16_TO_CPU(cmd->num_elems);
743 * ice_alloc_sw - allocate resources specific to switch
744 * @hw: pointer to the HW struct
745 * @ena_stats: true to turn on VEB stats
746 * @shared_res: true for shared resource, false for dedicated resource
747 * @sw_id: switch ID returned
748 * @counter_id: VEB counter ID returned
750 * allocates switch resources (SWID and VEB counter) (0x0208)
753 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
756 struct ice_aqc_alloc_free_res_elem *sw_buf;
757 struct ice_aqc_res_elem *sw_ele;
758 enum ice_status status;
761 buf_len = sizeof(*sw_buf);
762 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
763 ice_malloc(hw, buf_len);
765 return ICE_ERR_NO_MEMORY;
767 /* Prepare buffer for switch ID.
768 * The number of resource entries in buffer is passed as 1 since only a
769 * single switch/VEB instance is allocated, and hence a single sw_id
772 sw_buf->num_elems = CPU_TO_LE16(1);
774 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
775 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
776 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
778 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
779 ice_aqc_opc_alloc_res, NULL);
782 goto ice_alloc_sw_exit;
784 sw_ele = &sw_buf->elem[0];
785 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
788 /* Prepare buffer for VEB Counter */
789 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
790 struct ice_aqc_alloc_free_res_elem *counter_buf;
791 struct ice_aqc_res_elem *counter_ele;
793 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
794 ice_malloc(hw, buf_len);
796 status = ICE_ERR_NO_MEMORY;
797 goto ice_alloc_sw_exit;
800 /* The number of resource entries in buffer is passed as 1 since
801 * only a single switch/VEB instance is allocated, and hence a
802 * single VEB counter is requested.
804 counter_buf->num_elems = CPU_TO_LE16(1);
805 counter_buf->res_type =
806 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
807 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
808 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
812 ice_free(hw, counter_buf);
813 goto ice_alloc_sw_exit;
815 counter_ele = &counter_buf->elem[0];
816 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
817 ice_free(hw, counter_buf);
821 ice_free(hw, sw_buf);
826 * ice_free_sw - free resources specific to switch
827 * @hw: pointer to the HW struct
828 * @sw_id: switch ID returned
829 * @counter_id: VEB counter ID returned
831 * free switch resources (SWID and VEB counter) (0x0209)
833 * NOTE: This function frees multiple resources. It continues
834 * releasing other resources even after it encounters error.
835 * The error code returned is the last error it encountered.
837 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
839 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
840 enum ice_status status, ret_status;
843 buf_len = sizeof(*sw_buf);
844 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
845 ice_malloc(hw, buf_len);
847 return ICE_ERR_NO_MEMORY;
849 /* Prepare buffer to free for switch ID res.
850 * The number of resource entries in buffer is passed as 1 since only a
851 * single switch/VEB instance is freed, and hence a single sw_id
854 sw_buf->num_elems = CPU_TO_LE16(1);
855 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
856 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
858 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
859 ice_aqc_opc_free_res, NULL);
862 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
864 /* Prepare buffer to free for VEB Counter resource */
865 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
866 ice_malloc(hw, buf_len);
868 ice_free(hw, sw_buf);
869 return ICE_ERR_NO_MEMORY;
872 /* The number of resource entries in buffer is passed as 1 since only a
873 * single switch/VEB instance is freed, and hence a single VEB counter
876 counter_buf->num_elems = CPU_TO_LE16(1);
877 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
878 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
880 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
881 ice_aqc_opc_free_res, NULL);
883 ice_debug(hw, ICE_DBG_SW,
884 "VEB counter resource could not be freed\n");
888 ice_free(hw, counter_buf);
889 ice_free(hw, sw_buf);
895 * @hw: pointer to the HW struct
896 * @vsi_ctx: pointer to a VSI context struct
897 * @cd: pointer to command details structure or NULL
899 * Add a VSI context to the hardware (0x0210)
902 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
903 struct ice_sq_cd *cd)
905 struct ice_aqc_add_update_free_vsi_resp *res;
906 struct ice_aqc_add_get_update_free_vsi *cmd;
907 struct ice_aq_desc desc;
908 enum ice_status status;
910 cmd = &desc.params.vsi_cmd;
911 res = &desc.params.add_update_free_vsi_res;
913 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
915 if (!vsi_ctx->alloc_from_pool)
916 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
917 ICE_AQ_VSI_IS_VALID);
919 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
921 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
923 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
924 sizeof(vsi_ctx->info), cd);
927 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
928 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
929 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
937 * @hw: pointer to the HW struct
938 * @vsi_ctx: pointer to a VSI context struct
939 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
940 * @cd: pointer to command details structure or NULL
942 * Free VSI context info from hardware (0x0213)
945 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
946 bool keep_vsi_alloc, struct ice_sq_cd *cd)
948 struct ice_aqc_add_update_free_vsi_resp *resp;
949 struct ice_aqc_add_get_update_free_vsi *cmd;
950 struct ice_aq_desc desc;
951 enum ice_status status;
953 cmd = &desc.params.vsi_cmd;
954 resp = &desc.params.add_update_free_vsi_res;
956 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
958 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
960 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
962 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
964 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
965 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
973 * @hw: pointer to the HW struct
974 * @vsi_ctx: pointer to a VSI context struct
975 * @cd: pointer to command details structure or NULL
977 * Update VSI context in the hardware (0x0211)
980 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
981 struct ice_sq_cd *cd)
983 struct ice_aqc_add_update_free_vsi_resp *resp;
984 struct ice_aqc_add_get_update_free_vsi *cmd;
985 struct ice_aq_desc desc;
986 enum ice_status status;
988 cmd = &desc.params.vsi_cmd;
989 resp = &desc.params.add_update_free_vsi_res;
991 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
993 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
995 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
997 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
998 sizeof(vsi_ctx->info), cd);
1001 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1002 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1009 * ice_is_vsi_valid - check whether the VSI is valid or not
1010 * @hw: pointer to the HW struct
1011 * @vsi_handle: VSI handle
1013 * check whether the VSI is valid or not
1015 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1017 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1021 * ice_get_hw_vsi_num - return the HW VSI number
1022 * @hw: pointer to the HW struct
1023 * @vsi_handle: VSI handle
1025 * return the HW VSI number
1026 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1028 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1030 return hw->vsi_ctx[vsi_handle]->vsi_num;
1034 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1035 * @hw: pointer to the HW struct
1036 * @vsi_handle: VSI handle
1038 * return the VSI context entry for a given VSI handle
1040 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1042 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1046 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1047 * @hw: pointer to the HW struct
1048 * @vsi_handle: VSI handle
1049 * @vsi: VSI context pointer
1051 * save the VSI context entry for a given VSI handle
1054 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1056 hw->vsi_ctx[vsi_handle] = vsi;
1060 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1061 * @hw: pointer to the HW struct
1062 * @vsi_handle: VSI handle
1064 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1066 struct ice_vsi_ctx *vsi;
1069 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1072 ice_for_each_traffic_class(i) {
1073 if (vsi->lan_q_ctx[i]) {
1074 ice_free(hw, vsi->lan_q_ctx[i]);
1075 vsi->lan_q_ctx[i] = NULL;
1081 * ice_clear_vsi_ctx - clear the VSI context entry
1082 * @hw: pointer to the HW struct
1083 * @vsi_handle: VSI handle
1085 * clear the VSI context entry
1087 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1089 struct ice_vsi_ctx *vsi;
1091 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1093 ice_clear_vsi_q_ctx(hw, vsi_handle);
1095 hw->vsi_ctx[vsi_handle] = NULL;
1100 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1101 * @hw: pointer to the HW struct
1103 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1107 for (i = 0; i < ICE_MAX_VSI; i++)
1108 ice_clear_vsi_ctx(hw, i);
1112 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1113 * @hw: pointer to the HW struct
1114 * @vsi_handle: unique VSI handle provided by drivers
1115 * @vsi_ctx: pointer to a VSI context struct
1116 * @cd: pointer to command details structure or NULL
1118 * Add a VSI context to the hardware also add it into the VSI handle list.
1119 * If this function gets called after reset for existing VSIs then update
1120 * with the new HW VSI number in the corresponding VSI handle list entry.
1123 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1124 struct ice_sq_cd *cd)
1126 struct ice_vsi_ctx *tmp_vsi_ctx;
1127 enum ice_status status;
1129 if (vsi_handle >= ICE_MAX_VSI)
1130 return ICE_ERR_PARAM;
1131 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1134 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1136 /* Create a new VSI context */
1137 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1138 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1140 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1141 return ICE_ERR_NO_MEMORY;
1143 *tmp_vsi_ctx = *vsi_ctx;
1145 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1147 /* update with new HW VSI num */
1148 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
1149 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1156 * ice_free_vsi- free VSI context from hardware and VSI handle list
1157 * @hw: pointer to the HW struct
1158 * @vsi_handle: unique VSI handle
1159 * @vsi_ctx: pointer to a VSI context struct
1160 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1161 * @cd: pointer to command details structure or NULL
1163 * Free VSI context info from hardware as well as from VSI handle list
1166 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1167 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1169 enum ice_status status;
1171 if (!ice_is_vsi_valid(hw, vsi_handle))
1172 return ICE_ERR_PARAM;
1173 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1174 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1176 ice_clear_vsi_ctx(hw, vsi_handle);
1182 * @hw: pointer to the HW struct
1183 * @vsi_handle: unique VSI handle
1184 * @vsi_ctx: pointer to a VSI context struct
1185 * @cd: pointer to command details structure or NULL
1187 * Update VSI context in the hardware
1190 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1191 struct ice_sq_cd *cd)
1193 if (!ice_is_vsi_valid(hw, vsi_handle))
1194 return ICE_ERR_PARAM;
1195 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1196 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1200 * ice_aq_get_vsi_params
1201 * @hw: pointer to the HW struct
1202 * @vsi_ctx: pointer to a VSI context struct
1203 * @cd: pointer to command details structure or NULL
1205 * Get VSI context info from hardware (0x0212)
1208 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1209 struct ice_sq_cd *cd)
1211 struct ice_aqc_add_get_update_free_vsi *cmd;
1212 struct ice_aqc_get_vsi_resp *resp;
1213 struct ice_aq_desc desc;
1214 enum ice_status status;
1216 cmd = &desc.params.vsi_cmd;
1217 resp = &desc.params.get_vsi_resp;
1219 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1221 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1223 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1224 sizeof(vsi_ctx->info), cd);
1226 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1228 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1229 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1236 * ice_aq_add_update_mir_rule - add/update a mirror rule
1237 * @hw: pointer to the HW struct
1238 * @rule_type: Rule Type
1239 * @dest_vsi: VSI number to which packets will be mirrored
1240 * @count: length of the list
1241 * @mr_buf: buffer for list of mirrored VSI numbers
1242 * @cd: pointer to command details structure or NULL
1245 * Add/Update Mirror Rule (0x260).
1248 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1249 u16 count, struct ice_mir_rule_buf *mr_buf,
1250 struct ice_sq_cd *cd, u16 *rule_id)
1252 struct ice_aqc_add_update_mir_rule *cmd;
1253 struct ice_aq_desc desc;
1254 enum ice_status status;
1255 __le16 *mr_list = NULL;
1258 switch (rule_type) {
1259 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1260 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1261 /* Make sure count and mr_buf are set for these rule_types */
1262 if (!(count && mr_buf))
1263 return ICE_ERR_PARAM;
1265 buf_size = count * sizeof(__le16);
1266 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1268 return ICE_ERR_NO_MEMORY;
1270 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1271 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1272 /* Make sure count and mr_buf are not set for these
1275 if (count || mr_buf)
1276 return ICE_ERR_PARAM;
1279 ice_debug(hw, ICE_DBG_SW,
1280 "Error due to unsupported rule_type %u\n", rule_type);
1281 return ICE_ERR_OUT_OF_RANGE;
1284 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1286 /* Pre-process 'mr_buf' items for add/update of virtual port
1287 * ingress/egress mirroring (but not physical port ingress/egress
1293 for (i = 0; i < count; i++) {
1296 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1298 /* Validate specified VSI number, make sure it is less
1299 * than ICE_MAX_VSI, if not return with error.
1301 if (id >= ICE_MAX_VSI) {
1302 ice_debug(hw, ICE_DBG_SW,
1303 "Error VSI index (%u) out-of-range\n",
1305 ice_free(hw, mr_list);
1306 return ICE_ERR_OUT_OF_RANGE;
1309 /* add VSI to mirror rule */
1312 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1313 else /* remove VSI from mirror rule */
1314 mr_list[i] = CPU_TO_LE16(id);
1318 cmd = &desc.params.add_update_rule;
1319 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1320 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1321 ICE_AQC_RULE_ID_VALID_M);
1322 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1323 cmd->num_entries = CPU_TO_LE16(count);
1324 cmd->dest = CPU_TO_LE16(dest_vsi);
1326 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1328 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1330 ice_free(hw, mr_list);
1336 * ice_aq_delete_mir_rule - delete a mirror rule
1337 * @hw: pointer to the HW struct
1338 * @rule_id: Mirror rule ID (to be deleted)
1339 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1340 * otherwise it is returned to the shared pool
1341 * @cd: pointer to command details structure or NULL
1343 * Delete Mirror Rule (0x261).
1346 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1347 struct ice_sq_cd *cd)
1349 struct ice_aqc_delete_mir_rule *cmd;
1350 struct ice_aq_desc desc;
1352 /* rule_id should be in the range 0...63 */
1353 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1354 return ICE_ERR_OUT_OF_RANGE;
1356 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1358 cmd = &desc.params.del_rule;
1359 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1360 cmd->rule_id = CPU_TO_LE16(rule_id);
1363 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1365 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1369 * ice_aq_alloc_free_vsi_list
1370 * @hw: pointer to the HW struct
1371 * @vsi_list_id: VSI list ID returned or used for lookup
1372 * @lkup_type: switch rule filter lookup type
1373 * @opc: switch rules population command type - pass in the command opcode
1375 * allocates or free a VSI list resource
1377 static enum ice_status
1378 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1379 enum ice_sw_lkup_type lkup_type,
1380 enum ice_adminq_opc opc)
1382 struct ice_aqc_alloc_free_res_elem *sw_buf;
1383 struct ice_aqc_res_elem *vsi_ele;
1384 enum ice_status status;
1387 buf_len = sizeof(*sw_buf);
1388 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1389 ice_malloc(hw, buf_len);
1391 return ICE_ERR_NO_MEMORY;
1392 sw_buf->num_elems = CPU_TO_LE16(1);
1394 if (lkup_type == ICE_SW_LKUP_MAC ||
1395 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1396 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1397 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1398 lkup_type == ICE_SW_LKUP_PROMISC ||
1399 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1400 lkup_type == ICE_SW_LKUP_LAST) {
1401 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1402 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1404 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1406 status = ICE_ERR_PARAM;
1407 goto ice_aq_alloc_free_vsi_list_exit;
1410 if (opc == ice_aqc_opc_free_res)
1411 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1413 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1415 goto ice_aq_alloc_free_vsi_list_exit;
1417 if (opc == ice_aqc_opc_alloc_res) {
1418 vsi_ele = &sw_buf->elem[0];
1419 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1422 ice_aq_alloc_free_vsi_list_exit:
1423 ice_free(hw, sw_buf);
1428 * ice_aq_set_storm_ctrl - Sets storm control configuration
1429 * @hw: pointer to the HW struct
1430 * @bcast_thresh: represents the upper threshold for broadcast storm control
1431 * @mcast_thresh: represents the upper threshold for multicast storm control
1432 * @ctl_bitmask: storm control control knobs
1434 * Sets the storm control configuration (0x0280)
1437 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1440 struct ice_aqc_storm_cfg *cmd;
1441 struct ice_aq_desc desc;
1443 cmd = &desc.params.storm_conf;
1445 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1447 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1448 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1449 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1451 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1455 * ice_aq_get_storm_ctrl - gets storm control configuration
1456 * @hw: pointer to the HW struct
1457 * @bcast_thresh: represents the upper threshold for broadcast storm control
1458 * @mcast_thresh: represents the upper threshold for multicast storm control
1459 * @ctl_bitmask: storm control control knobs
1461 * Gets the storm control configuration (0x0281)
1464 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1467 enum ice_status status;
1468 struct ice_aq_desc desc;
1470 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1472 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1474 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1477 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1480 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1483 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1490 * ice_aq_sw_rules - add/update/remove switch rules
1491 * @hw: pointer to the HW struct
1492 * @rule_list: pointer to switch rule population list
1493 * @rule_list_sz: total size of the rule list in bytes
1494 * @num_rules: number of switch rules in the rule_list
1495 * @opc: switch rules population command type - pass in the command opcode
1496 * @cd: pointer to command details structure or NULL
1498 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1500 static enum ice_status
1501 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1502 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1504 struct ice_aq_desc desc;
1506 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1508 if (opc != ice_aqc_opc_add_sw_rules &&
1509 opc != ice_aqc_opc_update_sw_rules &&
1510 opc != ice_aqc_opc_remove_sw_rules)
1511 return ICE_ERR_PARAM;
1513 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1515 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1516 desc.params.sw_rules.num_rules_fltr_entry_index =
1517 CPU_TO_LE16(num_rules);
1518 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1522 * ice_aq_add_recipe - add switch recipe
1523 * @hw: pointer to the HW struct
1524 * @s_recipe_list: pointer to switch rule population list
1525 * @num_recipes: number of switch recipes in the list
1526 * @cd: pointer to command details structure or NULL
1531 ice_aq_add_recipe(struct ice_hw *hw,
1532 struct ice_aqc_recipe_data_elem *s_recipe_list,
1533 u16 num_recipes, struct ice_sq_cd *cd)
1535 struct ice_aqc_add_get_recipe *cmd;
1536 struct ice_aq_desc desc;
1539 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1540 cmd = &desc.params.add_get_recipe;
1541 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1543 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1544 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1546 buf_size = num_recipes * sizeof(*s_recipe_list);
1548 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1552 * ice_aq_get_recipe - get switch recipe
1553 * @hw: pointer to the HW struct
1554 * @s_recipe_list: pointer to switch rule population list
1555 * @num_recipes: pointer to the number of recipes (input and output)
1556 * @recipe_root: root recipe number of recipe(s) to retrieve
1557 * @cd: pointer to command details structure or NULL
1561 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1562 * On output, *num_recipes will equal the number of entries returned in
1565 * The caller must supply enough space in s_recipe_list to hold all possible
1566 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1569 ice_aq_get_recipe(struct ice_hw *hw,
1570 struct ice_aqc_recipe_data_elem *s_recipe_list,
1571 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1573 struct ice_aqc_add_get_recipe *cmd;
1574 struct ice_aq_desc desc;
1575 enum ice_status status;
1578 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1579 return ICE_ERR_PARAM;
1581 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1582 cmd = &desc.params.add_get_recipe;
1583 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1585 cmd->return_index = CPU_TO_LE16(recipe_root);
1586 cmd->num_sub_recipes = 0;
1588 buf_size = *num_recipes * sizeof(*s_recipe_list);
1590 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1591 /* cppcheck-suppress constArgument */
1592 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1598 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1599 * @hw: pointer to the HW struct
1600 * @profile_id: package profile ID to associate the recipe with
1601 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1602 * @cd: pointer to command details structure or NULL
1603 * Recipe to profile association (0x0291)
1606 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1607 struct ice_sq_cd *cd)
1609 struct ice_aqc_recipe_to_profile *cmd;
1610 struct ice_aq_desc desc;
1612 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1613 cmd = &desc.params.recipe_to_profile;
1614 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1615 cmd->profile_id = CPU_TO_LE16(profile_id);
1616 /* Set the recipe ID bit in the bitmask to let the device know which
1617 * profile we are associating the recipe to
1619 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1620 ICE_NONDMA_TO_NONDMA);
1622 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1626 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1627 * @hw: pointer to the HW struct
1628 * @profile_id: package profile ID to associate the recipe with
1629 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1630 * @cd: pointer to command details structure or NULL
1631 * Associate profile ID with given recipe (0x0293)
1634 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1635 struct ice_sq_cd *cd)
1637 struct ice_aqc_recipe_to_profile *cmd;
1638 struct ice_aq_desc desc;
1639 enum ice_status status;
1641 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1642 cmd = &desc.params.recipe_to_profile;
1643 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1644 cmd->profile_id = CPU_TO_LE16(profile_id);
1646 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1648 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1649 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1655 * ice_alloc_recipe - add recipe resource
1656 * @hw: pointer to the hardware structure
1657 * @rid: recipe ID returned as response to AQ call
1659 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1661 struct ice_aqc_alloc_free_res_elem *sw_buf;
1662 enum ice_status status;
1665 buf_len = sizeof(*sw_buf);
1666 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1668 return ICE_ERR_NO_MEMORY;
1670 sw_buf->num_elems = CPU_TO_LE16(1);
1671 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1672 ICE_AQC_RES_TYPE_S) |
1673 ICE_AQC_RES_TYPE_FLAG_SHARED);
1674 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1675 ice_aqc_opc_alloc_res, NULL);
1677 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1678 ice_free(hw, sw_buf);
1683 /* ice_init_port_info - Initialize port_info with switch configuration data
1684 * @pi: pointer to port_info
1685 * @vsi_port_num: VSI number or port number
1686 * @type: Type of switch element (port or VSI)
1687 * @swid: switch ID of the switch the element is attached to
1688 * @pf_vf_num: PF or VF number
1689 * @is_vf: true if the element is a VF, false otherwise
1692 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1693 u16 swid, u16 pf_vf_num, bool is_vf)
1696 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1697 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1699 pi->pf_vf_num = pf_vf_num;
1701 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1702 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1705 ice_debug(pi->hw, ICE_DBG_SW,
1706 "incorrect VSI/port type received\n");
1711 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1712 * @hw: pointer to the hardware structure
1714 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1716 struct ice_aqc_get_sw_cfg_resp *rbuf;
1717 enum ice_status status;
1718 u16 num_total_ports;
1724 num_total_ports = 1;
1726 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1727 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1730 return ICE_ERR_NO_MEMORY;
1732 /* Multiple calls to ice_aq_get_sw_cfg may be required
1733 * to get all the switch configuration information. The need
1734 * for additional calls is indicated by ice_aq_get_sw_cfg
1735 * writing a non-zero value in req_desc
1738 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1739 &req_desc, &num_elems, NULL);
1744 for (i = 0; i < num_elems; i++) {
1745 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1746 u16 pf_vf_num, swid, vsi_port_num;
1750 ele = rbuf[i].elements;
1751 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1752 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1754 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1755 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1757 swid = LE16_TO_CPU(ele->swid);
1759 if (LE16_TO_CPU(ele->pf_vf_num) &
1760 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1763 type = LE16_TO_CPU(ele->vsi_port_num) >>
1764 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1767 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1768 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1769 if (j == num_total_ports) {
1770 ice_debug(hw, ICE_DBG_SW,
1771 "more ports than expected\n");
1772 status = ICE_ERR_CFG;
1775 ice_init_port_info(hw->port_info,
1776 vsi_port_num, type, swid,
1784 } while (req_desc && !status);
1788 ice_free(hw, (void *)rbuf);
1794 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1795 * @hw: pointer to the hardware structure
1796 * @fi: filter info structure to fill/update
1798 * This helper function populates the lb_en and lan_en elements of the provided
1799 * ice_fltr_info struct using the switch's type and characteristics of the
1800 * switch rule being configured.
1802 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1806 if ((fi->flag & ICE_FLTR_TX) &&
1807 (fi->fltr_act == ICE_FWD_TO_VSI ||
1808 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1809 fi->fltr_act == ICE_FWD_TO_Q ||
1810 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1811 /* Setting LB for prune actions will result in replicated
1812 * packets to the internal switch that will be dropped.
1814 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1817 /* Set lan_en to TRUE if
1818 * 1. The switch is a VEB AND
1820 * 2.1 The lookup is a directional lookup like ethertype,
1821 * promiscuous, ethertype-MAC, promiscuous-VLAN
1822 * and default-port OR
1823 * 2.2 The lookup is VLAN, OR
1824 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1825 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1829 * The switch is a VEPA.
1831 * In all other cases, the LAN enable has to be set to false.
1834 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1835 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1836 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1837 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1838 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1839 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1840 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1841 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1842 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1843 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1852 * ice_ilog2 - Calculates integer log base 2 of a number
1853 * @n: number on which to perform operation
1855 static int ice_ilog2(u64 n)
1859 for (i = 63; i >= 0; i--)
1860 if (((u64)1 << i) & n)
1867 * ice_fill_sw_rule - Helper function to fill switch rule structure
1868 * @hw: pointer to the hardware structure
1869 * @f_info: entry containing packet forwarding information
1870 * @s_rule: switch rule structure to be filled in based on mac_entry
1871 * @opc: switch rules population command type - pass in the command opcode
1874 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1875 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1877 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1885 if (opc == ice_aqc_opc_remove_sw_rules) {
1886 s_rule->pdata.lkup_tx_rx.act = 0;
1887 s_rule->pdata.lkup_tx_rx.index =
1888 CPU_TO_LE16(f_info->fltr_rule_id);
1889 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1893 eth_hdr_sz = sizeof(dummy_eth_header);
1894 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1896 /* initialize the ether header with a dummy header */
1897 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1898 ice_fill_sw_info(hw, f_info);
1900 switch (f_info->fltr_act) {
1901 case ICE_FWD_TO_VSI:
1902 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1903 ICE_SINGLE_ACT_VSI_ID_M;
1904 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1905 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1906 ICE_SINGLE_ACT_VALID_BIT;
1908 case ICE_FWD_TO_VSI_LIST:
1909 act |= ICE_SINGLE_ACT_VSI_LIST;
1910 act |= (f_info->fwd_id.vsi_list_id <<
1911 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1912 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1913 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1914 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1915 ICE_SINGLE_ACT_VALID_BIT;
1918 act |= ICE_SINGLE_ACT_TO_Q;
1919 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1920 ICE_SINGLE_ACT_Q_INDEX_M;
1922 case ICE_DROP_PACKET:
1923 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1924 ICE_SINGLE_ACT_VALID_BIT;
1926 case ICE_FWD_TO_QGRP:
1927 q_rgn = f_info->qgrp_size > 0 ?
1928 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1929 act |= ICE_SINGLE_ACT_TO_Q;
1930 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1931 ICE_SINGLE_ACT_Q_INDEX_M;
1932 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1933 ICE_SINGLE_ACT_Q_REGION_M;
1940 act |= ICE_SINGLE_ACT_LB_ENABLE;
1942 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1944 switch (f_info->lkup_type) {
1945 case ICE_SW_LKUP_MAC:
1946 daddr = f_info->l_data.mac.mac_addr;
1948 case ICE_SW_LKUP_VLAN:
1949 vlan_id = f_info->l_data.vlan.vlan_id;
1950 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1951 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1952 act |= ICE_SINGLE_ACT_PRUNE;
1953 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1956 case ICE_SW_LKUP_ETHERTYPE_MAC:
1957 daddr = f_info->l_data.ethertype_mac.mac_addr;
1959 case ICE_SW_LKUP_ETHERTYPE:
1960 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1961 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1963 case ICE_SW_LKUP_MAC_VLAN:
1964 daddr = f_info->l_data.mac_vlan.mac_addr;
1965 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1967 case ICE_SW_LKUP_PROMISC_VLAN:
1968 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1970 case ICE_SW_LKUP_PROMISC:
1971 daddr = f_info->l_data.mac_vlan.mac_addr;
1977 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1978 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1979 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1981 /* Recipe set depending on lookup type */
1982 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1983 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1984 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1987 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1988 ICE_NONDMA_TO_NONDMA);
1990 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1991 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1992 *off = CPU_TO_BE16(vlan_id);
1995 /* Create the switch rule with the final dummy Ethernet header */
1996 if (opc != ice_aqc_opc_update_sw_rules)
1997 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2001 * ice_add_marker_act
2002 * @hw: pointer to the hardware structure
2003 * @m_ent: the management entry for which sw marker needs to be added
2004 * @sw_marker: sw marker to tag the Rx descriptor with
2005 * @l_id: large action resource ID
2007 * Create a large action to hold software marker and update the switch rule
2008 * entry pointed by m_ent with newly created large action
2010 static enum ice_status
2011 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2012 u16 sw_marker, u16 l_id)
2014 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2015 /* For software marker we need 3 large actions
2016 * 1. FWD action: FWD TO VSI or VSI LIST
2017 * 2. GENERIC VALUE action to hold the profile ID
2018 * 3. GENERIC VALUE action to hold the software marker ID
2020 const u16 num_lg_acts = 3;
2021 enum ice_status status;
2027 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2028 return ICE_ERR_PARAM;
2030 /* Create two back-to-back switch rules and submit them to the HW using
2031 * one memory buffer:
2035 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2036 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2037 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2039 return ICE_ERR_NO_MEMORY;
2041 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2043 /* Fill in the first switch rule i.e. large action */
2044 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2045 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2046 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2048 /* First action VSI forwarding or VSI list forwarding depending on how
2051 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2052 m_ent->fltr_info.fwd_id.hw_vsi_id;
2054 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2055 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2056 ICE_LG_ACT_VSI_LIST_ID_M;
2057 if (m_ent->vsi_count > 1)
2058 act |= ICE_LG_ACT_VSI_LIST;
2059 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2061 /* Second action descriptor type */
2062 act = ICE_LG_ACT_GENERIC;
2064 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2065 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2067 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2068 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2070 /* Third action Marker value */
2071 act |= ICE_LG_ACT_GENERIC;
2072 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2073 ICE_LG_ACT_GENERIC_VALUE_M;
2075 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2077 /* call the fill switch rule to fill the lookup Tx Rx structure */
2078 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2079 ice_aqc_opc_update_sw_rules);
2081 /* Update the action to point to the large action ID */
2082 rx_tx->pdata.lkup_tx_rx.act =
2083 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2084 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2085 ICE_SINGLE_ACT_PTR_VAL_M));
2087 /* Use the filter rule ID of the previously created rule with single
2088 * act. Once the update happens, hardware will treat this as large
2091 rx_tx->pdata.lkup_tx_rx.index =
2092 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2094 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2095 ice_aqc_opc_update_sw_rules, NULL);
2097 m_ent->lg_act_idx = l_id;
2098 m_ent->sw_marker_id = sw_marker;
2101 ice_free(hw, lg_act);
2106 * ice_add_counter_act - add/update filter rule with counter action
2107 * @hw: pointer to the hardware structure
2108 * @m_ent: the management entry for which counter needs to be added
2109 * @counter_id: VLAN counter ID returned as part of allocate resource
2110 * @l_id: large action resource ID
2112 static enum ice_status
2113 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2114 u16 counter_id, u16 l_id)
2116 struct ice_aqc_sw_rules_elem *lg_act;
2117 struct ice_aqc_sw_rules_elem *rx_tx;
2118 enum ice_status status;
2119 /* 2 actions will be added while adding a large action counter */
2120 const int num_acts = 2;
2127 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2128 return ICE_ERR_PARAM;
2130 /* Create two back-to-back switch rules and submit them to the HW using
2131 * one memory buffer:
2135 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2136 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2137 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2140 return ICE_ERR_NO_MEMORY;
2142 rx_tx = (struct ice_aqc_sw_rules_elem *)
2143 ((u8 *)lg_act + lg_act_size);
2145 /* Fill in the first switch rule i.e. large action */
2146 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2147 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2148 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2150 /* First action VSI forwarding or VSI list forwarding depending on how
2153 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2154 m_ent->fltr_info.fwd_id.hw_vsi_id;
2156 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2157 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2158 ICE_LG_ACT_VSI_LIST_ID_M;
2159 if (m_ent->vsi_count > 1)
2160 act |= ICE_LG_ACT_VSI_LIST;
2161 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2163 /* Second action counter ID */
2164 act = ICE_LG_ACT_STAT_COUNT;
2165 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2166 ICE_LG_ACT_STAT_COUNT_M;
2167 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2169 /* call the fill switch rule to fill the lookup Tx Rx structure */
2170 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2171 ice_aqc_opc_update_sw_rules);
2173 act = ICE_SINGLE_ACT_PTR;
2174 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2175 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2177 /* Use the filter rule ID of the previously created rule with single
2178 * act. Once the update happens, hardware will treat this as large
2181 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2182 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2184 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2185 ice_aqc_opc_update_sw_rules, NULL);
2187 m_ent->lg_act_idx = l_id;
2188 m_ent->counter_index = counter_id;
2191 ice_free(hw, lg_act);
2196 * ice_create_vsi_list_map
2197 * @hw: pointer to the hardware structure
2198 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2199 * @num_vsi: number of VSI handles in the array
2200 * @vsi_list_id: VSI list ID generated as part of allocate resource
2202 * Helper function to create a new entry of VSI list ID to VSI mapping
2203 * using the given VSI list ID
2205 static struct ice_vsi_list_map_info *
2206 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2209 struct ice_switch_info *sw = hw->switch_info;
2210 struct ice_vsi_list_map_info *v_map;
2213 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2218 v_map->vsi_list_id = vsi_list_id;
2220 for (i = 0; i < num_vsi; i++)
2221 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2223 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2228 * ice_update_vsi_list_rule
2229 * @hw: pointer to the hardware structure
2230 * @vsi_handle_arr: array of VSI handles to form a VSI list
2231 * @num_vsi: number of VSI handles in the array
2232 * @vsi_list_id: VSI list ID generated as part of allocate resource
2233 * @remove: Boolean value to indicate if this is a remove action
2234 * @opc: switch rules population command type - pass in the command opcode
2235 * @lkup_type: lookup type of the filter
2237 * Call AQ command to add a new switch rule or update existing switch rule
2238 * using the given VSI list ID
2240 static enum ice_status
2241 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2242 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2243 enum ice_sw_lkup_type lkup_type)
2245 struct ice_aqc_sw_rules_elem *s_rule;
2246 enum ice_status status;
2252 return ICE_ERR_PARAM;
2254 if (lkup_type == ICE_SW_LKUP_MAC ||
2255 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2256 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2257 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2258 lkup_type == ICE_SW_LKUP_PROMISC ||
2259 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2260 lkup_type == ICE_SW_LKUP_LAST)
2261 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2262 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2263 else if (lkup_type == ICE_SW_LKUP_VLAN)
2264 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2265 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2267 return ICE_ERR_PARAM;
2269 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2270 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2272 return ICE_ERR_NO_MEMORY;
2273 for (i = 0; i < num_vsi; i++) {
2274 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2275 status = ICE_ERR_PARAM;
2278 /* AQ call requires hw_vsi_id(s) */
2279 s_rule->pdata.vsi_list.vsi[i] =
2280 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2283 s_rule->type = CPU_TO_LE16(type);
2284 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2285 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2287 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2290 ice_free(hw, s_rule);
2295 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2296 * @hw: pointer to the HW struct
2297 * @vsi_handle_arr: array of VSI handles to form a VSI list
2298 * @num_vsi: number of VSI handles in the array
2299 * @vsi_list_id: stores the ID of the VSI list to be created
2300 * @lkup_type: switch rule filter's lookup type
2302 static enum ice_status
2303 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2304 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2306 enum ice_status status;
/* Allocate a VSI list resource from firmware; on success *vsi_list_id
 * holds the new list's ID.
 */
2308 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2309 ice_aqc_opc_alloc_res);
2313 /* Update the newly created VSI list to include the specified VSIs */
2314 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2315 *vsi_list_id, false,
2316 ice_aqc_opc_add_sw_rules, lkup_type);
2320 * ice_create_pkt_fwd_rule
2321 * @hw: pointer to the hardware structure
2322 * @f_entry: entry containing packet forwarding information
2324 * Create switch rule with given filter information and add an entry
2325 * to the corresponding filter management list to track this switch rule
2328 static enum ice_status
2329 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2330 struct ice_fltr_list_entry *f_entry)
2332 struct ice_fltr_mgmt_list_entry *fm_entry;
2333 struct ice_aqc_sw_rules_elem *s_rule;
2334 enum ice_sw_lkup_type l_type;
2335 struct ice_sw_recipe *recp;
2336 enum ice_status status;
/* Buffer sized for a rule carrying the dummy Ethernet header */
2338 s_rule = (struct ice_aqc_sw_rules_elem *)
2339 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2341 return ICE_ERR_NO_MEMORY;
2342 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2343 ice_malloc(hw, sizeof(*fm_entry));
2345 status = ICE_ERR_NO_MEMORY;
2346 goto ice_create_pkt_fwd_rule_exit;
/* Snapshot the caller's filter info into the tracking entry */
2349 fm_entry->fltr_info = f_entry->fltr_info;
2351 /* Initialize all the fields for the management entry */
2352 fm_entry->vsi_count = 1;
2353 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2354 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2355 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2357 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2358 ice_aqc_opc_add_sw_rules);
2360 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2361 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the tracking entry is not needed; free and bail out */
2363 ice_free(hw, fm_entry);
2364 goto ice_create_pkt_fwd_rule_exit;
/* Firmware returns the rule index it assigned; record it in both the
 * caller's entry and the tracking entry.
 */
2367 f_entry->fltr_info.fltr_rule_id =
2368 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2369 fm_entry->fltr_info.fltr_rule_id =
2370 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2372 /* The book keeping entries will get removed when base driver
2373 * calls remove filter AQ command
2375 l_type = fm_entry->fltr_info.lkup_type;
2376 recp = &hw->switch_info->recp_list[l_type];
2377 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2379 ice_create_pkt_fwd_rule_exit:
/* s_rule was only needed for the AQ call; always free it */
2380 ice_free(hw, s_rule);
2385 * ice_update_pkt_fwd_rule
2386 * @hw: pointer to the hardware structure
2387 * @f_info: filter information for switch rule
2389 * Call AQ command to update a previously created switch rule with a
2392 static enum ice_status
2393 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2395 struct ice_aqc_sw_rules_elem *s_rule;
2396 enum ice_status status;
2398 s_rule = (struct ice_aqc_sw_rules_elem *)
2399 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)
2401 return ICE_ERR_NO_MEMORY;
2403 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* The update must target the rule ID firmware assigned at creation */
2405 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2407 /* Update switch rule with new rule set to forward VSI list */
2408 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2409 ice_aqc_opc_update_sw_rules, NULL);
2411 ice_free(hw, s_rule);
2416 * ice_update_sw_rule_bridge_mode
2417 * @hw: pointer to the HW struct
2419 * Updates unicast switch filter rules based on VEB/VEPA mode
2421 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2423 struct ice_switch_info *sw = hw->switch_info;
2424 struct ice_fltr_mgmt_list_entry *fm_entry;
2425 enum ice_status status = ICE_SUCCESS;
2426 struct LIST_HEAD_TYPE *rule_head;
2427 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC lookup rules are affected by bridge mode */
2429 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2430 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2432 ice_acquire_lock(rule_lock);
2433 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2435 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2436 u8 *addr = fi->l_data.mac.mac_addr;
2438 /* Update unicast Tx rules to reflect the selected
/* Re-program only Tx-direction unicast rules with a forwarding action;
 * each one is rewritten in place via ice_update_pkt_fwd_rule().
 */
2441 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2442 (fi->fltr_act == ICE_FWD_TO_VSI ||
2443 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2444 fi->fltr_act == ICE_FWD_TO_Q ||
2445 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2446 status = ice_update_pkt_fwd_rule(hw, fi);
2452 ice_release_lock(rule_lock);
2458 * ice_add_update_vsi_list
2459 * @hw: pointer to the hardware structure
2460 * @m_entry: pointer to current filter management list entry
2461 * @cur_fltr: filter information from the book keeping entry
2462 * @new_fltr: filter information with the new VSI to be added
2464 * Call AQ command to add or update previously created VSI list with new VSI.
2466 * Helper function to do book keeping associated with adding filter information
2467 * The algorithm to do the book keeping is described below :
2468 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2469 * if only one VSI has been added till now
2470 * Allocate a new VSI list and add two VSIs
2471 * to this list using switch rule command
2472 * Update the previously created switch rule with the
2473 * newly created VSI list ID
2474 * if a VSI list was previously created
2475 * Add the new VSI to the previously created VSI list set
2476 * using the update switch rule command
2478 static enum ice_status
2479 ice_add_update_vsi_list(struct ice_hw *hw,
2480 struct ice_fltr_mgmt_list_entry *m_entry,
2481 struct ice_fltr_info *cur_fltr,
2482 struct ice_fltr_info *new_fltr)
2484 enum ice_status status = ICE_SUCCESS;
2485 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be combined into a VSI list */
2487 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2488 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2489 return ICE_ERR_NOT_IMPL;
2491 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2492 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2493 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2494 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2495 return ICE_ERR_NOT_IMPL;
2497 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2498 /* Only one entry existed in the mapping and it was not already
2499 * a part of a VSI list. So, create a VSI list with the old and
2502 struct ice_fltr_info tmp_fltr;
2503 u16 vsi_handle_arr[2];
2505 /* A rule already exists with the new VSI being added */
2506 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2507 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry list from the existing VSI and the new one */
2509 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2510 vsi_handle_arr[1] = new_fltr->vsi_handle;
2511 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2513 new_fltr->lkup_type);
2517 tmp_fltr = *new_fltr;
2518 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2519 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2520 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2521 /* Update the previous switch rule of "MAC forward to VSI" to
2522 * "MAC fwd to VSI list"
2524 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep book keeping in sync with the rewritten HW rule */
2528 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2529 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2530 m_entry->vsi_list_info =
2531 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2534 /* If this entry was large action then the large action needs
2535 * to be updated to point to FWD to VSI list
2537 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2539 ice_add_marker_act(hw, m_entry,
2540 m_entry->sw_marker_id,
2541 m_entry->lg_act_idx)
/* Else branch: a VSI list already exists; just add the new VSI to it.
 * NOTE(review): truncated view — error-return lines between the checks
 * below are not visible here.
 */
2543 u16 vsi_handle = new_fltr->vsi_handle;
2544 enum ice_adminq_opc opcode;
2546 if (!m_entry->vsi_list_info)
2549 /* A rule already exists with the new VSI being added */
2550 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2553 /* Update the previously created VSI list set with
2554 * the new VSI ID passed in
2556 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2557 opcode = ice_aqc_opc_update_sw_rules;
2559 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2560 vsi_list_id, false, opcode,
2561 new_fltr->lkup_type);
2562 /* update VSI list mapping info with new VSI ID */
2564 ice_set_bit(vsi_handle,
2565 m_entry->vsi_list_info->vsi_map);
/* Count the subscriber VSI regardless of which path was taken */
2568 m_entry->vsi_count++;
2573 * ice_find_rule_entry - Search a rule entry
2574 * @hw: pointer to the hardware structure
2575 * @recp_id: lookup type for which the specified rule needs to be searched
2576 * @f_info: rule information
2578 * Helper function to search for a given rule entry
2579 * Returns pointer to entry storing the rule if found
2581 static struct ice_fltr_mgmt_list_entry *
2582 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2584 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2585 struct ice_switch_info *sw = hw->switch_info;
2586 struct LIST_HEAD_TYPE *list_head;
2588 list_head = &sw->recp_list[recp_id].filt_rules;
/* Match on the lookup data payload and direction flag; the caller must
 * hold the recipe's filt_rule_lock (no locking is done here).
 */
2589 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2591 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2592 sizeof(f_info->l_data)) &&
2593 f_info->flag == list_itr->fltr_info.flag) {
2602 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2603 * @hw: pointer to the hardware structure
2604 * @recp_id: lookup type for which VSI lists needs to be searched
2605 * @vsi_handle: VSI handle to be found in VSI list
2606 * @vsi_list_id: VSI list ID found containing vsi_handle
2608 * Helper function to search a VSI list with single entry containing given VSI
2609 * handle element. This can be extended further to search VSI list with more
2610 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2612 static struct ice_vsi_list_map_info *
2613 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2616 struct ice_vsi_list_map_info *map_info = NULL;
2617 struct ice_switch_info *sw = hw->switch_info;
2618 struct LIST_HEAD_TYPE *list_head;
2620 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced recipes store their filters in a different entry type, so
 * the same list head is walked with the matching iterator type.
 */
2621 if (sw->recp_list[recp_id].adv_rule) {
2622 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2624 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2625 ice_adv_fltr_mgmt_list_entry,
2627 if (list_itr->vsi_list_info) {
2628 map_info = list_itr->vsi_list_info;
2629 if (ice_is_bit_set(map_info->vsi_map,
2631 *vsi_list_id = map_info->vsi_list_id;
/* Legacy recipes: only consider entries whose list holds exactly one
 * VSI (see function header).
 */
2637 struct ice_fltr_mgmt_list_entry *list_itr;
2639 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2640 ice_fltr_mgmt_list_entry,
2642 if (list_itr->vsi_count == 1 &&
2643 list_itr->vsi_list_info) {
2644 map_info = list_itr->vsi_list_info;
2645 if (ice_is_bit_set(map_info->vsi_map,
2647 *vsi_list_id = map_info->vsi_list_id;
2657 * ice_add_rule_internal - add rule for a given lookup type
2658 * @hw: pointer to the hardware structure
2659 * @recp_id: lookup type (recipe ID) for which rule has to be added
2660 * @f_entry: structure containing MAC forwarding information
2662 * Adds or updates the rule lists for a given recipe
2664 static enum ice_status
2665 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2666 struct ice_fltr_list_entry *f_entry)
2668 struct ice_switch_info *sw = hw->switch_info;
2669 struct ice_fltr_info *new_fltr, *cur_fltr;
2670 struct ice_fltr_mgmt_list_entry *m_entry;
2671 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2672 enum ice_status status = ICE_SUCCESS;
2674 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2675 return ICE_ERR_PARAM;
2677 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2678 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2679 f_entry->fltr_info.fwd_id.hw_vsi_id =
2680 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2682 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2684 ice_acquire_lock(rule_lock);
2685 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the port; Tx rules from the HW VSI number */
2686 if (new_fltr->flag & ICE_FLTR_RX)
2687 new_fltr->src = hw->port_info->lport;
2688 else if (new_fltr->flag & ICE_FLTR_TX)
2690 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule for this lookup data: create a fresh forwarding rule.
 * Otherwise fold the new VSI into the existing rule's VSI list.
 */
2692 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2694 status = ice_create_pkt_fwd_rule(hw, f_entry);
2695 goto exit_add_rule_internal;
2698 cur_fltr = &m_entry->fltr_info;
2699 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2701 exit_add_rule_internal:
2702 ice_release_lock(rule_lock);
2707 * ice_remove_vsi_list_rule
2708 * @hw: pointer to the hardware structure
2709 * @vsi_list_id: VSI list ID generated as part of allocate resource
2710 * @lkup_type: switch rule filter lookup type
2712 * The VSI list should be emptied before this function is called to remove the
2715 static enum ice_status
2716 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2717 enum ice_sw_lkup_type lkup_type)
2719 struct ice_aqc_sw_rules_elem *s_rule;
2720 enum ice_status status;
/* Zero-VSI rule element: only the header and list index are sent */
2723 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2724 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2726 return ICE_ERR_NO_MEMORY;
2728 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2729 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2731 /* Free the vsi_list resource that we allocated. It is assumed that the
2732 * list is empty at this point.
2734 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2735 ice_aqc_opc_free_res);
2737 ice_free(hw, s_rule);
2742 * ice_rem_update_vsi_list
2743 * @hw: pointer to the hardware structure
2744 * @vsi_handle: VSI handle of the VSI to remove
2745 * @fm_list: filter management entry for which the VSI list management needs to
2748 static enum ice_status
2749 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2750 struct ice_fltr_mgmt_list_entry *fm_list)
2752 enum ice_sw_lkup_type lkup_type;
2753 enum ice_status status = ICE_SUCCESS;
/* Entry must actually be forwarding to a non-empty VSI list */
2756 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2757 fm_list->vsi_count == 0)
2758 return ICE_ERR_PARAM;
2760 /* A rule with the VSI being removed does not exist */
2761 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2762 return ICE_ERR_DOES_NOT_EXIST;
2764 lkup_type = fm_list->fltr_info.lkup_type;
2765 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Ask firmware to drop this VSI from the list (remove = true) */
2766 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2767 ice_aqc_opc_update_sw_rules,
2772 fm_list->vsi_count--;
2773 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* One VSI left (non-VLAN): collapse the rule back to a plain
 * forward-to-VSI rule targeting the remaining VSI, since a list is no
 * longer needed. VLAN rules always keep their prune list.
 */
2775 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2776 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2777 struct ice_vsi_list_map_info *vsi_list_info =
2778 fm_list->vsi_list_info;
/* The remaining VSI is the only bit still set in the map */
2781 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2783 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2784 return ICE_ERR_OUT_OF_RANGE;
2786 /* Make sure VSI list is empty before removing it below */
2787 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2789 ice_aqc_opc_update_sw_rules,
2794 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2795 tmp_fltr_info.fwd_id.hw_vsi_id =
2796 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2797 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2798 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2800 ice_debug(hw, ICE_DBG_SW,
2801 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2802 tmp_fltr_info.fwd_id.hw_vsi_id, status);
/* Book keeping now reflects the plain forward-to-VSI rule */
2806 fm_list->fltr_info = tmp_fltr_info;
/* Free the VSI list resource once it is no longer referenced:
 * non-VLAN lists after collapsing to one VSI, VLAN lists when empty.
 */
2809 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2810 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2811 struct ice_vsi_list_map_info *vsi_list_info =
2812 fm_list->vsi_list_info;
2814 /* Remove the VSI list since it is no longer used */
2815 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2817 ice_debug(hw, ICE_DBG_SW,
2818 "Failed to remove VSI list %d, error %d\n",
2819 vsi_list_id, status);
2823 LIST_DEL(&vsi_list_info->list_entry);
2824 ice_free(hw, vsi_list_info);
2825 fm_list->vsi_list_info = NULL;
2832 * ice_remove_rule_internal - Remove a filter rule of a given type
2834 * @hw: pointer to the hardware structure
2835 * @recp_id: recipe ID for which the rule needs to removed
2836 * @f_entry: rule entry containing filter information
2838 static enum ice_status
2839 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2840 struct ice_fltr_list_entry *f_entry)
2842 struct ice_switch_info *sw = hw->switch_info;
2843 struct ice_fltr_mgmt_list_entry *list_elem;
2844 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2845 enum ice_status status = ICE_SUCCESS;
2846 bool remove_rule = false;
2849 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2850 return ICE_ERR_PARAM;
2851 f_entry->fltr_info.fwd_id.hw_vsi_id =
2852 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2854 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2855 ice_acquire_lock(rule_lock);
2856 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2858 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be torn down, or whether only
 * the VSI list it forwards to needs to be shrunk.
 */
2862 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2864 } else if (!list_elem->vsi_list_info) {
2865 status = ICE_ERR_DOES_NOT_EXIST;
2867 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2868 /* a ref_cnt > 1 indicates that the vsi_list is being
2869 * shared by multiple rules. Decrement the ref_cnt and
2870 * remove this rule, but do not modify the list, as it
2871 * is in-use by other rules.
2873 list_elem->vsi_list_info->ref_cnt--;
2876 /* a ref_cnt of 1 indicates the vsi_list is only used
2877 * by one rule. However, the original removal request is only
2878 * for a single VSI. Update the vsi_list first, and only
2879 * remove the rule if there are no further VSIs in this list.
2881 vsi_handle = f_entry->fltr_info.vsi_handle;
2882 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2885 /* if VSI count goes to zero after updating the VSI list */
2886 if (list_elem->vsi_count == 0)
2891 /* Remove the lookup rule */
2892 struct ice_aqc_sw_rules_elem *s_rule;
/* No dummy packet header is needed for a remove, hence NO_HDR size */
2894 s_rule = (struct ice_aqc_sw_rules_elem *)
2895 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2897 status = ICE_ERR_NO_MEMORY;
2901 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2902 ice_aqc_opc_remove_sw_rules);
2904 status = ice_aq_sw_rules(hw, s_rule,
2905 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2906 ice_aqc_opc_remove_sw_rules, NULL);
2908 /* Remove a book keeping from the list */
2909 ice_free(hw, s_rule);
/* Drop the tracking entry now that the HW rule is gone */
2914 LIST_DEL(&list_elem->list_entry);
2915 ice_free(hw, list_elem);
2918 ice_release_lock(rule_lock);
2923 * ice_aq_get_res_alloc - get allocated resources
2924 * @hw: pointer to the HW struct
2925 * @num_entries: pointer to u16 to store the number of resource entries returned
2926 * @buf: pointer to user-supplied buffer
2927 * @buf_size: size of buff
2928 * @cd: pointer to command details structure or NULL
2930 * The user-supplied buffer must be large enough to store the resource
2931 * information for all resource types. Each resource type is an
2932 * ice_aqc_get_res_resp_data_elem structure.
2935 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2936 u16 buf_size, struct ice_sq_cd *cd)
2938 struct ice_aqc_get_res_alloc *resp;
2939 enum ice_status status;
2940 struct ice_aq_desc desc;
2943 return ICE_ERR_BAD_PTR;
2945 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2946 return ICE_ERR_INVAL_SIZE;
2948 resp = &desc.params.get_res;
2950 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2951 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Entry count is optional output; only filled on success */
2953 if (!status && num_entries)
2954 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2960 * ice_aq_get_res_descs - get allocated resource descriptors
2961 * @hw: pointer to the hardware structure
2962 * @num_entries: number of resource entries in buffer
2963 * @buf: Indirect buffer to hold data parameters and response
2964 * @buf_size: size of buffer for indirect commands
2965 * @res_type: resource type
2966 * @res_shared: is resource shared
2967 * @desc_id: input - first desc ID to start; output - next desc ID
2968 * @cd: pointer to command details structure or NULL
2971 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2972 struct ice_aqc_get_allocd_res_desc_resp *buf,
2973 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2974 struct ice_sq_cd *cd)
2976 struct ice_aqc_get_allocd_res_desc *cmd;
2977 struct ice_aq_desc desc;
2978 enum ice_status status;
2980 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2982 cmd = &desc.params.get_res_desc;
2985 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements */
2987 if (buf_size != (num_entries * sizeof(*buf)))
2988 return ICE_ERR_PARAM;
2990 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and shared flag into one LE16 field */
2992 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2993 ICE_AQC_RES_TYPE_M) | (res_shared ?
2994 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2995 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
/* Indirect command with a read buffer */
2997 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2999 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back firmware's continuation cursor for the next call */
3001 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3007 * ice_add_mac - Add a MAC address based filter rule
3008 * @hw: pointer to the hardware structure
3009 * @m_list: list of MAC addresses and forwarding information
3011 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3012 * multiple unicast addresses, the function assumes that all the
3013 * addresses are unique in a given add_mac call. It doesn't
3014 * check for duplicates in this case, removing duplicates from a given
3015 * list should be taken care of in the caller of this function.
3018 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3020 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3021 struct ice_fltr_list_entry *m_list_itr;
3022 struct LIST_HEAD_TYPE *rule_head;
3023 u16 elem_sent, total_elem_left;
3024 struct ice_switch_info *sw;
3025 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3026 enum ice_status status = ICE_SUCCESS;
3027 u16 num_unicast = 0;
3031 return ICE_ERR_PARAM;
3033 sw = hw->switch_info;
3034 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate each entry, fix up src/fwd IDs, and either queue
 * unique unicast addresses for a bulk add or add multicast (and shared
 * unicast) entries one at a time via ice_add_rule_internal().
 */
3035 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3037 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3041 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3042 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3043 if (!ice_is_vsi_valid(hw, vsi_handle))
3044 return ICE_ERR_PARAM;
3045 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3046 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3047 /* update the src in case it is VSI num */
3048 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3049 return ICE_ERR_PARAM;
3050 m_list_itr->fltr_info.src = hw_vsi_id;
3051 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3052 IS_ZERO_ETHER_ADDR(add))
3053 return ICE_ERR_PARAM;
3054 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3055 /* Don't overwrite the unicast address */
3056 ice_acquire_lock(rule_lock);
3057 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3058 &m_list_itr->fltr_info)) {
3059 ice_release_lock(rule_lock);
3060 return ICE_ERR_ALREADY_EXISTS;
3062 ice_release_lock(rule_lock);
3064 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3065 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3066 m_list_itr->status =
3067 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3069 if (m_list_itr->status)
3070 return m_list_itr->status;
3074 ice_acquire_lock(rule_lock);
3075 /* Exit if no suitable entries were found for adding bulk switch rule */
3077 status = ICE_SUCCESS;
3078 goto ice_add_mac_exit;
3081 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3083 /* Allocate switch rule buffer for the bulk update for unicast */
3084 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3085 s_rule = (struct ice_aqc_sw_rules_elem *)
3086 ice_calloc(hw, num_unicast, s_rule_size);
3088 status = ICE_ERR_NO_MEMORY;
3089 goto ice_add_mac_exit;
/* Pass 2: serialize one rule element per unicast address into the
 * contiguous bulk buffer.
 */
3093 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3095 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3096 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3098 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3099 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3100 ice_aqc_opc_add_sw_rules);
3101 r_iter = (struct ice_aqc_sw_rules_elem *)
3102 ((u8 *)r_iter + s_rule_size);
3106 /* Call AQ bulk switch rule update for all unicast addresses */
3108 /* Call AQ switch rule in AQ_MAX chunk */
3109 for (total_elem_left = num_unicast; total_elem_left > 0;
3110 total_elem_left -= elem_sent) {
3111 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Never exceed the AQ's maximum buffer length per command */
3113 elem_sent = min(total_elem_left,
3114 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3115 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3116 elem_sent, ice_aqc_opc_add_sw_rules,
3119 goto ice_add_mac_exit;
3120 r_iter = (struct ice_aqc_sw_rules_elem *)
3121 ((u8 *)r_iter + (elem_sent * s_rule_size));
3124 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: walk the list again in the same order as pass 2 so r_iter
 * lines up with each unicast entry; record the FW-assigned rule ID and
 * create a book keeping entry per address.
 */
3126 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3128 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3129 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3130 struct ice_fltr_mgmt_list_entry *fm_entry;
3132 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3133 f_info->fltr_rule_id =
3134 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3135 f_info->fltr_act = ICE_FWD_TO_VSI;
3136 /* Create an entry to track this MAC address */
3137 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3138 ice_malloc(hw, sizeof(*fm_entry));
3140 status = ICE_ERR_NO_MEMORY;
3141 goto ice_add_mac_exit;
3143 fm_entry->fltr_info = *f_info;
3144 fm_entry->vsi_count = 1;
3145 /* The book keeping entries will get removed when
3146 * base driver calls remove filter AQ command
3149 LIST_ADD(&fm_entry->list_entry, rule_head);
3150 r_iter = (struct ice_aqc_sw_rules_elem *)
3151 ((u8 *)r_iter + s_rule_size);
3156 ice_release_lock(rule_lock);
3158 ice_free(hw, s_rule);
3163 * ice_add_vlan_internal - Add one VLAN based filter rule
3164 * @hw: pointer to the hardware structure
3165 * @f_entry: filter entry containing one VLAN information
3167 static enum ice_status
3168 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3170 struct ice_switch_info *sw = hw->switch_info;
3171 struct ice_fltr_mgmt_list_entry *v_list_itr;
3172 struct ice_fltr_info *new_fltr, *cur_fltr;
3173 enum ice_sw_lkup_type lkup_type;
3174 u16 vsi_list_id = 0, vsi_handle;
3175 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3176 enum ice_status status = ICE_SUCCESS;
3178 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3179 return ICE_ERR_PARAM;
3181 f_entry->fltr_info.fwd_id.hw_vsi_id =
3182 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3183 new_fltr = &f_entry->fltr_info;
3185 /* VLAN ID should only be 12 bits */
3186 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3187 return ICE_ERR_PARAM;
3189 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3190 return ICE_ERR_PARAM;
3192 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3193 lkup_type = new_fltr->lkup_type;
3194 vsi_handle = new_fltr->vsi_handle;
3195 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3196 ice_acquire_lock(rule_lock);
3197 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* Case 1: no rule yet for this VLAN ID — create rule + VSI list */
3199 struct ice_vsi_list_map_info *map_info = NULL;
3201 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3202 /* All VLAN pruning rules use a VSI list. Check if
3203 * there is already a VSI list containing VSI that we
3204 * want to add. If found, use the same vsi_list_id for
3205 * this new VLAN rule or else create a new list.
3207 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3211 status = ice_create_vsi_list_rule(hw,
3219 /* Convert the action to forwarding to a VSI list. */
3220 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3221 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3224 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-look up the entry that ice_create_pkt_fwd_rule() just added */
3226 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3229 status = ICE_ERR_DOES_NOT_EXIST;
3232 /* reuse VSI list for new rule and increment ref_cnt */
3234 v_list_itr->vsi_list_info = map_info;
3235 map_info->ref_cnt++;
3237 v_list_itr->vsi_list_info =
3238 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is not shared — just extend it */
3242 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3243 /* Update existing VSI list to add new VSI ID only if it used
3246 cur_fltr = &v_list_itr->fltr_info;
3247 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3250 /* If VLAN rule exists and VSI list being used by this rule is
3251 * referenced by more than 1 VLAN rule. Then create a new VSI
3252 * list appending previous VSI with new VSI and update existing
3253 * VLAN rule to point to new VSI list ID
3255 struct ice_fltr_info tmp_fltr;
3256 u16 vsi_handle_arr[2];
3259 /* Current implementation only supports reusing VSI list with
3260 * one VSI count. We should never hit below condition
3262 if (v_list_itr->vsi_count > 1 &&
3263 v_list_itr->vsi_list_info->ref_cnt > 1) {
3264 ice_debug(hw, ICE_DBG_SW,
3265 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3266 status = ICE_ERR_CFG;
/* The shared list holds a single VSI: find its handle */
3271 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3274 /* A rule already exists with the new VSI being added */
3275 if (cur_handle == vsi_handle) {
3276 status = ICE_ERR_ALREADY_EXISTS;
3280 vsi_handle_arr[0] = cur_handle;
3281 vsi_handle_arr[1] = vsi_handle;
3282 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3283 &vsi_list_id, lkup_type);
3287 tmp_fltr = v_list_itr->fltr_info;
3288 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3289 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3290 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3291 /* Update the previous switch rule to a new VSI list which
3292 * includes current VSI that is requested
3294 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3298 /* before overriding VSI list map info. decrement ref_cnt of
3301 v_list_itr->vsi_list_info->ref_cnt--;
3303 /* now update to newly created list */
3304 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3305 v_list_itr->vsi_list_info =
3306 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3308 v_list_itr->vsi_count++;
3312 ice_release_lock(rule_lock);
3317 * ice_add_vlan - Add VLAN based filter rule
3318 * @hw: pointer to the hardware structure
3319 * @v_list: list of VLAN entries and forwarding information
3322 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3324 struct ice_fltr_list_entry *v_list_itr;
3327 return ICE_ERR_PARAM;
/* Per-entry status lands in v_list_itr->status; stop on first failure */
3329 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3331 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3332 return ICE_ERR_PARAM;
3333 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3334 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3335 if (v_list_itr->status)
3336 return v_list_itr->status;
3342 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3343 * @hw: pointer to the hardware structure
3344 * @mv_list: list of MAC and VLAN filters
3346 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3347 * pruning bits enabled, then it is the responsibility of the caller to make
3348 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3349 * VLAN won't be received on that VSI otherwise.
3352 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3354 struct ice_fltr_list_entry *mv_list_itr;
3356 if (!mv_list || !hw)
3357 return ICE_ERR_PARAM;
/* All MAC-VLAN filters are forced to the Tx direction */
3359 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3361 enum ice_sw_lkup_type l_type =
3362 mv_list_itr->fltr_info.lkup_type;
3364 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3365 return ICE_ERR_PARAM;
3366 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3367 mv_list_itr->status =
3368 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3370 if (mv_list_itr->status)
3371 return mv_list_itr->status;
3377 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3378 * @hw: pointer to the hardware structure
3379 * @em_list: list of ether type MAC filter, MAC is optional
3381 * This function requires the caller to populate the entries in
3382 * the filter list with the necessary fields (including flags to
3383 * indicate Tx or Rx rules).
3386 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3388 struct ice_fltr_list_entry *em_list_itr;
3390 if (!em_list || !hw)
3391 return ICE_ERR_PARAM;
/* Unlike the MAC/VLAN adders, direction flags come from the caller */
3393 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3395 enum ice_sw_lkup_type l_type =
3396 em_list_itr->fltr_info.lkup_type;
3398 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3399 l_type != ICE_SW_LKUP_ETHERTYPE)
3400 return ICE_ERR_PARAM;
3402 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3404 if (em_list_itr->status)
3405 return em_list_itr->status;
3411 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3412 * @hw: pointer to the hardware structure
3413 * @em_list: list of ethertype or ethertype MAC entries
3416 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3418 struct ice_fltr_list_entry *em_list_itr, *tmp;
3420 if (!em_list || !hw)
3421 return ICE_ERR_PARAM;
3423 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3425 enum ice_sw_lkup_type l_type =
3426 em_list_itr->fltr_info.lkup_type;
3428 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3429 l_type != ICE_SW_LKUP_ETHERTYPE)
3430 return ICE_ERR_PARAM;
3432 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3434 if (em_list_itr->status)
3435 return em_list_itr->status;
3442 * ice_rem_sw_rule_info
3443 * @hw: pointer to the hardware structure
3444 * @rule_head: pointer to the switch list structure that we want to delete
3447 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3449 if (!LIST_EMPTY(rule_head)) {
3450 struct ice_fltr_mgmt_list_entry *entry;
3451 struct ice_fltr_mgmt_list_entry *tmp;
3453 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3454 ice_fltr_mgmt_list_entry, list_entry) {
3455 LIST_DEL(&entry->list_entry);
3456 ice_free(hw, entry);
3462 * ice_rem_adv_rule_info
3463 * @hw: pointer to the hardware structure
3464 * @rule_head: pointer to the switch list structure that we want to delete
3467 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3469 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3470 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3472 if (LIST_EMPTY(rule_head))
3475 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3476 ice_adv_fltr_mgmt_list_entry, list_entry) {
3477 LIST_DEL(&lst_itr->list_entry);
3478 ice_free(hw, lst_itr->lkups);
3479 ice_free(hw, lst_itr);
3484 * ice_rem_all_sw_rules_info
3485 * @hw: pointer to the hardware structure
3487 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3489 struct ice_switch_info *sw = hw->switch_info;
3492 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3493 struct LIST_HEAD_TYPE *rule_head;
3495 rule_head = &sw->recp_list[i].filt_rules;
3496 if (!sw->recp_list[i].adv_rule)
3497 ice_rem_sw_rule_info(hw, rule_head);
3499 ice_rem_adv_rule_info(hw, rule_head);
3504 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3505 * @pi: pointer to the port_info structure
3506 * @vsi_handle: VSI handle to set as default
3507 * @set: true to add the above mentioned switch rule, false to remove it
3508 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3510 * add filter rule to set/unset given VSI as default VSI for the switch
3511 * (represented by swid)
3514 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3517 struct ice_aqc_sw_rules_elem *s_rule;
3518 struct ice_fltr_info f_info;
3519 struct ice_hw *hw = pi->hw;
3520 enum ice_adminq_opc opcode;
3521 enum ice_status status;
3525 if (!ice_is_vsi_valid(hw, vsi_handle))
3526 return ICE_ERR_PARAM;
3527 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3529 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3530 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3531 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3533 return ICE_ERR_NO_MEMORY;
3535 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3537 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3538 f_info.flag = direction;
3539 f_info.fltr_act = ICE_FWD_TO_VSI;
3540 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3542 if (f_info.flag & ICE_FLTR_RX) {
3543 f_info.src = pi->lport;
3544 f_info.src_id = ICE_SRC_ID_LPORT;
3546 f_info.fltr_rule_id =
3547 pi->dflt_rx_vsi_rule_id;
3548 } else if (f_info.flag & ICE_FLTR_TX) {
3549 f_info.src_id = ICE_SRC_ID_VSI;
3550 f_info.src = hw_vsi_id;
3552 f_info.fltr_rule_id =
3553 pi->dflt_tx_vsi_rule_id;
3557 opcode = ice_aqc_opc_add_sw_rules;
3559 opcode = ice_aqc_opc_remove_sw_rules;
3561 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3563 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3564 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3567 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3569 if (f_info.flag & ICE_FLTR_TX) {
3570 pi->dflt_tx_vsi_num = hw_vsi_id;
3571 pi->dflt_tx_vsi_rule_id = index;
3572 } else if (f_info.flag & ICE_FLTR_RX) {
3573 pi->dflt_rx_vsi_num = hw_vsi_id;
3574 pi->dflt_rx_vsi_rule_id = index;
3577 if (f_info.flag & ICE_FLTR_TX) {
3578 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3579 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3580 } else if (f_info.flag & ICE_FLTR_RX) {
3581 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3582 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3587 ice_free(hw, s_rule);
3592 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3593 * @hw: pointer to the hardware structure
3594 * @recp_id: lookup type for which the specified rule needs to be searched
3595 * @f_info: rule information
3597 * Helper function to search for a unicast rule entry - this is to be used
3598 * to remove unicast MAC filter that is not shared with other VSIs on the
3601 * Returns pointer to entry storing the rule if found
3603 static struct ice_fltr_mgmt_list_entry *
3604 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3605 struct ice_fltr_info *f_info)
3607 struct ice_switch_info *sw = hw->switch_info;
3608 struct ice_fltr_mgmt_list_entry *list_itr;
3609 struct LIST_HEAD_TYPE *list_head;
3611 list_head = &sw->recp_list[recp_id].filt_rules;
3612 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3614 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3615 sizeof(f_info->l_data)) &&
3616 f_info->fwd_id.hw_vsi_id ==
3617 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3618 f_info->flag == list_itr->fltr_info.flag)
3625 * ice_remove_mac - remove a MAC address based filter rule
3626 * @hw: pointer to the hardware structure
3627 * @m_list: list of MAC addresses and forwarding information
3629 * This function removes either a MAC filter rule or a specific VSI from a
3630 * VSI list for a multicast MAC address.
3632 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3633 * ice_add_mac. Caller should be aware that this call will only work if all
3634 * the entries passed into m_list were added previously. It will not attempt to
3635 * do a partial remove of entries that were found.
3638 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3640 struct ice_fltr_list_entry *list_itr, *tmp;
3641 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3644 return ICE_ERR_PARAM;
3646 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3647 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3649 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3650 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3653 if (l_type != ICE_SW_LKUP_MAC)
3654 return ICE_ERR_PARAM;
3656 vsi_handle = list_itr->fltr_info.vsi_handle;
3657 if (!ice_is_vsi_valid(hw, vsi_handle))
3658 return ICE_ERR_PARAM;
3660 list_itr->fltr_info.fwd_id.hw_vsi_id =
3661 ice_get_hw_vsi_num(hw, vsi_handle);
3662 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3663 /* Don't remove the unicast address that belongs to
3664 * another VSI on the switch, since it is not being
3667 ice_acquire_lock(rule_lock);
3668 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3669 &list_itr->fltr_info)) {
3670 ice_release_lock(rule_lock);
3671 return ICE_ERR_DOES_NOT_EXIST;
3673 ice_release_lock(rule_lock);
3675 list_itr->status = ice_remove_rule_internal(hw,
3678 if (list_itr->status)
3679 return list_itr->status;
3685 * ice_remove_vlan - Remove VLAN based filter rule
3686 * @hw: pointer to the hardware structure
3687 * @v_list: list of VLAN entries and forwarding information
3690 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3692 struct ice_fltr_list_entry *v_list_itr, *tmp;
3695 return ICE_ERR_PARAM;
3697 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3699 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3701 if (l_type != ICE_SW_LKUP_VLAN)
3702 return ICE_ERR_PARAM;
3703 v_list_itr->status = ice_remove_rule_internal(hw,
3706 if (v_list_itr->status)
3707 return v_list_itr->status;
3713 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3714 * @hw: pointer to the hardware structure
3715 * @v_list: list of MAC VLAN entries and forwarding information
3718 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3720 struct ice_fltr_list_entry *v_list_itr, *tmp;
3723 return ICE_ERR_PARAM;
3725 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3727 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3729 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3730 return ICE_ERR_PARAM;
3731 v_list_itr->status =
3732 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3734 if (v_list_itr->status)
3735 return v_list_itr->status;
3741 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3742 * @fm_entry: filter entry to inspect
3743 * @vsi_handle: VSI handle to compare with filter info
3746 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3748 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3749 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3750 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3751 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3756 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3757 * @hw: pointer to the hardware structure
3758 * @vsi_handle: VSI handle to remove filters from
3759 * @vsi_list_head: pointer to the list to add entry to
3760 * @fi: pointer to fltr_info of filter entry to copy & add
3762 * Helper function, used when creating a list of filters to remove from
3763 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3764 * original filter entry, with the exception of fltr_info.fltr_act and
3765 * fltr_info.fwd_id fields. These are set such that later logic can
3766 * extract which VSI to remove the fltr from, and pass on that information.
3768 static enum ice_status
3769 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3770 struct LIST_HEAD_TYPE *vsi_list_head,
3771 struct ice_fltr_info *fi)
3773 struct ice_fltr_list_entry *tmp;
3775 /* this memory is freed up in the caller function
3776 * once filters for this VSI are removed
3778 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3780 return ICE_ERR_NO_MEMORY;
3782 tmp->fltr_info = *fi;
3784 /* Overwrite these fields to indicate which VSI to remove filter from,
3785 * so find and remove logic can extract the information from the
3786 * list entries. Note that original entries will still have proper
3789 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3790 tmp->fltr_info.vsi_handle = vsi_handle;
3791 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3793 LIST_ADD(&tmp->list_entry, vsi_list_head);
3799 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3800 * @hw: pointer to the hardware structure
3801 * @vsi_handle: VSI handle to remove filters from
3802 * @lkup_list_head: pointer to the list that has certain lookup type filters
3803 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3805 * Locates all filters in lkup_list_head that are used by the given VSI,
3806 * and adds COPIES of those entries to vsi_list_head (intended to be used
3807 * to remove the listed filters).
3808 * Note that this means all entries in vsi_list_head must be explicitly
3809 * deallocated by the caller when done with list.
3811 static enum ice_status
3812 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3813 struct LIST_HEAD_TYPE *lkup_list_head,
3814 struct LIST_HEAD_TYPE *vsi_list_head)
3816 struct ice_fltr_mgmt_list_entry *fm_entry;
3817 enum ice_status status = ICE_SUCCESS;
3819 /* check to make sure VSI ID is valid and within boundary */
3820 if (!ice_is_vsi_valid(hw, vsi_handle))
3821 return ICE_ERR_PARAM;
3823 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3824 ice_fltr_mgmt_list_entry, list_entry) {
3825 struct ice_fltr_info *fi;
3827 fi = &fm_entry->fltr_info;
3828 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3831 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3841 * ice_determine_promisc_mask
3842 * @fi: filter info to parse
3844 * Helper function to determine which ICE_PROMISC_ mask corresponds
3845 * to given filter into.
3847 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3849 u16 vid = fi->l_data.mac_vlan.vlan_id;
3850 u8 *macaddr = fi->l_data.mac.mac_addr;
3851 bool is_tx_fltr = false;
3852 u8 promisc_mask = 0;
3854 if (fi->flag == ICE_FLTR_TX)
3857 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3858 promisc_mask |= is_tx_fltr ?
3859 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3860 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3861 promisc_mask |= is_tx_fltr ?
3862 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3863 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3864 promisc_mask |= is_tx_fltr ?
3865 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3867 promisc_mask |= is_tx_fltr ?
3868 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3870 return promisc_mask;
3874 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3875 * @hw: pointer to the hardware structure
3876 * @vsi_handle: VSI handle to retrieve info from
3877 * @promisc_mask: pointer to mask to be filled in
3878 * @vid: VLAN ID of promisc VLAN VSI
3881 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3884 struct ice_switch_info *sw = hw->switch_info;
3885 struct ice_fltr_mgmt_list_entry *itr;
3886 struct LIST_HEAD_TYPE *rule_head;
3887 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3889 if (!ice_is_vsi_valid(hw, vsi_handle))
3890 return ICE_ERR_PARAM;
3894 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3895 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3897 ice_acquire_lock(rule_lock);
3898 LIST_FOR_EACH_ENTRY(itr, rule_head,
3899 ice_fltr_mgmt_list_entry, list_entry) {
3900 /* Continue if this filter doesn't apply to this VSI or the
3901 * VSI ID is not in the VSI map for this filter
3903 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3906 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3908 ice_release_lock(rule_lock);
3914 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3915 * @hw: pointer to the hardware structure
3916 * @vsi_handle: VSI handle to retrieve info from
3917 * @promisc_mask: pointer to mask to be filled in
3918 * @vid: VLAN ID of promisc VLAN VSI
3921 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3924 struct ice_switch_info *sw = hw->switch_info;
3925 struct ice_fltr_mgmt_list_entry *itr;
3926 struct LIST_HEAD_TYPE *rule_head;
3927 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3929 if (!ice_is_vsi_valid(hw, vsi_handle))
3930 return ICE_ERR_PARAM;
3934 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3935 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3937 ice_acquire_lock(rule_lock);
3938 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3940 /* Continue if this filter doesn't apply to this VSI or the
3941 * VSI ID is not in the VSI map for this filter
3943 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3946 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3948 ice_release_lock(rule_lock);
3954 * ice_remove_promisc - Remove promisc based filter rules
3955 * @hw: pointer to the hardware structure
3956 * @recp_id: recipe ID for which the rule needs to removed
3957 * @v_list: list of promisc entries
3959 static enum ice_status
3960 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3961 struct LIST_HEAD_TYPE *v_list)
3963 struct ice_fltr_list_entry *v_list_itr, *tmp;
3965 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3967 v_list_itr->status =
3968 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3969 if (v_list_itr->status)
3970 return v_list_itr->status;
3976 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3977 * @hw: pointer to the hardware structure
3978 * @vsi_handle: VSI handle to clear mode
3979 * @promisc_mask: mask of promiscuous config bits to clear
3980 * @vid: VLAN ID to clear VLAN promiscuous
3983 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3986 struct ice_switch_info *sw = hw->switch_info;
3987 struct ice_fltr_list_entry *fm_entry, *tmp;
3988 struct LIST_HEAD_TYPE remove_list_head;
3989 struct ice_fltr_mgmt_list_entry *itr;
3990 struct LIST_HEAD_TYPE *rule_head;
3991 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3992 enum ice_status status = ICE_SUCCESS;
3995 if (!ice_is_vsi_valid(hw, vsi_handle))
3996 return ICE_ERR_PARAM;
3999 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4001 recipe_id = ICE_SW_LKUP_PROMISC;
4003 rule_head = &sw->recp_list[recipe_id].filt_rules;
4004 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4006 INIT_LIST_HEAD(&remove_list_head);
4008 ice_acquire_lock(rule_lock);
4009 LIST_FOR_EACH_ENTRY(itr, rule_head,
4010 ice_fltr_mgmt_list_entry, list_entry) {
4011 u8 fltr_promisc_mask = 0;
4013 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4016 fltr_promisc_mask |=
4017 ice_determine_promisc_mask(&itr->fltr_info);
4019 /* Skip if filter is not completely specified by given mask */
4020 if (fltr_promisc_mask & ~promisc_mask)
4023 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4027 ice_release_lock(rule_lock);
4028 goto free_fltr_list;
4031 ice_release_lock(rule_lock);
4033 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4036 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4037 ice_fltr_list_entry, list_entry) {
4038 LIST_DEL(&fm_entry->list_entry);
4039 ice_free(hw, fm_entry);
4046 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4047 * @hw: pointer to the hardware structure
4048 * @vsi_handle: VSI handle to configure
4049 * @promisc_mask: mask of promiscuous config bits
4050 * @vid: VLAN ID to set VLAN promiscuous
4053 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4055 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4056 struct ice_fltr_list_entry f_list_entry;
4057 struct ice_fltr_info new_fltr;
4058 enum ice_status status = ICE_SUCCESS;
4064 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4066 if (!ice_is_vsi_valid(hw, vsi_handle))
4067 return ICE_ERR_PARAM;
4068 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4070 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4072 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4073 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4074 new_fltr.l_data.mac_vlan.vlan_id = vid;
4075 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4077 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4078 recipe_id = ICE_SW_LKUP_PROMISC;
4081 /* Separate filters must be set for each direction/packet type
4082 * combination, so we will loop over the mask value, store the
4083 * individual type, and clear it out in the input mask as it
4086 while (promisc_mask) {
4092 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4093 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4094 pkt_type = UCAST_FLTR;
4095 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4096 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4097 pkt_type = UCAST_FLTR;
4099 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4100 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4101 pkt_type = MCAST_FLTR;
4102 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4103 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4104 pkt_type = MCAST_FLTR;
4106 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4107 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4108 pkt_type = BCAST_FLTR;
4109 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4110 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4111 pkt_type = BCAST_FLTR;
4115 /* Check for VLAN promiscuous flag */
4116 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4117 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4118 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4119 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4123 /* Set filter DA based on packet type */
4124 mac_addr = new_fltr.l_data.mac.mac_addr;
4125 if (pkt_type == BCAST_FLTR) {
4126 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4127 } else if (pkt_type == MCAST_FLTR ||
4128 pkt_type == UCAST_FLTR) {
4129 /* Use the dummy ether header DA */
4130 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4131 ICE_NONDMA_TO_NONDMA);
4132 if (pkt_type == MCAST_FLTR)
4133 mac_addr[0] |= 0x1; /* Set multicast bit */
4136 /* Need to reset this to zero for all iterations */
4139 new_fltr.flag |= ICE_FLTR_TX;
4140 new_fltr.src = hw_vsi_id;
4142 new_fltr.flag |= ICE_FLTR_RX;
4143 new_fltr.src = hw->port_info->lport;
4146 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4147 new_fltr.vsi_handle = vsi_handle;
4148 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4149 f_list_entry.fltr_info = new_fltr;
4151 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4152 if (status != ICE_SUCCESS)
4153 goto set_promisc_exit;
4161 * ice_set_vlan_vsi_promisc
4162 * @hw: pointer to the hardware structure
4163 * @vsi_handle: VSI handle to configure
4164 * @promisc_mask: mask of promiscuous config bits
4165 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4167 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4170 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4171 bool rm_vlan_promisc)
4173 struct ice_switch_info *sw = hw->switch_info;
4174 struct ice_fltr_list_entry *list_itr, *tmp;
4175 struct LIST_HEAD_TYPE vsi_list_head;
4176 struct LIST_HEAD_TYPE *vlan_head;
4177 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4178 enum ice_status status;
4181 INIT_LIST_HEAD(&vsi_list_head);
4182 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4183 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4184 ice_acquire_lock(vlan_lock);
4185 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4187 ice_release_lock(vlan_lock);
4189 goto free_fltr_list;
4191 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4193 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4194 if (rm_vlan_promisc)
4195 status = ice_clear_vsi_promisc(hw, vsi_handle,
4196 promisc_mask, vlan_id);
4198 status = ice_set_vsi_promisc(hw, vsi_handle,
4199 promisc_mask, vlan_id);
4205 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4206 ice_fltr_list_entry, list_entry) {
4207 LIST_DEL(&list_itr->list_entry);
4208 ice_free(hw, list_itr);
4214 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4215 * @hw: pointer to the hardware structure
4216 * @vsi_handle: VSI handle to remove filters from
4217 * @lkup: switch rule filter lookup type
4220 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4221 enum ice_sw_lkup_type lkup)
4223 struct ice_switch_info *sw = hw->switch_info;
4224 struct ice_fltr_list_entry *fm_entry;
4225 struct LIST_HEAD_TYPE remove_list_head;
4226 struct LIST_HEAD_TYPE *rule_head;
4227 struct ice_fltr_list_entry *tmp;
4228 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4229 enum ice_status status;
4231 INIT_LIST_HEAD(&remove_list_head);
4232 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4233 rule_head = &sw->recp_list[lkup].filt_rules;
4234 ice_acquire_lock(rule_lock);
4235 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4237 ice_release_lock(rule_lock);
4242 case ICE_SW_LKUP_MAC:
4243 ice_remove_mac(hw, &remove_list_head);
4245 case ICE_SW_LKUP_VLAN:
4246 ice_remove_vlan(hw, &remove_list_head);
4248 case ICE_SW_LKUP_PROMISC:
4249 case ICE_SW_LKUP_PROMISC_VLAN:
4250 ice_remove_promisc(hw, lkup, &remove_list_head);
4252 case ICE_SW_LKUP_MAC_VLAN:
4253 ice_remove_mac_vlan(hw, &remove_list_head);
4255 case ICE_SW_LKUP_ETHERTYPE:
4256 case ICE_SW_LKUP_ETHERTYPE_MAC:
4257 ice_remove_eth_mac(hw, &remove_list_head);
4259 case ICE_SW_LKUP_DFLT:
4260 ice_debug(hw, ICE_DBG_SW,
4261 "Remove filters for this lookup type hasn't been implemented yet\n");
4263 case ICE_SW_LKUP_LAST:
4264 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4268 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4269 ice_fltr_list_entry, list_entry) {
4270 LIST_DEL(&fm_entry->list_entry);
4271 ice_free(hw, fm_entry);
4276 * ice_remove_vsi_fltr - Remove all filters for a VSI
4277 * @hw: pointer to the hardware structure
4278 * @vsi_handle: VSI handle to remove filters from
4280 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4282 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4284 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4285 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4286 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4287 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4288 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4289 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4290 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4291 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4295 * ice_alloc_res_cntr - allocating resource counter
4296 * @hw: pointer to the hardware structure
4297 * @type: type of resource
4298 * @alloc_shared: if set it is shared else dedicated
4299 * @num_items: number of entries requested for FD resource type
4300 * @counter_id: counter index returned by AQ call
4303 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4306 struct ice_aqc_alloc_free_res_elem *buf;
4307 enum ice_status status;
4310 /* Allocate resource */
4311 buf_len = sizeof(*buf);
4312 buf = (struct ice_aqc_alloc_free_res_elem *)
4313 ice_malloc(hw, buf_len);
4315 return ICE_ERR_NO_MEMORY;
4317 buf->num_elems = CPU_TO_LE16(num_items);
4318 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4319 ICE_AQC_RES_TYPE_M) | alloc_shared);
4321 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4322 ice_aqc_opc_alloc_res, NULL);
4326 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4334 * ice_free_res_cntr - free resource counter
4335 * @hw: pointer to the hardware structure
4336 * @type: type of resource
4337 * @alloc_shared: if set it is shared else dedicated
4338 * @num_items: number of entries to be freed for FD resource type
4339 * @counter_id: counter ID resource which needs to be freed
4342 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4345 struct ice_aqc_alloc_free_res_elem *buf;
4346 enum ice_status status;
4350 buf_len = sizeof(*buf);
4351 buf = (struct ice_aqc_alloc_free_res_elem *)
4352 ice_malloc(hw, buf_len);
4354 return ICE_ERR_NO_MEMORY;
4356 buf->num_elems = CPU_TO_LE16(num_items);
4357 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4358 ICE_AQC_RES_TYPE_M) | alloc_shared);
4359 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4361 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4362 ice_aqc_opc_free_res, NULL);
4364 ice_debug(hw, ICE_DBG_SW,
4365 "counter resource could not be freed\n");
4372 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4373 * @hw: pointer to the hardware structure
4374 * @counter_id: returns counter index
4376 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4378 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4379 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4384 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4385 * @hw: pointer to the hardware structure
4386 * @counter_id: counter index to be freed
4388 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4390 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4391 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4396 * ice_alloc_res_lg_act - add large action resource
4397 * @hw: pointer to the hardware structure
4398 * @l_id: large action ID to fill it in
4399 * @num_acts: number of actions to hold with a large action entry
4401 static enum ice_status
4402 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4404 struct ice_aqc_alloc_free_res_elem *sw_buf;
4405 enum ice_status status;
4408 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4409 return ICE_ERR_PARAM;
4411 /* Allocate resource for large action */
4412 buf_len = sizeof(*sw_buf);
4413 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4414 ice_malloc(hw, buf_len);
4416 return ICE_ERR_NO_MEMORY;
4418 sw_buf->num_elems = CPU_TO_LE16(1);
4420 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4421 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4422 * If num_acts is greater than 2, then use
4423 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4424 * The num_acts cannot exceed 4. This was ensured at the
4425 * beginning of the function.
4428 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4429 else if (num_acts == 2)
4430 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4432 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4434 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4435 ice_aqc_opc_alloc_res, NULL);
4437 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4439 ice_free(hw, sw_buf);
4444 * ice_add_mac_with_sw_marker - add filter with sw marker
4445 * @hw: pointer to the hardware structure
4446 * @f_info: filter info structure containing the MAC filter information
4447 * @sw_marker: sw marker to tag the Rx descriptor with
4450 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4453 struct ice_switch_info *sw = hw->switch_info;
4454 struct ice_fltr_mgmt_list_entry *m_entry;
4455 struct ice_fltr_list_entry fl_info;
4456 struct LIST_HEAD_TYPE l_head;
4457 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4458 enum ice_status ret;
4462 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4463 return ICE_ERR_PARAM;
4465 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4466 return ICE_ERR_PARAM;
4468 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4469 return ICE_ERR_PARAM;
4471 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4472 return ICE_ERR_PARAM;
4473 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4475 /* Add filter if it doesn't exist so then the adding of large
4476 * action always results in update
4479 INIT_LIST_HEAD(&l_head);
4480 fl_info.fltr_info = *f_info;
4481 LIST_ADD(&fl_info.list_entry, &l_head);
4483 entry_exists = false;
4484 ret = ice_add_mac(hw, &l_head);
4485 if (ret == ICE_ERR_ALREADY_EXISTS)
4486 entry_exists = true;
4490 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4491 ice_acquire_lock(rule_lock);
4492 /* Get the book keeping entry for the filter */
4493 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4497 /* If counter action was enabled for this rule then don't enable
4498 * sw marker large action
4500 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4501 ret = ICE_ERR_PARAM;
4505 /* if same marker was added before */
4506 if (m_entry->sw_marker_id == sw_marker) {
4507 ret = ICE_ERR_ALREADY_EXISTS;
4511 /* Allocate a hardware table entry to hold large act. Three actions
4512 * for marker based large action
4514 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4518 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4521 /* Update the switch rule to add the marker action */
4522 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4524 ice_release_lock(rule_lock);
4529 ice_release_lock(rule_lock);
4530 /* only remove entry if it did not exist previously */
4532 ret = ice_remove_mac(hw, &l_head);
4538 * ice_add_mac_with_counter - add filter with counter enabled
4539 * @hw: pointer to the hardware structure
4540 * @f_info: pointer to filter info structure containing the MAC filter
4544 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4546 struct ice_switch_info *sw = hw->switch_info;
4547 struct ice_fltr_mgmt_list_entry *m_entry;
4548 struct ice_fltr_list_entry fl_info;
4549 struct LIST_HEAD_TYPE l_head;
4550 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4551 enum ice_status ret;
4556 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4557 return ICE_ERR_PARAM;
4559 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4560 return ICE_ERR_PARAM;
4562 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4563 return ICE_ERR_PARAM;
4564 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4566 entry_exist = false;
4568 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4570 /* Add filter if it doesn't exist so then the adding of large
4571 * action always results in update
4573 INIT_LIST_HEAD(&l_head);
4575 fl_info.fltr_info = *f_info;
4576 LIST_ADD(&fl_info.list_entry, &l_head);
4578 ret = ice_add_mac(hw, &l_head);
4579 if (ret == ICE_ERR_ALREADY_EXISTS)
4584 ice_acquire_lock(rule_lock);
4585 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4587 ret = ICE_ERR_BAD_PTR;
4591 /* Don't enable counter for a filter for which sw marker was enabled */
4592 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4593 ret = ICE_ERR_PARAM;
4597 /* If a counter was already enabled then don't need to add again */
4598 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4599 ret = ICE_ERR_ALREADY_EXISTS;
4603 /* Allocate a hardware table entry to VLAN counter */
4604 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4608 /* Allocate a hardware table entry to hold large act. Two actions for
4609 * counter based large action
4611 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4615 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4618 /* Update the switch rule to add the counter action */
4619 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4621 ice_release_lock(rule_lock);
4626 ice_release_lock(rule_lock);
4627 /* only remove entry if it did not exist previously */
4629 ret = ice_remove_mac(hw, &l_head);
4634 /* This is mapping table entry that maps every word within a given protocol
4635 * structure to the real byte offset as per the specification of that
4637 * for example dst address is 3 words in ethertype header and corresponding
4638 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4639 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4640 * matching entry describing its field. This needs to be updated if new
4641 * structure is added to that union.
4643 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4644 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4645 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4646 { ICE_ETYPE_OL, { 0 } },
4647 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4648 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4649 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4650 26, 28, 30, 32, 34, 36, 38 } },
4651 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4652 26, 28, 30, 32, 34, 36, 38 } },
4653 { ICE_TCP_IL, { 0, 2 } },
4654 { ICE_UDP_OF, { 0, 2 } },
4655 { ICE_UDP_ILOS, { 0, 2 } },
4656 { ICE_SCTP_IL, { 0, 2 } },
4657 { ICE_VXLAN, { 8, 10, 12, 14 } },
4658 { ICE_GENEVE, { 8, 10, 12, 14 } },
4659 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4660 { ICE_NVGRE, { 0, 2, 4, 6 } },
4661 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4662 { ICE_PPPOE, { 0, 2, 4, 6 } },
4663 { ICE_PROTOCOL_LAST, { 0 } }
4666 /* The following table describes preferred grouping of recipes.
4667 * If a recipe that needs to be programmed is a superset or matches one of the
4668 * following combinations, then the recipe needs to be chained as per the
4671 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
4672 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4673 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
4674 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4675 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4676 { 0xffff, 0xffff, 0xffff, 0xffff } },
4677 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4678 { 0xffff, 0xffff, 0xffff, 0xffff } },
4679 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4680 { 0xffff, 0xffff, 0xffff, 0xffff } },
4683 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4684 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4685 { ICE_MAC_IL, ICE_MAC_IL_HW },
4686 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4687 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4688 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4689 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4690 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4691 { ICE_TCP_IL, ICE_TCP_IL_HW },
4692 { ICE_UDP_OF, ICE_UDP_OF_HW },
4693 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4694 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4695 { ICE_VXLAN, ICE_UDP_OF_HW },
4696 { ICE_GENEVE, ICE_UDP_OF_HW },
4697 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4698 { ICE_NVGRE, ICE_GRE_OF_HW },
4699 { ICE_GTP, ICE_UDP_OF_HW },
4700 { ICE_PPPOE, ICE_PPPOE_HW },
4701 { ICE_PROTOCOL_LAST, 0 }
4705 * ice_find_recp - find a recipe
4706 * @hw: pointer to the hardware structure
4707 * @lkup_exts: extension sequence to match
4709 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4711 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4713 bool refresh_required = true;
4714 struct ice_sw_recipe *recp;
4717 /* Walk through existing recipes to find a match */
4718 recp = hw->switch_info->recp_list;
4719 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4720 /* If recipe was not created for this ID, in SW bookkeeping,
4721 * check if FW has an entry for this recipe. If the FW has an
4722 * entry update it in our SW bookkeeping and continue with the
4725 if (!recp[i].recp_created)
4726 if (ice_get_recp_frm_fw(hw,
4727 hw->switch_info->recp_list, i,
4731 /* if number of words we are looking for match */
4732 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4733 struct ice_fv_word *a = lkup_exts->fv_words;
4734 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
4738 for (p = 0; p < lkup_exts->n_val_words; p++) {
4739 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4741 if (a[p].off == b[q].off &&
4742 a[p].prot_id == b[q].prot_id)
4743 /* Found the "p"th word in the
4748 /* After walking through all the words in the
4749 * "i"th recipe if "p"th word was not found then
4750 * this recipe is not what we are looking for.
4751 * So break out from this loop and try the next
4754 if (q >= recp[i].lkup_exts.n_val_words) {
4759 /* If for "i"th recipe the found was never set to false
4760 * then it means we found our match
4763 return i; /* Return the recipe ID */
4766 return ICE_MAX_NUM_RECIPES;
4770 * ice_prot_type_to_id - get protocol ID from protocol type
4771 * @type: protocol type
4772 * @id: pointer to variable that will receive the ID
4774 * Returns true if found, false otherwise
4776 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4780 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4781 if (ice_prot_id_tbl[i].type == type) {
4782 *id = ice_prot_id_tbl[i].protocol_id;
4789 * ice_find_valid_words - count valid words
4790 * @rule: advanced rule with lookup information
4791 * @lkup_exts: byte offset extractions of the words that are valid
4793 * calculate valid words in a lookup rule using mask value
4796 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4797 struct ice_prot_lkup_ext *lkup_exts)
4803 if (!ice_prot_type_to_id(rule->type, &prot_id))
4806 word = lkup_exts->n_val_words;
4808 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4809 if (((u16 *)&rule->m_u)[j] &&
4810 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4811 /* No more space to accommodate */
4812 if (word >= ICE_MAX_CHAIN_WORDS)
4814 lkup_exts->fv_words[word].off =
4815 ice_prot_ext[rule->type].offs[j];
4816 lkup_exts->fv_words[word].prot_id =
4817 ice_prot_id_tbl[rule->type].protocol_id;
4818 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4822 ret_val = word - lkup_exts->n_val_words;
4823 lkup_exts->n_val_words = word;
4829 * ice_find_prot_off_ind - check for specific ID and offset in rule
4830 * @lkup_exts: an array of protocol header extractions
4831 * @prot_type: protocol type to check
4832 * @off: expected offset of the extraction
4834 * Check if the prot_ext has given protocol ID and offset
4837 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4842 for (j = 0; j < lkup_exts->n_val_words; j++)
4843 if (lkup_exts->fv_words[j].off == off &&
4844 lkup_exts->fv_words[j].prot_id == prot_type)
4847 return ICE_MAX_CHAIN_WORDS;
4851 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4852 * @lkup_exts: an array of protocol header extractions
4853 * @r_policy: preferred recipe grouping policy
4855 * Helper function to check if given recipe group is subset we need to check if
4856 * all the words described by the given recipe group exist in the advanced rule
4857 * look up information
4860 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4861 const struct ice_pref_recipe_group *r_policy)
4863 u8 ind[ICE_NUM_WORDS_RECIPE];
4867 /* check if everything in the r_policy is part of the entire rule */
4868 for (i = 0; i < r_policy->n_val_pairs; i++) {
4871 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4872 r_policy->pairs[i].off);
4873 if (j >= ICE_MAX_CHAIN_WORDS)
4876 /* store the indexes temporarily found by the find function
4877 * this will be used to mark the words as 'done'
4882 /* If the entire policy recipe was a true match, then mark the fields
4883 * that are covered by the recipe as 'done' meaning that these words
4884 * will be clumped together in one recipe.
4885 * "Done" here means in our searching if certain recipe group
4886 * matches or is subset of the given rule, then we mark all
4887 * the corresponding offsets as found. So the remaining recipes should
4888 * be created with whatever words that were left.
4890 for (i = 0; i < count; i++) {
4893 ice_set_bit(in, lkup_exts->done);
4899 * ice_create_first_fit_recp_def - Create a recipe grouping
4900 * @hw: pointer to the hardware structure
4901 * @lkup_exts: an array of protocol header extractions
4902 * @rg_list: pointer to a list that stores new recipe groups
4903 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4905 * Using first fit algorithm, take all the words that are still not done
4906 * and start grouping them in 4-word groups. Each group makes up one
4909 static enum ice_status
4910 ice_create_first_fit_recp_def(struct ice_hw *hw,
4911 struct ice_prot_lkup_ext *lkup_exts,
4912 struct LIST_HEAD_TYPE *rg_list,
4915 struct ice_pref_recipe_group *grp = NULL;
4920 /* Walk through every word in the rule to check if it is not done. If so
4921 * then this word needs to be part of a new recipe.
4923 for (j = 0; j < lkup_exts->n_val_words; j++)
4924 if (!ice_is_bit_set(lkup_exts->done, j)) {
4926 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4927 struct ice_recp_grp_entry *entry;
4929 entry = (struct ice_recp_grp_entry *)
4930 ice_malloc(hw, sizeof(*entry));
4932 return ICE_ERR_NO_MEMORY;
4933 LIST_ADD(&entry->l_entry, rg_list);
4934 grp = &entry->r_group;
4938 grp->pairs[grp->n_val_pairs].prot_id =
4939 lkup_exts->fv_words[j].prot_id;
4940 grp->pairs[grp->n_val_pairs].off =
4941 lkup_exts->fv_words[j].off;
4942 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4950 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4951 * @hw: pointer to the hardware structure
4952 * @fv_list: field vector with the extraction sequence information
4953 * @rg_list: recipe groupings with protocol-offset pairs
4955 * Helper function to fill in the field vector indices for protocol-offset
4956 * pairs. These indexes are then ultimately programmed into a recipe.
4958 static enum ice_status
4959 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4960 struct LIST_HEAD_TYPE *rg_list)
4962 struct ice_sw_fv_list_entry *fv;
4963 struct ice_recp_grp_entry *rg;
4964 struct ice_fv_word *fv_ext;
4966 if (LIST_EMPTY(fv_list))
4969 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4970 fv_ext = fv->fv_ptr->ew;
4972 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4975 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4976 struct ice_fv_word *pr;
4981 pr = &rg->r_group.pairs[i];
4982 mask = rg->r_group.mask[i];
4984 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4985 if (fv_ext[j].prot_id == pr->prot_id &&
4986 fv_ext[j].off == pr->off) {
4989 /* Store index of field vector */
4991 /* Mask is given by caller as big
4992 * endian, but sent to FW as little
4995 rg->fv_mask[i] = mask << 8 | mask >> 8;
4999 /* Protocol/offset could not be found, caller gave an
5003 return ICE_ERR_PARAM;
5011 * ice_find_free_recp_res_idx - find free result indexes for recipe
5012 * @hw: pointer to hardware structure
5013 * @profiles: bitmap of profiles that will be associated with the new recipe
5014 * @free_idx: pointer to variable to receive the free index bitmap
5016 * The algorithm used here is:
5017 * 1. When creating a new recipe, create a set P which contains all
5018 * Profiles that will be associated with our new recipe
5020 * 2. For each Profile p in set P:
5021 * a. Add all recipes associated with Profile p into set R
5022 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5023 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5024 * i. Or just assume they all have the same possible indexes:
5026 * i.e., PossibleIndexes = 0x0000F00000000000
5028 * 3. For each Recipe r in set R:
5029 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5030 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5032 * FreeIndexes will contain the bits indicating the indexes free for use,
5033 * then the code needs to update the recipe[r].used_result_idx_bits to
5034 * indicate which indexes were selected for use by this recipe.
5037 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5038 ice_bitmap_t *free_idx)
5040 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5041 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5042 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5046 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5047 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5048 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5049 ice_init_possible_res_bm(possible_idx);
5051 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
5052 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
5053 ice_set_bit(bit, possible_idx);
5055 /* For each profile we are going to associate the recipe with, add the
5056 * recipes that are associated with that profile. This will give us
5057 * the set of recipes that our recipe may collide with.
5060 while (ICE_MAX_NUM_PROFILES >
5061 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5062 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5063 ICE_MAX_NUM_RECIPES);
5068 /* For each recipe that our new recipe may collide with, determine
5069 * which indexes have been used.
5071 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5072 if (ice_is_bit_set(recipes, bit))
5073 ice_or_bitmap(used_idx, used_idx,
5074 hw->switch_info->recp_list[bit].res_idxs,
5077 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5079 /* return number of free indexes */
5081 while (ICE_MAX_FV_WORDS >
5082 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
/* NOTE(review): this section of the listing appears truncated — local
 * declarations (e.g. free_res_idx, recipe_count, chain_idx, recps, i, rid),
 * several goto/label lines and the function tail are missing from the
 * listing. The surviving lines are kept byte-identical below; restore them
 * from the original file before building.
 */
5091 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5092 * @hw: pointer to hardware structure
5093 * @rm: recipe management list entry
5094 * @match_tun: if field vector index for tunnel needs to be programmed
5095 * @profiles: bitmap of profiles that will be assocated.
5097 static enum ice_status
5098 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5099 bool match_tun, ice_bitmap_t *profiles)
5101 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5102 struct ice_aqc_recipe_data_elem *tmp;
5103 struct ice_aqc_recipe_data_elem *buf;
5104 struct ice_recp_grp_entry *entry;
5105 enum ice_status status;
5111 /* When more than one recipe are required, another recipe is needed to
5112 * chain them together. Matching a tunnel metadata ID takes up one of
5113 * the match fields in the chaining recipe reducing the number of
5114 * chained recipes by one.
/* Chained recipes consume result indexes; bail out early when there are
 * not enough free ones for the number of groups requested.
 */
5116 /* check number of free result indices */
5117 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5118 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5120 if (rm->n_grp_count > 1) {
5121 if (rm->n_grp_count > free_res_idx)
5122 return ICE_ERR_MAX_LIMIT;
/* Scratch buffer for existing recipes read back from FW, plus the buffer
 * actually submitted via the add-recipe AQ command.
 */
5127 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5128 ICE_MAX_NUM_RECIPES,
5131 return ICE_ERR_NO_MEMORY;
5133 buf = (struct ice_aqc_recipe_data_elem *)
5134 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5136 status = ICE_ERR_NO_MEMORY;
5140 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5141 recipe_count = ICE_MAX_NUM_RECIPES;
5142 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5144 if (status || recipe_count == 0)
5147 /* Allocate the recipe resources, and configure them according to the
5148 * match fields from protocol headers and extracted field vectors.
/* One pass per recipe group: allocate a recipe ID and fill in the lookup
 * indexes/masks for that group's protocol-offset pairs.
 */
5150 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5151 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5154 status = ice_alloc_recipe(hw, &entry->rid);
5158 /* Clear the result index of the located recipe, as this will be
5159 * updated, if needed, later in the recipe creation process.
5161 tmp[0].content.result_indx = 0;
5163 buf[recps] = tmp[0];
5164 buf[recps].recipe_indx = (u8)entry->rid;
5165 /* if the recipe is a non-root recipe RID should be programmed
5166 * as 0 for the rules to be applied correctly.
5168 buf[recps].content.rid = 0;
5169 ice_memset(&buf[recps].content.lkup_indx, 0,
5170 sizeof(buf[recps].content.lkup_indx),
5173 /* All recipes use look-up index 0 to match switch ID. */
5174 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5175 buf[recps].content.mask[0] =
5176 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5177 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5180 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5181 buf[recps].content.lkup_indx[i] = 0x80;
5182 buf[recps].content.mask[i] = 0;
5185 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5186 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5187 buf[recps].content.mask[i + 1] =
5188 CPU_TO_LE16(entry->fv_mask[i]);
/* For chained recipes each non-root recipe publishes its match outcome
 * through a result index consumed by the final chaining recipe.
 */
5191 if (rm->n_grp_count > 1) {
5192 /* Checks to see if there really is a valid result index
5195 if (chain_idx >= ICE_MAX_FV_WORDS) {
5196 ice_debug(hw, ICE_DBG_SW,
5197 "No chain index available\n");
5198 status = ICE_ERR_MAX_LIMIT;
5202 entry->chain_idx = chain_idx;
5203 buf[recps].content.result_indx =
5204 ICE_AQ_RECIPE_RESULT_EN |
5205 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5206 ICE_AQ_RECIPE_RESULT_DATA_M);
5207 ice_clear_bit(chain_idx, result_idx_bm);
5208 chain_idx = ice_find_first_bit(result_idx_bm,
5212 /* fill recipe dependencies */
5213 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5214 ICE_MAX_NUM_RECIPES);
5215 ice_set_bit(buf[recps].recipe_indx,
5216 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5217 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is the root recipe itself */
5221 if (rm->n_grp_count == 1) {
5222 rm->root_rid = buf[0].recipe_indx;
5223 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5224 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5225 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5226 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5227 sizeof(buf[0].recipe_bitmap),
5228 ICE_NONDMA_TO_NONDMA);
5230 status = ICE_ERR_BAD_PTR;
5233 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5234 * the recipe which is getting created if specified
5235 * by user. Usually any advanced switch filter, which results
5236 * into new extraction sequence, ended up creating a new recipe
5237 * of type ROOT and usually recipes are associated with profiles
5238 * Switch rule referreing newly created recipe, needs to have
5239 * either/or 'fwd' or 'join' priority, otherwise switch rule
5240 * evaluation will not happen correctly. In other words, if
5241 * switch rule to be evaluated on priority basis, then recipe
5242 * needs to have priority, otherwise it will be evaluated last.
5244 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: allocate one extra root recipe that matches on the
 * chain result indexes produced by the other recipes.
 */
5246 struct ice_recp_grp_entry *last_chain_entry;
5249 /* Allocate the last recipe that will chain the outcomes of the
5250 * other recipes together
5252 status = ice_alloc_recipe(hw, &rid);
5256 buf[recps].recipe_indx = (u8)rid;
5257 buf[recps].content.rid = (u8)rid;
5258 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5259 /* the new entry created should also be part of rg_list to
5260 * make sure we have complete recipe
5262 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5263 sizeof(*last_chain_entry));
5264 if (!last_chain_entry) {
5265 status = ICE_ERR_NO_MEMORY;
5268 last_chain_entry->rid = rid;
5269 ice_memset(&buf[recps].content.lkup_indx, 0,
5270 sizeof(buf[recps].content.lkup_indx),
5272 /* All recipes use look-up index 0 to match switch ID. */
5273 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5274 buf[recps].content.mask[0] =
5275 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5276 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5277 buf[recps].content.lkup_indx[i] =
5278 ICE_AQ_RECIPE_LKUP_IGNORE;
5279 buf[recps].content.mask[i] = 0;
5283 /* update r_bitmap with the recp that is used for chaining */
5284 ice_set_bit(rid, rm->r_bitmap);
5285 /* this is the recipe that chains all the other recipes so it
5286 * should not have a chaining ID to indicate the same
5288 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5289 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5291 last_chain_entry->fv_idx[i] = entry->chain_idx;
5292 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5293 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5294 ice_set_bit(entry->rid, rm->r_bitmap);
5296 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5297 if (sizeof(buf[recps].recipe_bitmap) >=
5298 sizeof(rm->r_bitmap)) {
5299 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5300 sizeof(buf[recps].recipe_bitmap),
5301 ICE_NONDMA_TO_NONDMA);
5303 status = ICE_ERR_BAD_PTR;
5306 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5308 /* To differentiate among different UDP tunnels, a meta data ID
5312 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5313 buf[recps].content.mask[i] =
5314 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5318 rm->root_rid = (u8)rid;
/* Submit the assembled recipes to FW under the change lock */
5320 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5324 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5325 ice_release_change_lock(hw);
/* Mirror every newly created recipe into the SW bookkeeping list so
 * later lookups (ice_find_recp) can match against it.
 */
5329 /* Every recipe that just got created add it to the recipe
5332 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5333 struct ice_switch_info *sw = hw->switch_info;
5334 bool is_root, idx_found = false;
5335 struct ice_sw_recipe *recp;
5336 u16 idx, buf_idx = 0;
5338 /* find buffer index for copying some data */
5339 for (idx = 0; idx < rm->n_grp_count; idx++)
5340 if (buf[idx].recipe_indx == entry->rid) {
5346 status = ICE_ERR_OUT_OF_RANGE;
5350 recp = &sw->recp_list[entry->rid];
5351 is_root = (rm->root_rid == entry->rid);
5352 recp->is_root = is_root;
5354 recp->root_rid = entry->rid;
5355 recp->big_recp = (is_root && rm->n_grp_count > 1);
5357 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5358 entry->r_group.n_val_pairs *
5359 sizeof(struct ice_fv_word),
5360 ICE_NONDMA_TO_NONDMA);
5362 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5363 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5365 /* Copy non-result fv index values and masks to recipe. This
5366 * call will also update the result recipe bitmask.
5368 ice_collect_result_idx(&buf[buf_idx], recp);
5370 /* for non-root recipes, also copy to the root, this allows
5371 * easier matching of a complete chained recipe
5374 ice_collect_result_idx(&buf[buf_idx],
5375 &sw->recp_list[rm->root_rid]);
5377 recp->n_ext_words = entry->r_group.n_val_pairs;
5378 recp->chain_idx = entry->chain_idx;
5379 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5380 recp->tun_type = rm->tun_type;
5381 recp->recp_created = true;
5396 * ice_create_recipe_group - creates recipe group
5397 * @hw: pointer to hardware structure
5398 * @rm: recipe management list entry
5399 * @lkup_exts: lookup elements
5401 static enum ice_status
5402 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5403 struct ice_prot_lkup_ext *lkup_exts)
5405 struct ice_recp_grp_entry *entry;
5406 struct ice_recp_grp_entry *tmp;
5407 enum ice_status status;
5411 rm->n_grp_count = 0;
5414 if (lkup_exts->n_val_words > ICE_NUM_WORDS_RECIPE) {
5415 /* Each switch recipe can match up to 5 words or metadata. One
5416 * word in each recipe is used to match the switch ID. Four
5417 * words are left for matching other values. If the new advanced
5418 * recipe requires more than 4 words, it needs to be split into
5419 * multiple recipes which are chained together using the
5420 * intermediate result that each produces as input to the other
5421 * recipes in the sequence.
5423 groups = ARRAY_SIZE(ice_recipe_pack);
5425 /* Check if any of the preferred recipes from the grouping
5428 for (i = 0; i < groups; i++)
5429 /* Check if the recipe from the preferred grouping
5430 * matches or is a subset of the fields that needs to be
5433 if (ice_is_recipe_subset(lkup_exts,
5434 &ice_recipe_pack[i])) {
5435 /* This recipe can be used by itself or grouped
5436 * with other recipes.
5438 entry = (struct ice_recp_grp_entry *)
5439 ice_malloc(hw, sizeof(*entry));
5441 status = ICE_ERR_NO_MEMORY;
5444 entry->r_group = ice_recipe_pack[i];
5445 LIST_ADD(&entry->l_entry, &rm->rg_list);
5450 /* Create recipes for words that are marked not done by packing them
5453 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5454 &rm->rg_list, &recp_count);
5456 rm->n_grp_count += recp_count;
5457 rm->n_ext_words = lkup_exts->n_val_words;
5458 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5459 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5460 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5461 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5466 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5468 LIST_DEL(&entry->l_entry);
5469 ice_free(hw, entry);
5477 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5478 * @hw: pointer to hardware structure
5479 * @lkups: lookup elements or match criteria for the advanced recipe, one
5480 * structure per protocol header
5481 * @lkups_cnt: number of protocols
5482 * @bm: bitmap of field vectors to consider
5483 * @fv_list: pointer to a list that holds the returned field vectors
5485 static enum ice_status
5486 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5487 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5489 enum ice_status status;
5493 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5495 return ICE_ERR_NO_MEMORY;
5497 for (i = 0; i < lkups_cnt; i++)
5498 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5499 status = ICE_ERR_CFG;
5503 /* Find field vectors that include all specified protocol types */
5504 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5507 ice_free(hw, prot_ids);
5512 * ice_add_special_words - Add words that are not protocols, such as metadata
5513 * @rinfo: other information regarding the rule e.g. priority and action info
5514 * @lkup_exts: lookup word structure
5516 static enum ice_status
5517 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5518 struct ice_prot_lkup_ext *lkup_exts)
5520 /* If this is a tunneled packet, then add recipe index to match the
5521 * tunnel bit in the packet metadata flags.
5523 if (rinfo->tun_type != ICE_NON_TUN) {
5524 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5525 u8 word = lkup_exts->n_val_words++;
5527 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5528 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5530 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5532 return ICE_ERR_MAX_LIMIT;
5539 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5540 * @hw: pointer to hardware structure
5541 * @rinfo: other information regarding the rule e.g. priority and action info
5542 * @bm: pointer to memory for returning the bitmap of field vectors
5545 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5548 enum ice_prof_type type;
5550 switch (rinfo->tun_type) {
5552 type = ICE_PROF_NON_TUN;
5554 case ICE_ALL_TUNNELS:
5555 type = ICE_PROF_TUN_ALL;
5557 case ICE_SW_TUN_VXLAN_GPE:
5558 case ICE_SW_TUN_GENEVE:
5559 case ICE_SW_TUN_VXLAN:
5560 case ICE_SW_TUN_UDP:
5561 case ICE_SW_TUN_GTP:
5562 type = ICE_PROF_TUN_UDP;
5564 case ICE_SW_TUN_NVGRE:
5565 type = ICE_PROF_TUN_GRE;
5567 case ICE_SW_TUN_PPPOE:
5568 type = ICE_PROF_TUN_PPPOE;
5570 case ICE_SW_TUN_AND_NON_TUN:
5572 type = ICE_PROF_ALL;
5576 ice_get_sw_fv_bitmap(hw, type, bm);
5580 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5581 * @hw: pointer to hardware structure
5582 * @lkups: lookup elements or match criteria for the advanced recipe, one
5583 * structure per protocol header
5584 * @lkups_cnt: number of protocols
5585 * @rinfo: other information regarding the rule e.g. priority and action info
5586 * @rid: return the recipe ID of the recipe created
 *
 * Builds an advanced switch recipe from the caller's lookup elements, or
 * reuses an existing recipe whose fv/mask list already matches.  On any
 * failure all intermediate allocations are released via the error labels.
 *
 * NOTE(review): this source appears lossy-sampled (several statements and
 * braces from the upstream file are absent); comments describe only what is
 * visible here.
5588 static enum ice_status
5589 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5590 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5592 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5593 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5594 struct ice_prot_lkup_ext *lkup_exts;
5595 struct ice_recp_grp_entry *r_entry;
5596 struct ice_sw_fv_list_entry *fvit;
5597 struct ice_recp_grp_entry *r_tmp;
5598 struct ice_sw_fv_list_entry *tmp;
5599 enum ice_status status = ICE_SUCCESS;
5600 struct ice_sw_recipe *rm;
5601 bool match_tun = false;
5605 return ICE_ERR_PARAM;
 /* lkup_exts accumulates the protocol words extracted from all lookups */
5607 lkup_exts = (struct ice_prot_lkup_ext *)
5608 ice_malloc(hw, sizeof(*lkup_exts));
5610 return ICE_ERR_NO_MEMORY;
5612 /* Determine the number of words to be matched and if it exceeds a
5613 * recipe's restrictions
5615 for (i = 0; i < lkups_cnt; i++) {
5618 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5619 status = ICE_ERR_CFG;
5620 goto err_free_lkup_exts;
5623 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5625 status = ICE_ERR_CFG;
5626 goto err_free_lkup_exts;
5630 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5632 status = ICE_ERR_NO_MEMORY;
5633 goto err_free_lkup_exts;
5636 /* Get field vectors that contain fields extracted from all the protocol
5637 * headers being programmed.
5639 INIT_LIST_HEAD(&rm->fv_list);
5640 INIT_LIST_HEAD(&rm->rg_list);
5642 /* Get bitmap of field vectors (profiles) that are compatible with the
5643 * rule request; only these will be searched in the subsequent call to
5646 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5648 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5652 /* Group match words into recipes using preferred recipe grouping
5655 status = ice_create_recipe_group(hw, rm, lkup_exts);
5659 /* There is only profile for UDP tunnels. So, it is necessary to use a
5660 * metadata ID flag to differentiate different tunnel types. A separate
5661 * recipe needs to be used for the metadata.
5663 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5664 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5665 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5668 /* set the recipe priority if specified */
5669 rm->priority = rinfo->priority ? rinfo->priority : 0;
5671 /* Find offsets from the field vector. Pick the first one for all the
5674 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5678 /* get bitmap of all profiles the recipe will be associated with */
5679 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5680 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5682 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5683 ice_set_bit((u16)fvit->profile_id, profiles);
5686 /* Create any special protocol/offset pairs, such as looking at tunnel
5687 * bits by extracting metadata
5689 status = ice_add_special_words(rinfo, lkup_exts);
5691 goto err_free_lkup_exts;
5693 /* Look for a recipe which matches our requested fv / mask list */
5694 *rid = ice_find_recp(hw, lkup_exts);
5695 if (*rid < ICE_MAX_NUM_RECIPES)
5696 /* Success if found a recipe that match the existing criteria */
5699 /* Recipe we need does not exist, add a recipe */
5700 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5704 /* Associate all the recipes created with all the profiles in the
5705 * common field vector.
5707 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5709 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
 /* merge the new recipe bits with the profile's existing mapping */
5711 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5712 (u8 *)r_bitmap, NULL);
5716 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5717 ICE_MAX_NUM_RECIPES);
 /* change lock must be held while writing recipe/profile mapping */
5718 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5722 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5725 ice_release_change_lock(hw);
 /* publish the new recipe: cache its lookup extraction words so later
 * ice_find_recp() calls can match against it
 */
5731 *rid = rm->root_rid;
5732 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5733 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
 /* cleanup: free temporary grouping and field-vector list entries */
5735 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5736 ice_recp_grp_entry, l_entry) {
5737 LIST_DEL(&r_entry->l_entry);
5738 ice_free(hw, r_entry);
5741 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5743 LIST_DEL(&fvit->list_entry);
5748 ice_free(hw, rm->root_buf);
5753 ice_free(hw, lkup_exts);
5759 * ice_find_dummy_packet - find dummy packet by tunnel type
5761 * @lkups: lookup elements or match criteria for the advanced recipe, one
5762 * structure per protocol header
5763 * @lkups_cnt: number of protocols
5764 * @tun_type: tunnel type from the match criteria
5765 * @pkt: dummy packet to fill according to filter match criteria
5766 * @pkt_len: packet length of dummy packet
5767 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Selects a pre-built dummy packet template (and its per-protocol offset
 * table) that best matches the tunnel type and the L3/L4 protocols present
 * in @lkups.  GTP and PPPoE use fixed templates regardless of the lookups;
 * otherwise the tcp/udp/ipv6 flags scanned from @lkups pick the template.
5770 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5771 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5773 const struct ice_dummy_pkt_offsets **offsets)
5775 bool tcp = false, udp = false, ipv6 = false;
 /* fixed-template tunnel types: decided purely by tun_type */
5778 if (tun_type == ICE_SW_TUN_GTP) {
5779 *pkt = dummy_udp_gtp_packet;
5780 *pkt_len = sizeof(dummy_udp_gtp_packet);
5781 *offsets = dummy_udp_gtp_packet_offsets;
5784 if (tun_type == ICE_SW_TUN_PPPOE) {
5785 *pkt = dummy_pppoe_packet;
5786 *pkt_len = sizeof(dummy_pppoe_packet);
5787 *offsets = dummy_pppoe_packet_offsets;
 /* note which L4/L3 protocols the caller wants to match on */
5790 for (i = 0; i < lkups_cnt; i++) {
5791 if (lkups[i].type == ICE_UDP_ILOS)
5793 else if (lkups[i].type == ICE_TCP_IL)
5795 else if (lkups[i].type == ICE_IPV6_OFOS)
5799 if (tun_type == ICE_ALL_TUNNELS) {
5800 *pkt = dummy_gre_udp_packet;
5801 *pkt_len = sizeof(dummy_gre_udp_packet);
5802 *offsets = dummy_gre_udp_packet_offsets;
 /* NVGRE: TCP template when a TCP lookup is present, else UDP */
5806 if (tun_type == ICE_SW_TUN_NVGRE) {
5808 *pkt = dummy_gre_tcp_packet;
5809 *pkt_len = sizeof(dummy_gre_tcp_packet);
5810 *offsets = dummy_gre_tcp_packet_offsets;
5814 *pkt = dummy_gre_udp_packet;
5815 *pkt_len = sizeof(dummy_gre_udp_packet);
5816 *offsets = dummy_gre_udp_packet_offsets;
5820 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5821 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5823 *pkt = dummy_udp_tun_tcp_packet;
5824 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5825 *offsets = dummy_udp_tun_tcp_packet_offsets;
5829 *pkt = dummy_udp_tun_udp_packet;
5830 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5831 *offsets = dummy_udp_tun_udp_packet_offsets;
 /* non-tunnel cases: choose by the udp/ipv6/tcp flags gathered above */
5836 *pkt = dummy_udp_packet;
5837 *pkt_len = sizeof(dummy_udp_packet);
5838 *offsets = dummy_udp_packet_offsets;
5840 } else if (udp && ipv6) {
5841 *pkt = dummy_udp_ipv6_packet;
5842 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5843 *offsets = dummy_udp_ipv6_packet_offsets;
5845 } else if ((tcp && ipv6) || ipv6) {
5846 *pkt = dummy_tcp_ipv6_packet;
5847 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5848 *offsets = dummy_tcp_ipv6_packet_offsets;
 /* default: plain IPv4/TCP template */
5852 *pkt = dummy_tcp_packet;
5853 *pkt_len = sizeof(dummy_tcp_packet);
5854 *offsets = dummy_tcp_packet_offsets;
5858 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5860 * @lkups: lookup elements or match criteria for the advanced recipe, one
5861 * structure per protocol header
5862 * @lkups_cnt: number of protocols
5863 * @s_rule: stores rule information from the match criteria
5864 * @dummy_pkt: dummy packet to fill according to filter match criteria
5865 * @pkt_len: packet length of dummy packet
5866 * @offsets: offset info for the dummy packet
 *
 * Copies @dummy_pkt into the switch rule's header buffer, then overlays the
 * caller's header values word by word, writing only the bits selected by
 * each lookup's mask.  Returns ICE_ERR_PARAM when a lookup's protocol is not
 * present in @offsets (should not happen after ice_find_dummy_packet).
5868 static enum ice_status
5869 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5870 struct ice_aqc_sw_rules_elem *s_rule,
5871 const u8 *dummy_pkt, u16 pkt_len,
5872 const struct ice_dummy_pkt_offsets *offsets)
5877 /* Start with a packet with a pre-defined/dummy content. Then, fill
5878 * in the header values to be looked up or matched.
5880 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5882 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5884 for (i = 0; i < lkups_cnt; i++) {
5885 enum ice_protocol_type type;
5886 u16 offset = 0, len = 0, j;
5889 /* find the start of this layer; it should be found since this
5890 * was already checked when search for the dummy packet
5892 type = lkups[i].type;
5893 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5894 if (type == offsets[j].type) {
5895 offset = offsets[j].offset;
5900 /* this should never happen in a correct calling sequence */
5902 return ICE_ERR_PARAM;
 /* header length to overlay depends on the protocol of the lookup */
5904 switch (lkups[i].type) {
5907 len = sizeof(struct ice_ether_hdr);
5910 len = sizeof(struct ice_ethtype_hdr);
5914 len = sizeof(struct ice_ipv4_hdr);
5918 len = sizeof(struct ice_ipv6_hdr);
5923 len = sizeof(struct ice_l4_hdr);
5926 len = sizeof(struct ice_sctp_hdr);
5929 len = sizeof(struct ice_nvgre);
5934 len = sizeof(struct ice_udp_tnl_hdr);
5938 len = sizeof(struct ice_udp_gtp_hdr);
5941 return ICE_ERR_PARAM;
5944 /* the length should be a word multiple */
5945 if (len % ICE_BYTES_PER_WORD)
5948 /* We have the offset to the header start, the length, the
5949 * caller's header values and mask. Use this information to
5950 * copy the data into the dummy packet appropriately based on
5951 * the mask. Note that we need to only write the bits as
5952 * indicated by the mask to make sure we don't improperly write
5953 * over any significant packet data.
 *
 * new = (template & ~mask) | (value & mask), one u16 at a time
5955 for (j = 0; j < len / sizeof(u16); j++)
5956 if (((u16 *)&lkups[i].m_u)[j])
5957 ((u16 *)(pkt + offset))[j] =
5958 (((u16 *)(pkt + offset))[j] &
5959 ~((u16 *)&lkups[i].m_u)[j]) |
5960 (((u16 *)&lkups[i].h_u)[j] &
5961 ((u16 *)&lkups[i].m_u)[j]);
5964 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5970 * ice_find_adv_rule_entry - Search a rule entry
5971 * @hw: pointer to the hardware structure
5972 * @lkups: lookup elements or match criteria for the advanced recipe, one
5973 * structure per protocol header
5974 * @lkups_cnt: number of protocols
5975 * @recp_id: recipe ID for which we are finding the rule
5976 * @rinfo: other information regarding the rule e.g. priority and action info
5978 * Helper function to search for a given advance rule entry
5979 * Returns pointer to entry storing the rule if found
 *
 * A match requires: same lookup count, byte-identical lookup elements
 * (memcmp), and matching sw_act.flag and tun_type in the rule info.
5981 static struct ice_adv_fltr_mgmt_list_entry *
5982 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5983 u16 lkups_cnt, u8 recp_id,
5984 struct ice_adv_rule_info *rinfo)
5986 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5987 struct ice_switch_info *sw = hw->switch_info;
5990 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5991 ice_adv_fltr_mgmt_list_entry, list_entry) {
5992 bool lkups_matched = true;
 /* cheap rejection first: lookup counts must agree */
5994 if (lkups_cnt != list_itr->lkups_cnt)
5996 for (i = 0; i < list_itr->lkups_cnt; i++)
5997 if (memcmp(&list_itr->lkups[i], &lkups[i],
5999 lkups_matched = false;
6002 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6003 rinfo->tun_type == list_itr->rule_info.tun_type &&
6011 * ice_adv_add_update_vsi_list
6012 * @hw: pointer to the hardware structure
6013 * @m_entry: pointer to current adv filter management list entry
6014 * @cur_fltr: filter information from the book keeping entry
6015 * @new_fltr: filter information with the new VSI to be added
6017 * Call AQ command to add or update previously created VSI list with new VSI.
6019 * Helper function to do book keeping associated with adding filter information
6020 * The algorithm to do the booking keeping is described below :
6021 * When a VSI needs to subscribe to a given advanced filter
6022 * if only one VSI has been added till now
6023 * Allocate a new VSI list and add two VSIs
6024 * to this list using switch rule command
6025 * Update the previously created switch rule with the
6026 * newly created VSI list ID
6027 * if a VSI list was previously created
6028 * Add the new VSI to the previously created VSI list set
6029 * using the update switch rule command
6031 static enum ice_status
6032 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6033 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6034 struct ice_adv_rule_info *cur_fltr,
6035 struct ice_adv_rule_info *new_fltr)
6037 enum ice_status status;
6038 u16 vsi_list_id = 0;
 /* queue/queue-group destinations cannot be folded into a VSI list */
6040 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6041 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
6042 return ICE_ERR_NOT_IMPL;
 /* two identical drop rules: nothing further to record */
6044 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
6045 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6046 return ICE_ERR_ALREADY_EXISTS;
6048 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6049 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6050 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6051 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6052 return ICE_ERR_NOT_IMPL;
6054 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6055 /* Only one entry existed in the mapping and it was not already
6056 * a part of a VSI list. So, create a VSI list with the old and
6059 struct ice_fltr_info tmp_fltr;
6060 u16 vsi_handle_arr[2];
6062 /* A rule already exists with the new VSI being added */
6063 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6064 new_fltr->sw_act.fwd_id.hw_vsi_id)
6065 return ICE_ERR_ALREADY_EXISTS;
6067 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6068 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6069 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6075 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6076 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6077 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6078 /* Update the previous switch rule of "forward to VSI" to
6081 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
 /* book keeping now points the rule at the new VSI list */
6085 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6086 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6087 m_entry->vsi_list_info =
6088 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6091 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6093 if (!m_entry->vsi_list_info)
6096 /* A rule already exists with the new VSI being added */
6097 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6100 /* Update the previously created VSI list set with
6101 * the new VSI ID passed in
6103 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6105 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6107 ice_aqc_opc_update_sw_rules,
6109 /* update VSI list mapping info with new VSI ID */
6111 ice_set_bit(vsi_handle,
6112 m_entry->vsi_list_info->vsi_map);
6115 m_entry->vsi_count++;
6120 * ice_add_adv_rule - helper function to create an advanced switch rule
6121 * @hw: pointer to the hardware structure
6122 * @lkups: information on the words that needs to be looked up. All words
6123 * together makes one recipe
6124 * @lkups_cnt: num of entries in the lkups array
6125 * @rinfo: other information related to the rule that needs to be programmed
6126 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6127 * ignored is case of error.
6129 * This function can program only 1 rule at a time. The lkups is used to
6130 * describe the all the words that forms the "lookup" portion of the recipe.
6131 * These words can span multiple protocols. Callers to this function need to
6132 * pass in a list of protocol headers with lookup information along and mask
6133 * that determines which words are valid from the given protocol header.
6134 * rinfo describes other information related to this rule such as forwarding
6135 * IDs, priority of this rule, etc.
 *
 * Flow (as visible here): validate lookups -> find dummy packet -> build or
 * reuse recipe -> if an identical rule exists, fold the VSI into its VSI
 * list -> otherwise build an AQ switch-rule element, program it, and record
 * it in the per-recipe bookkeeping list.
6138 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6139 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6140 struct ice_rule_query_data *added_entry)
6142 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6143 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6144 const struct ice_dummy_pkt_offsets *pkt_offsets;
6145 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6146 struct LIST_HEAD_TYPE *rule_head;
6147 struct ice_switch_info *sw;
6148 enum ice_status status;
6149 const u8 *pkt = NULL;
6155 return ICE_ERR_PARAM;
6157 /* get # of words we need to match */
6159 for (i = 0; i < lkups_cnt; i++) {
6162 ptr = (u16 *)&lkups[i].m_u;
6163 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
 /* a rule must match at least one word and fit the recipe chain limit */
6167 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6168 return ICE_ERR_PARAM;
6170 /* make sure that we can locate a dummy packet */
6171 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6174 status = ICE_ERR_PARAM;
6175 goto err_ice_add_adv_rule;
 /* only these four forwarding actions are supported here */
6178 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6179 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6180 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6181 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6184 vsi_handle = rinfo->sw_act.vsi_handle;
6185 if (!ice_is_vsi_valid(hw, vsi_handle))
6186 return ICE_ERR_PARAM;
6188 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6189 rinfo->sw_act.fwd_id.hw_vsi_id =
6190 ice_get_hw_vsi_num(hw, vsi_handle);
6191 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6192 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6194 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6197 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6199 /* we have to add VSI to VSI_LIST and increment vsi_count.
6200 * Also Update VSI list so that we can change forwarding rule
6201 * if the rule already exists, we will check if it exists with
6202 * same vsi_id, if not then add it to the VSI list if it already
6203 * exists if not then create a VSI list and add the existing VSI
6204 * ID and the new VSI ID to the list
6205 * We will add that VSI to the list
6207 status = ice_adv_add_update_vsi_list(hw, m_entry,
6208 &m_entry->rule_info,
6211 added_entry->rid = rid;
6212 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6213 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
 /* no existing rule: build a new AQ switch-rule element, sized for
 * the rule header plus the dummy packet
 */
6217 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6218 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6220 return ICE_ERR_NO_MEMORY;
6221 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6222 switch (rinfo->sw_act.fltr_act) {
6223 case ICE_FWD_TO_VSI:
6224 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6225 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6226 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6229 act |= ICE_SINGLE_ACT_TO_Q;
6230 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6231 ICE_SINGLE_ACT_Q_INDEX_M;
6233 case ICE_FWD_TO_QGRP:
 /* queue region size is encoded as log2 of the group size */
6234 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6235 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6236 act |= ICE_SINGLE_ACT_TO_Q;
6237 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6238 ICE_SINGLE_ACT_Q_INDEX_M;
6239 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6240 ICE_SINGLE_ACT_Q_REGION_M;
6242 case ICE_DROP_PACKET:
6243 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6244 ICE_SINGLE_ACT_VALID_BIT;
6247 status = ICE_ERR_CFG;
6248 goto err_ice_add_adv_rule;
6251 /* set the rule LOOKUP type based on caller specified 'RX'
6252 * instead of hardcoding it to be either LOOKUP_TX/RX
6254 * for 'RX' set the source to be the port number
6255 * for 'TX' set the source to be the source HW VSI number (determined
6259 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6260 s_rule->pdata.lkup_tx_rx.src =
6261 CPU_TO_LE16(hw->port_info->lport);
6263 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6264 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6267 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6268 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
 /* overlay match values onto the dummy packet, then program via AQ */
6270 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
6273 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6274 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6277 goto err_ice_add_adv_rule;
6278 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6279 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry))
6281 status = ICE_ERR_NO_MEMORY;
6282 goto err_ice_add_adv_rule;
 /* keep a private copy of the lookups for later match/remove */
6285 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6286 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6287 ICE_NONDMA_TO_NONDMA);
6288 if (!adv_fltr->lkups) {
6289 status = ICE_ERR_NO_MEMORY;
6290 goto err_ice_add_adv_rule;
6293 adv_fltr->lkups_cnt = lkups_cnt;
6294 adv_fltr->rule_info = *rinfo;
6295 adv_fltr->rule_info.fltr_rule_id =
6296 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6297 sw = hw->switch_info;
6298 sw->recp_list[rid].adv_rule = true;
6299 rule_head = &sw->recp_list[rid].filt_rules;
6301 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6302 struct ice_fltr_info tmp_fltr;
6304 tmp_fltr.fltr_rule_id =
6305 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6306 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6307 tmp_fltr.fwd_id.hw_vsi_id =
6308 ice_get_hw_vsi_num(hw, vsi_handle);
6309 tmp_fltr.vsi_handle = vsi_handle;
6310 /* Update the previous switch rule of "forward to VSI" to
6313 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6315 goto err_ice_add_adv_rule;
6316 adv_fltr->vsi_count = 1;
6319 /* Add rule entry to book keeping list */
6320 LIST_ADD(&adv_fltr->list_entry, rule_head);
6322 added_entry->rid = rid;
6323 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6324 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
 /* error path also frees a partially-built adv_fltr; s_rule is always
 * released below
 */
6326 err_ice_add_adv_rule:
6327 if (status && adv_fltr) {
6328 ice_free(hw, adv_fltr->lkups);
6329 ice_free(hw, adv_fltr);
6332 ice_free(hw, s_rule);
6338 * ice_adv_rem_update_vsi_list
6339 * @hw: pointer to the hardware structure
6340 * @vsi_handle: VSI handle of the VSI to remove
6341 * @fm_list: filter management entry for which the VSI list management needs to
 *
 * Removes @vsi_handle from the entry's VSI list.  When exactly one VSI
 * remains afterwards, the rule is converted back to a direct FWD_TO_VSI and
 * the now-redundant VSI list is deleted.
6344 static enum ice_status
6345 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6346 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6348 struct ice_vsi_list_map_info *vsi_list_info;
6349 enum ice_sw_lkup_type lkup_type;
6350 enum ice_status status;
 /* only meaningful for rules that currently forward to a VSI list */
6353 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6354 fm_list->vsi_count == 0)
6355 return ICE_ERR_PARAM;
6357 /* A rule with the VSI being removed does not exist */
6358 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6359 return ICE_ERR_DOES_NOT_EXIST;
6361 lkup_type = ICE_SW_LKUP_LAST;
6362 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
 /* 'true' = remove the VSI from the list via the update AQ command */
6363 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6364 ice_aqc_opc_update_sw_rules,
6369 fm_list->vsi_count--;
6370 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6371 vsi_list_info = fm_list->vsi_list_info;
6372 if (fm_list->vsi_count == 1) {
6373 struct ice_fltr_info tmp_fltr;
 /* find the single surviving VSI and re-point the rule at it */
6376 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6378 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6379 return ICE_ERR_OUT_OF_RANGE;
6381 /* Make sure VSI list is empty before removing it below */
6382 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6384 ice_aqc_opc_update_sw_rules,
6388 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6389 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6390 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6391 tmp_fltr.fwd_id.hw_vsi_id =
6392 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6393 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6394 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6396 /* Update the previous switch rule of "MAC forward to VSI" to
6397 * "MAC fwd to VSI list"
6399 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6401 ice_debug(hw, ICE_DBG_SW,
6402 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6403 tmp_fltr.fwd_id.hw_vsi_id, status);
6408 if (fm_list->vsi_count == 1) {
6409 /* Remove the VSI list since it is no longer used */
6410 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6412 ice_debug(hw, ICE_DBG_SW,
6413 "Failed to remove VSI list %d, error %d\n",
6414 vsi_list_id, status);
 /* drop the bookkeeping map entry for the deleted VSI list */
6418 LIST_DEL(&vsi_list_info->list_entry);
6419 ice_free(hw, vsi_list_info);
6420 fm_list->vsi_list_info = NULL;
6427 * ice_rem_adv_rule - removes existing advanced switch rule
6428 * @hw: pointer to the hardware structure
6429 * @lkups: information on the words that needs to be looked up. All words
6430 * together makes one recipe
6431 * @lkups_cnt: num of entries in the lkups array
6432 * @rinfo: Its the pointer to the rule information for the rule
6434 * This function can be used to remove 1 rule at a time. The lkups is
6435 * used to describe all the words that forms the "lookup" portion of the
6436 * rule. These words can span multiple protocols. Callers to this function
6437 * need to pass in a list of protocol headers with lookup information along
6438 * and mask that determines which words are valid from the given protocol
6439 * header. rinfo describes other information related to this rule such as
6440 * forwarding IDs, priority of this rule, etc.
 *
 * The rule is located by rebuilding its lookup extraction words, finding
 * the recipe, then matching the bookkeeping entry.  The hardware rule is
 * only removed once no VSI references it; otherwise just the VSI list is
 * updated.
6443 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6444 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6446 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6447 const struct ice_dummy_pkt_offsets *offsets;
6448 struct ice_prot_lkup_ext lkup_exts;
6449 u16 rule_buf_sz, pkt_len, i, rid;
6450 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6451 enum ice_status status = ICE_SUCCESS;
6452 bool remove_rule = false;
6453 const u8 *pkt = NULL;
 /* rebuild the extraction words exactly as ice_add_adv_recipe did */
6456 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6457 for (i = 0; i < lkups_cnt; i++) {
6460 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6463 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6468 /* Create any special protocol/offset pairs, such as looking at tunnel
6469 * bits by extracting metadata
6471 status = ice_add_special_words(rinfo, &lkup_exts);
6475 rid = ice_find_recp(hw, &lkup_exts);
6476 /* If did not find a recipe that match the existing criteria */
6477 if (rid == ICE_MAX_NUM_RECIPES)
6478 return ICE_ERR_PARAM;
6480 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6481 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6482 /* the rule is already removed */
6485 ice_acquire_lock(rule_lock);
6486 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6488 } else if (list_elem->vsi_count > 1) {
 /* other VSIs still subscribed: only detach this VSI */
6489 list_elem->vsi_list_info->ref_cnt--;
6490 remove_rule = false;
6491 vsi_handle = rinfo->sw_act.vsi_handle;
6492 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6494 vsi_handle = rinfo->sw_act.vsi_handle;
6495 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6497 ice_release_lock(rule_lock);
6500 if (list_elem->vsi_count == 0)
6503 ice_release_lock(rule_lock);
 /* last reference gone: delete the switch rule from hardware */
6505 struct ice_aqc_sw_rules_elem *s_rule;
6507 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
6508 &pkt_len, &offsets);
6509 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6511 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6514 return ICE_ERR_NO_MEMORY;
6515 s_rule->pdata.lkup_tx_rx.act = 0;
6516 s_rule->pdata.lkup_tx_rx.index =
6517 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6518 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6519 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6521 ice_aqc_opc_remove_sw_rules, NULL);
6522 if (status == ICE_SUCCESS) {
 /* unlink and free the bookkeeping entry under the rule lock */
6523 ice_acquire_lock(rule_lock);
6524 LIST_DEL(&list_elem->list_entry);
6525 ice_free(hw, list_elem->lkups);
6526 ice_free(hw, list_elem);
6527 ice_release_lock(rule_lock);
6529 ice_free(hw, s_rule);
6535 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6536 * @hw: pointer to the hardware structure
6537 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6539 * This function is used to remove 1 rule at a time. The removal is based on
6540 * the remove_entry parameter. This function will remove rule for a given
6541 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 *
 * Looks up the rule by fltr_rule_id in the recipe's filter list, then
 * delegates the actual removal to ice_rem_adv_rule() using the stored
 * lookups.  Returns ICE_ERR_PARAM when recipe or rule cannot be found.
6544 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6545 struct ice_rule_query_data *remove_entry)
6547 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6548 struct LIST_HEAD_TYPE *list_head;
6549 struct ice_adv_rule_info rinfo;
6550 struct ice_switch_info *sw;
6552 sw = hw->switch_info;
6553 if (!sw->recp_list[remove_entry->rid].recp_created)
6554 return ICE_ERR_PARAM;
6555 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6556 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6558 if (list_itr->rule_info.fltr_rule_id ==
6559 remove_entry->rule_id) {
 /* use a copy of the rule info with the caller's VSI handle */
6560 rinfo = list_itr->rule_info;
6561 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6562 return ice_rem_adv_rule(hw, list_itr->lkups,
6563 list_itr->lkups_cnt, &rinfo);
 /* rule id not found in this recipe's list */
6566 return ICE_ERR_PARAM;
6570 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6572 * @hw: pointer to the hardware structure
6573 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6575 * This function is used to remove all the rules for a given VSI and as soon
6576 * as removing a rule fails, it will return immediately with the error code,
6577 * else it will return ICE_SUCCESS
 *
 * Walks every created recipe that carries advanced rules and removes the
 * rules whose VSI list contains @vsi_handle.
6580 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6582 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6583 struct ice_vsi_list_map_info *map_info;
6584 struct LIST_HEAD_TYPE *list_head;
6585 struct ice_adv_rule_info rinfo;
6586 struct ice_switch_info *sw;
6587 enum ice_status status;
6588 u16 vsi_list_id = 0;
6591 sw = hw->switch_info;
6592 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
 /* skip recipes that do not exist or hold no advanced rules */
6593 if (!sw->recp_list[rid].recp_created)
6595 if (!sw->recp_list[rid].adv_rule)
6597 list_head = &sw->recp_list[rid].filt_rules;
6599 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6600 ice_adv_fltr_mgmt_list_entry, list_entry) {
6601 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
 /* only rules referencing this VSI are removed */
6605 rinfo = list_itr->rule_info;
6606 rinfo.sw_act.vsi_handle = vsi_handle;
6607 status = ice_rem_adv_rule(hw, list_itr->lkups,
6608 list_itr->lkups_cnt, &rinfo);
6618 * ice_replay_fltr - Replay all the filters stored by a specific list head
6619 * @hw: pointer to the hardware structure
6620 * @list_head: list for which filters needs to be replayed
6621 * @recp_id: Recipe ID for which rules need to be replayed
 *
 * Re-programs every filter on @list_head after a reset.  Entries are first
 * moved to a temporary list so re-adding them does not collide with the
 * existing bookkeeping; multi-VSI entries are replayed one VSI at a time.
6623 static enum ice_status
6624 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6626 struct ice_fltr_mgmt_list_entry *itr;
6627 struct LIST_HEAD_TYPE l_head;
6628 enum ice_status status = ICE_SUCCESS;
6630 if (LIST_EMPTY(list_head))
6633 /* Move entries from the given list_head to a temporary l_head so that
6634 * they can be replayed. Otherwise when trying to re-add the same
6635 * filter, the function will return already exists
6637 LIST_REPLACE_INIT(list_head, &l_head);
6639 /* Mark the given list_head empty by reinitializing it so filters
6640 * could be added again by *handler
6642 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6644 struct ice_fltr_list_entry f_entry;
6646 f_entry.fltr_info = itr->fltr_info;
 /* single-VSI non-VLAN filters replay directly */
6647 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6648 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6649 if (status != ICE_SUCCESS)
6654 /* Add a filter per VSI separately */
6659 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6661 if (!ice_is_vsi_valid(hw, vsi_handle))
 /* clear the bit so the replay logic can re-add this VSI */
6664 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6665 f_entry.fltr_info.vsi_handle = vsi_handle;
6666 f_entry.fltr_info.fwd_id.hw_vsi_id =
6667 ice_get_hw_vsi_num(hw, vsi_handle);
6668 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6669 if (recp_id == ICE_SW_LKUP_VLAN)
6670 status = ice_add_vlan_internal(hw, &f_entry);
6672 status = ice_add_rule_internal(hw, recp_id,
6674 if (status != ICE_SUCCESS)
6679 /* Clear the filter management list */
6680 ice_rem_sw_rule_info(hw, &l_head);
6685 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6686 * @hw: pointer to the hardware structure
6688 * NOTE: This function does not clean up partially added filters on error.
6689 * It is up to caller of the function to issue a reset or fail early.
 *
 * Iterates every recipe's filt_rules list and replays it; stops at the
 * first failure and returns that status.
6691 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6693 struct ice_switch_info *sw = hw->switch_info;
6694 enum ice_status status = ICE_SUCCESS;
6697 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6698 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6700 status = ice_replay_fltr(hw, i, head);
6701 if (status != ICE_SUCCESS)
6708 * ice_replay_vsi_fltr - Replay filters for requested VSI
6709 * @hw: pointer to the hardware structure
6710 * @vsi_handle: driver VSI handle
6711 * @recp_id: Recipe ID for which rules need to be replayed
6712 * @list_head: list for which filters need to be replayed
6714 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6715 * It is required to pass valid VSI handle.
 *
 * Unlike ice_replay_fltr(), this replays only the entries that reference
 * @vsi_handle (directly or through their VSI list bitmap).
6717 static enum ice_status
6718 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6719 struct LIST_HEAD_TYPE *list_head)
6721 struct ice_fltr_mgmt_list_entry *itr;
6722 enum ice_status status = ICE_SUCCESS;
6725 if (LIST_EMPTY(list_head))
6727 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6729 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6731 struct ice_fltr_list_entry f_entry;
6733 f_entry.fltr_info = itr->fltr_info;
 /* direct single-VSI entry for this VSI: replay as-is */
6734 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6735 itr->fltr_info.vsi_handle == vsi_handle) {
6736 /* update the src in case it is VSI num */
6737 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6738 f_entry.fltr_info.src = hw_vsi_id;
6739 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6740 if (status != ICE_SUCCESS)
 /* otherwise the entry must carry this VSI in its list bitmap */
6744 if (!itr->vsi_list_info ||
6745 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6747 /* Clearing it so that the logic can add it back */
6748 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6749 f_entry.fltr_info.vsi_handle = vsi_handle;
6750 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6751 /* update the src in case it is VSI num */
6752 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6753 f_entry.fltr_info.src = hw_vsi_id;
6754 if (recp_id == ICE_SW_LKUP_VLAN)
6755 status = ice_add_vlan_internal(hw, &f_entry);
6757 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6758 if (status != ICE_SUCCESS)
6766 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6767 * @hw: pointer to the hardware structure
6768 * @vsi_handle: driver VSI handle
6769 * @list_head: list for which filters need to be replayed
6771 * Replay the advanced rule for the given VSI.
 *
 * Re-adds, via ice_add_adv_rule(), every advanced filter on @list_head
 * whose sw_act targets @vsi_handle.
6773 static enum ice_status
6774 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6775 struct LIST_HEAD_TYPE *list_head)
6777 struct ice_rule_query_data added_entry = { 0 };
6778 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6779 enum ice_status status = ICE_SUCCESS;
6781 if (LIST_EMPTY(list_head))
6783 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6785 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6786 u16 lk_cnt = adv_fltr->lkups_cnt;
 /* only rules belonging to the requested VSI are replayed */
6788 if (vsi_handle != rinfo->sw_act.vsi_handle)
6790 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
/* NOTE(review): garbled extraction -- original file line numbers are fused
 * into each line and some lines (comment opener, braces, the loop index
 * declaration, the 'else' keyword, return statements) are missing.
 * Restore from the upstream ice_switch.c before building.
 */
6799 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6800 * @hw: pointer to the hardware structure
6801 * @vsi_handle: driver VSI handle
6803 * Replays filters for requested VSI via vsi_handle.
6805 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6807 struct ice_switch_info *sw = hw->switch_info;
6808 enum ice_status status;
6811 /* Update the recipes that were created */
6812 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6813 struct LIST_HEAD_TYPE *head;
/* Each recipe keeps its replay rules in filt_replay_rules. */
6815 head = &sw->recp_list[i].filt_replay_rules;
/* Regular recipes use the per-recipe replay path; recipes flagged
 * as adv_rule use the advanced-rule replay path instead.
 */
6816 if (!sw->recp_list[i].adv_rule)
6817 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6819 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Stop at the first recipe that fails to replay. */
6820 if (status != ICE_SUCCESS)
6828 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6829 * @hw: pointer to the HW struct
6831 * Deletes the filter replay rules.
6833 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6835 struct ice_switch_info *sw = hw->switch_info;
6841 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6842 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6843 struct LIST_HEAD_TYPE *l_head;
6845 l_head = &sw->recp_list[i].filt_replay_rules;
6846 if (!sw->recp_list[i].adv_rule)
6847 ice_rem_sw_rule_info(hw, l_head);
6849 ice_rem_adv_rule_info(hw, l_head);