1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields inside the dummy Ethernet header used when
 * programming switch filter rules: destination MAC at byte 0, EtherType
 * at byte 12, VLAN TCI at byte 14. ICE_MAX_VLAN_ID is the largest valid
 * 12-bit VLAN identifier (0xFFF).
 */
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
/* Template Ethernet header copied into switch rule buffers; only the
 * first DUMMY_ETH_HDR_LEN bytes (DA, SA, EtherType/VLAN words) are
 * meaningful to the hardware.
 */
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* NOTE(review): the remainder of this initializer (SA bytes, EtherType
 * placeholder) and its closing brace are not visible in this extract —
 * verify against the full source.
 */
/* Size helpers for variably-sized switch-rule admin queue buffers. Each
 * computes sizeof the fixed ice_aqc_sw_rules_elem wrapper minus its
 * union payload, plus the size of the specific rule type; the trailing
 * "- 1" accounts for the one-byte flexible data member already counted
 * inside the rule struct.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Large-action rule sized for (n) action entries. */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule sized for (n) VSI entries. */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Dummy (template) packets and their per-protocol-header offset tables.
 * Each *_offsets[] array lists, in order, the protocol headers present in
 * the matching dummy_*_packet[] byte template together with the byte
 * offset at which that header starts; an ICE_PROTOCOL_LAST entry
 * terminates the list. The byte templates are mostly-zero packets with
 * only the structural fields (EtherType, IP protocol/length, tunnel
 * magic numbers, TCP header length/flags) pre-filled.
 * NOTE(review): several array terminators ("};") and some entries are
 * not visible in this extract — verify against the full source.
 */
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
/* NVGRE-encapsulated TCP template: outer MAC/IPv4 (proto 0x2F = GRE),
 * NVGRE header (0x6558 = Transparent Ethernet Bridging), inner
 * MAC/IPv4/TCP.
 */
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
/* NVGRE-encapsulated UDP template. */
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
/* VXLAN (UDP tunnel, dst port 0x12b5 = 4789) encapsulated TCP template. */
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
162 { ICE_PROTOCOL_LAST, 0 },
166 u8 dummy_udp_tun_tcp_packet[] = {
167 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
168 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
171 0x08, 0x00, /* ICE_ETYPE_OL 12 */
173 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
174 0x00, 0x01, 0x00, 0x00,
175 0x40, 0x11, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
180 0x00, 0x46, 0x00, 0x00,
182 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
186 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
190 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
191 0x00, 0x01, 0x00, 0x00,
192 0x40, 0x06, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
199 0x50, 0x02, 0x20, 0x00,
200 0x00, 0x00, 0x00, 0x00
/* VXLAN-encapsulated UDP template. */
204 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
206 { ICE_ETYPE_OL, 12 },
207 { ICE_IPV4_OFOS, 14 },
212 { ICE_UDP_ILOS, 84 },
213 { ICE_PROTOCOL_LAST, 0 },
217 u8 dummy_udp_tun_udp_packet[] = {
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_OL 12 */
224 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
225 0x00, 0x01, 0x00, 0x00,
226 0x00, 0x11, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
231 0x00, 0x3a, 0x00, 0x00,
233 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
234 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
237 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00,
241 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
242 0x00, 0x01, 0x00, 0x00,
243 0x00, 0x11, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
248 0x00, 0x08, 0x00, 0x00,
/* Plain (non-tunneled) IPv4/UDP template. */
252 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
254 { ICE_ETYPE_OL, 12 },
255 { ICE_IPV4_OFOS, 14 },
256 { ICE_UDP_ILOS, 34 },
257 { ICE_PROTOCOL_LAST, 0 },
261 dummy_udp_packet[] = {
262 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x08, 0x00, /* ICE_ETYPE_OL 12 */
268 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
269 0x00, 0x01, 0x00, 0x00,
270 0x00, 0x11, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
275 0x00, 0x08, 0x00, 0x00,
277 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Plain IPv4/TCP template. */
281 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
283 { ICE_ETYPE_OL, 12 },
284 { ICE_IPV4_OFOS, 14 },
286 { ICE_PROTOCOL_LAST, 0 },
290 dummy_tcp_packet[] = {
291 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
292 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00,
295 0x08, 0x00, /* ICE_ETYPE_OL 12 */
297 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
298 0x00, 0x01, 0x00, 0x00,
299 0x00, 0x06, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
306 0x50, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* IPv6/TCP template (EtherType 0x86DD, Next Header 6). */
313 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
315 { ICE_ETYPE_OL, 12 },
316 { ICE_IPV6_OFOS, 14 },
318 { ICE_PROTOCOL_LAST, 0 },
322 dummy_tcp_ipv6_packet[] = {
323 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
327 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
329 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
330 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
341 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00,
343 0x50, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* IPv6/UDP template (Next Header 17). */
350 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
352 { ICE_ETYPE_OL, 12 },
353 { ICE_IPV6_OFOS, 14 },
354 { ICE_UDP_ILOS, 54 },
355 { ICE_PROTOCOL_LAST, 0 },
359 dummy_udp_ipv6_packet[] = {
360 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
366 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
367 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
378 0x00, 0x08, 0x00, 0x00,
380 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template (UDP dst port 0x0868 = 2152) with PDU session
 * extension header.
 */
384 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
386 { ICE_IPV4_OFOS, 14 },
389 { ICE_PROTOCOL_LAST, 0 },
393 dummy_udp_gtp_packet[] = {
394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
399 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x11, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
406 0x00, 0x1c, 0x00, 0x00,
408 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
409 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, 0x00, 0x85,
412 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
413 0x00, 0x00, 0x00, 0x00,
/* PPPoE session template (carries an IPv4 PDU, PPP protocol 0x0021). */
417 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
420 { ICE_PROTOCOL_LAST, 0 },
424 dummy_pppoe_packet[] = {
425 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
426 0x00, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
430 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 14 */
431 0x00, 0x4e, 0x00, 0x21,
433 0x45, 0x00, 0x00, 0x30, /* PDU */
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x11, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
/* File-scope caches of the recipe<->profile association matrices read
 * from firmware (see ice_get_recp_to_prof_map below). Indexed by recipe
 * ID and profile ID respectively.
 */
440 /* this is a recipe to profile association bitmap */
441 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
442 ICE_MAX_NUM_PROFILES);
444 /* this is a profile to recipe association bitmap */
445 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
446 ICE_MAX_NUM_RECIPES);
/* Forward declaration: defined later in this file. */
448 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/* If the buffer's result_indx has the RESULT_EN flag set, record the
 * index value (flag bit masked off) in the recipe's res_idxs bitmap.
 * NOTE(review): the comment opener and function braces are not visible
 * in this extract.
 */
451 * ice_collect_result_idx - copy result index values
452 * @buf: buffer that contains the result index
453 * @recp: the recipe struct to copy data into
455 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
456 struct ice_sw_recipe *recp)
458 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
459 ice_set_bit(buf->content.result_indx &
460 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
/* Clears pos_result_bm, then sets one bit for every field-vector word
 * index flagged in the ICE_POSSIBLE_RES_IDX mask — i.e. the FV slots
 * that may hold a chained-recipe result index.
 */
464 * ice_init_possible_res_bm - initialize possible result bitmap
465 * @pos_result_bm: pointer to the bitmap to initialize
467 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
471 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
473 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
474 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
475 ice_set_bit(bit, pos_result_bm);
/* Reads recipe rid (and its sub-recipes) from firmware via
 * ice_aq_get_recipe() and rebuilds the driver's bookkeeping for it:
 * lookup extractions (protocol id / offset pairs), rg_list entries,
 * result-index bitmap, priority, root flag and a cached copy of the raw
 * firmware buffers. Returns ICE_ERR_NO_MEMORY on allocation failure or
 * the status from the firmware query.
 * NOTE(review): several lines of this function (braces, some error
 * checks and declarations) are not visible in this extract.
 */
479 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
480 * @hw: pointer to hardware structure
481 * @recps: struct that we need to populate
482 * @rid: recipe ID that we are populating
483 * @refresh_required: true if we should get recipe to profile mapping from FW
485 * This function is used to populate all the necessary entries into our
486 * bookkeeping so that we have a current list of all the recipes that are
487 * programmed in the firmware.
489 static enum ice_status
490 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
491 bool *refresh_required)
493 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
494 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
495 struct ice_aqc_recipe_data_elem *tmp;
496 u16 num_recps = ICE_MAX_NUM_RECIPES;
497 struct ice_prot_lkup_ext *lkup_exts;
498 u16 i, sub_recps, fv_word_idx = 0;
499 enum ice_status status;
501 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
502 ice_init_possible_res_bm(possible_idx);
504 /* we need a buffer big enough to accommodate all the recipes */
505 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
506 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
508 return ICE_ERR_NO_MEMORY;
510 tmp[0].recipe_indx = rid;
511 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
512 /* non-zero status meaning recipe doesn't exist */
516 /* Get recipe to profile map so that we can get the fv from lkups that
517 * we read for a recipe from FW. Since we want to minimize the number of
518 * times we make this FW call, just make one call and cache the copy
519 * until a new recipe is added. This operation is only required the
520 * first time to get the changes from FW. Then to search existing
521 * entries we don't need to update the cache again until another recipe
524 if (*refresh_required) {
525 ice_get_recp_to_prof_map(hw);
526 *refresh_required = false;
529 /* Start populating all the entries for recps[rid] based on lkups from
530 * firmware. Note that we are only creating the root recipe in our
533 lkup_exts = &recps[rid].lkup_exts;
535 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
536 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
537 struct ice_recp_grp_entry *rg_entry;
538 u8 prof_id, idx, prot = 0;
542 rg_entry = (struct ice_recp_grp_entry *)
543 ice_malloc(hw, sizeof(*rg_entry));
545 status = ICE_ERR_NO_MEMORY;
549 idx = root_bufs.recipe_indx;
550 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
552 /* Mark all result indices in this chain */
553 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
554 ice_set_bit(root_bufs.content.result_indx &
555 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
557 /* get the first profile that is associated with rid */
558 prof_id = ice_find_first_bit(recipe_to_profile[idx],
559 ICE_MAX_NUM_PROFILES);
560 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
561 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
563 rg_entry->fv_idx[i] = lkup_indx;
564 rg_entry->fv_mask[i] =
565 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
567 /* If the recipe is a chained recipe then all its
568 * child recipe's result will have a result index.
569 * To fill fv_words we should not use those result
570 * index, we only need the protocol ids and offsets.
571 * We will skip all the fv_idx which stores result
572 * index in them. We also need to skip any fv_idx which
573 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
574 * valid offset value.
576 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
577 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
578 rg_entry->fv_idx[i] == 0)
581 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
582 rg_entry->fv_idx[i], &prot, &off);
583 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
584 lkup_exts->fv_words[fv_word_idx].off = off;
587 /* populate rg_list with the data from the child entry of this
590 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
592 /* Propagate some data to the recipe database */
593 recps[idx].is_root = is_root;
594 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
595 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
596 recps[idx].chain_idx = root_bufs.content.result_indx &
597 ~ICE_AQ_RECIPE_RESULT_EN;
599 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
604 /* Only do the following for root recipes entries */
605 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
606 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
607 recps[idx].root_rid = root_bufs.content.rid &
608 ~ICE_AQ_RECIPE_ID_IS_ROOT;
609 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
610 recps[idx].big_recp = (recps[rid].n_grp_count > 1);
613 /* Complete initialization of the root recipe entry */
614 lkup_exts->n_val_words = fv_word_idx;
615 recps[rid].n_grp_count = num_recps;
616 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
617 ice_calloc(hw, recps[rid].n_grp_count,
618 sizeof(struct ice_aqc_recipe_data_elem));
619 if (!recps[rid].root_buf)
622 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
623 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
625 /* Copy result indexes */
626 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
627 ICE_NONDMA_TO_NONDMA);
628 recps[rid].recp_created = true;
/* For every profile, queries firmware for its associated-recipe bitmap
 * and refreshes both file-scope caches: profile_to_recipe[i] gets the
 * raw bitmap, and recipe_to_profile[j] gets bit i set for every recipe
 * j found in it. Profiles whose query fails are skipped.
 */
636 * ice_get_recp_to_prof_map - updates recipe to profile mapping
637 * @hw: pointer to hardware structure
639 * This function is used to populate recipe_to_profile matrix where index to
640 * this array is the recipe ID and the element is the mapping of which profiles
641 * is this recipe mapped to.
644 ice_get_recp_to_prof_map(struct ice_hw *hw)
646 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
649 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
652 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
653 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
654 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
656 ice_memcpy(profile_to_recipe[i], r_bitmap,
657 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
658 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
659 if (ice_is_bit_set(r_bitmap, j))
660 ice_set_bit(i, recipe_to_profile[j]);
/* Allocates the ICE_MAX_NUM_RECIPES-entry recipe table, initializes each
 * entry's root_rid, filter/replay/recipe-group lists and per-recipe lock,
 * then publishes it in hw->switch_info->recp_list. Returns
 * ICE_ERR_NO_MEMORY if the table cannot be allocated.
 */
665 * ice_init_def_sw_recp - initialize the recipe book keeping tables
666 * @hw: pointer to the HW struct
668 * Allocate memory for the entire recipe table and initialize the structures/
669 * entries corresponding to basic recipes.
671 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
673 struct ice_sw_recipe *recps;
676 recps = (struct ice_sw_recipe *)
677 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
679 return ICE_ERR_NO_MEMORY;
681 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
682 recps[i].root_rid = i;
683 INIT_LIST_HEAD(&recps[i].filt_rules);
684 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
685 INIT_LIST_HEAD(&recps[i].rg_list);
686 ice_init_lock(&recps[i].filt_rule_lock);
689 hw->switch_info->recp_list = recps;
/* Issues AQ opcode 0x0200 (get switch configuration). On return,
 * *req_desc holds the firmware's continuation cookie (0 = done) and
 * *num_elems the element count in buf.
 */
695 * ice_aq_get_sw_cfg - get switch configuration
696 * @hw: pointer to the hardware structure
697 * @buf: pointer to the result buffer
698 * @buf_size: length of the buffer available for response
699 * @req_desc: pointer to requested descriptor
700 * @num_elems: pointer to number of elements
701 * @cd: pointer to command details structure or NULL
703 * Get switch configuration (0x0200) to be placed in 'buff'.
704 * This admin command returns information such as initial VSI/port number
705 * and switch ID it belongs to.
707 * NOTE: *req_desc is both an input/output parameter.
708 * The caller of this function first calls this function with *request_desc set
709 * to 0. If the response from f/w has *req_desc set to 0, all the switch
710 * configuration information has been returned; if non-zero (meaning not all
711 * the information was returned), the caller should call this function again
712 * with *req_desc set to the previous value returned by f/w to get the
713 * next block of switch configuration information.
715 * *num_elems is output only parameter. This reflects the number of elements
716 * in response buffer. The caller of this function to use *num_elems while
717 * parsing the response buffer.
719 static enum ice_status
720 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
721 u16 buf_size, u16 *req_desc, u16 *num_elems,
722 struct ice_sq_cd *cd)
724 struct ice_aqc_get_sw_cfg *cmd;
725 enum ice_status status;
726 struct ice_aq_desc desc;
728 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
729 cmd = &desc.params.get_sw_conf;
730 cmd->element = CPU_TO_LE16(*req_desc);
732 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* NOTE(review): the success check guarding these output writes is not
 * visible in this extract.
 */
734 *req_desc = LE16_TO_CPU(cmd->element);
735 *num_elems = LE16_TO_CPU(cmd->num_elems);
/* Allocates a SWID via the alloc-resource AQ command (0x0208) and, when
 * VEB stats are enabled, a dedicated VEB counter in a second AQ call.
 * Outputs the IDs through *sw_id and *counter_id; frees all temporary
 * buffers on every path. Returns ICE_ERR_NO_MEMORY or the AQ status.
 */
743 * ice_alloc_sw - allocate resources specific to switch
744 * @hw: pointer to the HW struct
745 * @ena_stats: true to turn on VEB stats
746 * @shared_res: true for shared resource, false for dedicated resource
747 * @sw_id: switch ID returned
748 * @counter_id: VEB counter ID returned
750 * allocates switch resources (SWID and VEB counter) (0x0208)
753 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
756 struct ice_aqc_alloc_free_res_elem *sw_buf;
757 struct ice_aqc_res_elem *sw_ele;
758 enum ice_status status;
761 buf_len = sizeof(*sw_buf);
762 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
763 ice_malloc(hw, buf_len);
765 return ICE_ERR_NO_MEMORY;
767 /* Prepare buffer for switch ID.
768 * The number of resource entries in buffer is passed as 1 since only a
769 * single switch/VEB instance is allocated, and hence a single sw_id
772 sw_buf->num_elems = CPU_TO_LE16(1);
774 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
775 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
776 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
778 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
779 ice_aqc_opc_alloc_res, NULL);
782 goto ice_alloc_sw_exit;
784 sw_ele = &sw_buf->elem[0];
785 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* Second allocation (VEB counter) only when stats were requested;
 * presumably guarded by an "if (ena_stats)" not visible here — verify
 * against the full source.
 */
788 /* Prepare buffer for VEB Counter */
789 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
790 struct ice_aqc_alloc_free_res_elem *counter_buf;
791 struct ice_aqc_res_elem *counter_ele;
793 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
794 ice_malloc(hw, buf_len);
796 status = ICE_ERR_NO_MEMORY;
797 goto ice_alloc_sw_exit;
800 /* The number of resource entries in buffer is passed as 1 since
801 * only a single switch/VEB instance is allocated, and hence a
802 * single VEB counter is requested.
804 counter_buf->num_elems = CPU_TO_LE16(1);
805 counter_buf->res_type =
806 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
807 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
808 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
812 ice_free(hw, counter_buf);
813 goto ice_alloc_sw_exit;
815 counter_ele = &counter_buf->elem[0];
816 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
817 ice_free(hw, counter_buf);
/* Common exit: release the SWID request buffer. */
821 ice_free(hw, sw_buf);
/* Frees the SWID and the VEB counter via two free-resource AQ commands
 * (0x0209). Best-effort: the counter free is attempted even if the SWID
 * free failed, and the last failure status is the one returned.
 */
826 * ice_free_sw - free resources specific to switch
827 * @hw: pointer to the HW struct
828 * @sw_id: switch ID returned
829 * @counter_id: VEB counter ID returned
831 * free switch resources (SWID and VEB counter) (0x0209)
833 * NOTE: This function frees multiple resources. It continues
834 * releasing other resources even after it encounters error.
835 * The error code returned is the last error it encountered.
837 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
839 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
840 enum ice_status status, ret_status;
843 buf_len = sizeof(*sw_buf);
844 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
845 ice_malloc(hw, buf_len);
847 return ICE_ERR_NO_MEMORY;
849 /* Prepare buffer to free for switch ID res.
850 * The number of resource entries in buffer is passed as 1 since only a
851 * single switch/VEB instance is freed, and hence a single sw_id
854 sw_buf->num_elems = CPU_TO_LE16(1);
855 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
856 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
858 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
859 ice_aqc_opc_free_res, NULL);
862 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
864 /* Prepare buffer to free for VEB Counter resource */
865 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
866 ice_malloc(hw, buf_len);
868 ice_free(hw, sw_buf);
869 return ICE_ERR_NO_MEMORY;
872 /* The number of resource entries in buffer is passed as 1 since only a
873 * single switch/VEB instance is freed, and hence a single VEB counter
876 counter_buf->num_elems = CPU_TO_LE16(1);
877 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
878 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
880 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
881 ice_aqc_opc_free_res, NULL);
883 ice_debug(hw, ICE_DBG_SW,
884 "VEB counter resource could not be freed\n");
888 ice_free(hw, counter_buf);
889 ice_free(hw, sw_buf);
/* ice_aq_add_vsi: issues AQ opcode 0x0210 to add a VSI context. When
 * alloc_from_pool is false the caller-supplied vsi_num is sent with the
 * IS_VALID flag; the response's VSI number and allocated/unallocated
 * counts are written back into vsi_ctx.
 */
895 * @hw: pointer to the HW struct
896 * @vsi_ctx: pointer to a VSI context struct
897 * @cd: pointer to command details structure or NULL
899 * Add a VSI context to the hardware (0x0210)
902 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
903 struct ice_sq_cd *cd)
905 struct ice_aqc_add_update_free_vsi_resp *res;
906 struct ice_aqc_add_get_update_free_vsi *cmd;
907 struct ice_aq_desc desc;
908 enum ice_status status;
910 cmd = &desc.params.vsi_cmd;
911 res = &desc.params.add_update_free_vsi_res;
913 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
915 if (!vsi_ctx->alloc_from_pool)
916 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
917 ICE_AQ_VSI_IS_VALID);
919 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: the command carries a data buffer (the VSI info). */
921 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
923 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
924 sizeof(vsi_ctx->info), cd);
927 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
928 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
929 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/* ice_aq_free_vsi: issues AQ opcode 0x0213 to free a VSI context,
 * optionally keeping the allocation (KEEP_ALLOC flag) for this PF.
 * Updates vsi_ctx's allocated/unallocated counters from the response.
 */
937 * @hw: pointer to the HW struct
938 * @vsi_ctx: pointer to a VSI context struct
939 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
940 * @cd: pointer to command details structure or NULL
942 * Free VSI context info from hardware (0x0213)
945 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
946 bool keep_vsi_alloc, struct ice_sq_cd *cd)
948 struct ice_aqc_add_update_free_vsi_resp *resp;
949 struct ice_aqc_add_get_update_free_vsi *cmd;
950 struct ice_aq_desc desc;
951 enum ice_status status;
953 cmd = &desc.params.vsi_cmd;
954 resp = &desc.params.add_update_free_vsi_res;
956 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
958 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the keep_vsi_alloc conditional guarding this flag is
 * not visible in this extract.
 */
960 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
962 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
964 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
965 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* ice_aq_update_vsi: issues AQ opcode 0x0211 to update an existing VSI
 * context, sending vsi_ctx->info as the command buffer and refreshing
 * the allocated/unallocated counters from the response.
 */
973 * @hw: pointer to the HW struct
974 * @vsi_ctx: pointer to a VSI context struct
975 * @cd: pointer to command details structure or NULL
977 * Update VSI context in the hardware (0x0211)
980 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
981 struct ice_sq_cd *cd)
983 struct ice_aqc_add_update_free_vsi_resp *resp;
984 struct ice_aqc_add_get_update_free_vsi *cmd;
985 struct ice_aq_desc desc;
986 enum ice_status status;
988 cmd = &desc.params.vsi_cmd;
989 resp = &desc.params.add_update_free_vsi_res;
991 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
993 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
995 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
997 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
998 sizeof(vsi_ctx->info), cd);
1001 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1002 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* True iff vsi_handle is in range and a context has been saved for it. */
1009 * ice_is_vsi_valid - check whether the VSI is valid or not
1010 * @hw: pointer to the HW struct
1011 * @vsi_handle: VSI handle
1013 * check whether the VSI is valid or not
1015 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1017 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
/* Unchecked lookup of the hardware VSI number for a handle; the handle
 * must have been validated with ice_is_vsi_valid() first.
 */
1021 * ice_get_hw_vsi_num - return the HW VSI number
1022 * @hw: pointer to the HW struct
1023 * @vsi_handle: VSI handle
1025 * return the HW VSI number
1026 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1028 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1030 return hw->vsi_ctx[vsi_handle]->vsi_num;
/* Range-checked context lookup: NULL when the handle is out of range. */
1034 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1035 * @hw: pointer to the HW struct
1036 * @vsi_handle: VSI handle
1038 * return the VSI context entry for a given VSI handle
1040 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1042 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
/* Stores the context pointer in the per-handle table; ownership of the
 * pointed-to context stays with the table until ice_clear_vsi_ctx().
 */
1046 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1047 * @hw: pointer to the HW struct
1048 * @vsi_handle: VSI handle
1049 * @vsi: VSI context pointer
1051 * save the VSI context entry for a given VSI handle
1054 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1056 hw->vsi_ctx[vsi_handle] = vsi;
/* Frees each per-TC LAN queue context of the VSI and NULLs the pointers;
 * silently does nothing for an unknown handle.
 */
1060 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1061 * @hw: pointer to the HW struct
1062 * @vsi_handle: VSI handle
1064 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1066 struct ice_vsi_ctx *vsi;
1069 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1072 ice_for_each_traffic_class(i) {
1073 if (vsi->lan_q_ctx[i]) {
1074 ice_free(hw, vsi->lan_q_ctx[i]);
1075 vsi->lan_q_ctx[i] = NULL;
/* Releases a handle's queue contexts and removes its entry from the
 * per-handle table (setting the slot to NULL).
 */
1081 * ice_clear_vsi_ctx - clear the VSI context entry
1082 * @hw: pointer to the HW struct
1083 * @vsi_handle: VSI handle
1085 * clear the VSI context entry
1087 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1089 struct ice_vsi_ctx *vsi;
1091 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1093 ice_clear_vsi_q_ctx(hw, vsi_handle);
1095 hw->vsi_ctx[vsi_handle] = NULL;
/* Clears every VSI handle slot (0 .. ICE_MAX_VSI-1). */
1100 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1101 * @hw: pointer to the HW struct
1103 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1107 for (i = 0; i < ICE_MAX_VSI; i++)
1108 ice_clear_vsi_ctx(hw, i);
/* Adds the VSI in hardware (ice_aq_add_vsi) and records it in the
 * driver's handle table. If no context exists yet for the handle, a new
 * one is allocated and copied from vsi_ctx; on allocation failure the
 * just-created hardware VSI is freed again. For an existing handle only
 * the hardware VSI number is refreshed.
 */
1112 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1113 * @hw: pointer to the HW struct
1114 * @vsi_handle: unique VSI handle provided by drivers
1115 * @vsi_ctx: pointer to a VSI context struct
1116 * @cd: pointer to command details structure or NULL
1118 * Add a VSI context to the hardware also add it into the VSI handle list.
1119 * If this function gets called after reset for existing VSIs then update
1120 * with the new HW VSI number in the corresponding VSI handle list entry.
1123 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1124 struct ice_sq_cd *cd)
1126 struct ice_vsi_ctx *tmp_vsi_ctx;
1127 enum ice_status status;
1129 if (vsi_handle >= ICE_MAX_VSI)
1130 return ICE_ERR_PARAM;
1131 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1134 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1136 /* Create a new VSI context */
1137 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1138 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed: undo the hardware add before reporting OOM. */
1140 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1141 return ICE_ERR_NO_MEMORY;
1143 *tmp_vsi_ctx = *vsi_ctx;
1145 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1147 /* update with new HW VSI num */
1148 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
/* Validates the handle, resolves it to the hardware VSI number, frees
 * the VSI in hardware and then drops the driver-side context entry.
 * Returns ICE_ERR_PARAM for an invalid handle.
 */
1155 * ice_free_vsi- free VSI context from hardware and VSI handle list
1156 * @hw: pointer to the HW struct
1157 * @vsi_handle: unique VSI handle
1158 * @vsi_ctx: pointer to a VSI context struct
1159 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1160 * @cd: pointer to command details structure or NULL
1162 * Free VSI context info from hardware as well as from VSI handle list
1165 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1166 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1168 enum ice_status status;
1170 if (!ice_is_vsi_valid(hw, vsi_handle))
1171 return ICE_ERR_PARAM;
1172 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1173 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1175 ice_clear_vsi_ctx(hw, vsi_handle);
/* ice_update_vsi: handle-validating wrapper around ice_aq_update_vsi();
 * fills in the hardware VSI number before issuing the update. Returns
 * ICE_ERR_PARAM for an invalid handle.
 */
1181 * @hw: pointer to the HW struct
1182 * @vsi_handle: unique VSI handle
1183 * @vsi_ctx: pointer to a VSI context struct
1184 * @cd: pointer to command details structure or NULL
1186 * Update VSI context in the hardware
1189 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1190 struct ice_sq_cd *cd)
1192 if (!ice_is_vsi_valid(hw, vsi_handle))
1193 return ICE_ERR_PARAM;
1194 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1195 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1199 * ice_aq_get_vsi_params
1200 * @hw: pointer to the HW struct
1201 * @vsi_ctx: pointer to a VSI context struct
1202 * @cd: pointer to command details structure or NULL
1204 * Get VSI context info from hardware (0x0212)
1207 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1208 struct ice_sq_cd *cd)
1210 struct ice_aqc_add_get_update_free_vsi *cmd;
1211 struct ice_aqc_get_vsi_resp *resp;
1212 struct ice_aq_desc desc;
1213 enum ice_status status;
1215 cmd = &desc.params.vsi_cmd;
1216 resp = &desc.params.get_vsi_resp;
1218 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1220 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1222 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1223 sizeof(vsi_ctx->info), cd);
1225 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1227 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1228 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1235 * ice_aq_add_update_mir_rule - add/update a mirror rule
1236 * @hw: pointer to the HW struct
1237 * @rule_type: Rule Type
1238 * @dest_vsi: VSI number to which packets will be mirrored
1239 * @count: length of the list
1240 * @mr_buf: buffer for list of mirrored VSI numbers
1241 * @cd: pointer to command details structure or NULL
1244 * Add/Update Mirror Rule (0x260).
1247 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1248 u16 count, struct ice_mir_rule_buf *mr_buf,
1249 struct ice_sq_cd *cd, u16 *rule_id)
1251 struct ice_aqc_add_update_mir_rule *cmd;
1252 struct ice_aq_desc desc;
1253 enum ice_status status;
1254 __le16 *mr_list = NULL;
1257 switch (rule_type) {
1258 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1259 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1260 /* Make sure count and mr_buf are set for these rule_types */
1261 if (!(count && mr_buf))
1262 return ICE_ERR_PARAM;
1264 buf_size = count * sizeof(__le16);
1265 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1267 return ICE_ERR_NO_MEMORY;
1269 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1270 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1271 /* Make sure count and mr_buf are not set for these
1274 if (count || mr_buf)
1275 return ICE_ERR_PARAM;
1278 ice_debug(hw, ICE_DBG_SW,
1279 "Error due to unsupported rule_type %u\n", rule_type);
1280 return ICE_ERR_OUT_OF_RANGE;
1283 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1285 /* Pre-process 'mr_buf' items for add/update of virtual port
1286 * ingress/egress mirroring (but not physical port ingress/egress
1292 for (i = 0; i < count; i++) {
1295 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1297 /* Validate specified VSI number, make sure it is less
1298 * than ICE_MAX_VSI, if not return with error.
1300 if (id >= ICE_MAX_VSI) {
1301 ice_debug(hw, ICE_DBG_SW,
1302 "Error VSI index (%u) out-of-range\n",
1304 ice_free(hw, mr_list);
1305 return ICE_ERR_OUT_OF_RANGE;
1308 /* add VSI to mirror rule */
1311 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1312 else /* remove VSI from mirror rule */
1313 mr_list[i] = CPU_TO_LE16(id);
1317 cmd = &desc.params.add_update_rule;
1318 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1319 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1320 ICE_AQC_RULE_ID_VALID_M);
1321 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1322 cmd->num_entries = CPU_TO_LE16(count);
1323 cmd->dest = CPU_TO_LE16(dest_vsi);
1325 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1327 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1329 ice_free(hw, mr_list);
1335 * ice_aq_delete_mir_rule - delete a mirror rule
1336 * @hw: pointer to the HW struct
1337 * @rule_id: Mirror rule ID (to be deleted)
1338 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1339 * otherwise it is returned to the shared pool
1340 * @cd: pointer to command details structure or NULL
1342 * Delete Mirror Rule (0x261).
1345 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1346 struct ice_sq_cd *cd)
1348 struct ice_aqc_delete_mir_rule *cmd;
1349 struct ice_aq_desc desc;
1351 /* rule_id should be in the range 0...63 */
1352 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1353 return ICE_ERR_OUT_OF_RANGE;
1355 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1357 cmd = &desc.params.del_rule;
1358 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1359 cmd->rule_id = CPU_TO_LE16(rule_id);
1362 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1364 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1368 * ice_aq_alloc_free_vsi_list
1369 * @hw: pointer to the HW struct
1370 * @vsi_list_id: VSI list ID returned or used for lookup
1371 * @lkup_type: switch rule filter lookup type
1372 * @opc: switch rules population command type - pass in the command opcode
1374 * allocates or free a VSI list resource
1376 static enum ice_status
1377 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1378 enum ice_sw_lkup_type lkup_type,
1379 enum ice_adminq_opc opc)
1381 struct ice_aqc_alloc_free_res_elem *sw_buf;
1382 struct ice_aqc_res_elem *vsi_ele;
1383 enum ice_status status;
1386 buf_len = sizeof(*sw_buf);
1387 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1388 ice_malloc(hw, buf_len);
1390 return ICE_ERR_NO_MEMORY;
1391 sw_buf->num_elems = CPU_TO_LE16(1);
1393 if (lkup_type == ICE_SW_LKUP_MAC ||
1394 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1395 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1396 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1397 lkup_type == ICE_SW_LKUP_PROMISC ||
1398 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1399 lkup_type == ICE_SW_LKUP_LAST) {
1400 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1401 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1403 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1405 status = ICE_ERR_PARAM;
1406 goto ice_aq_alloc_free_vsi_list_exit;
1409 if (opc == ice_aqc_opc_free_res)
1410 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1412 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1414 goto ice_aq_alloc_free_vsi_list_exit;
1416 if (opc == ice_aqc_opc_alloc_res) {
1417 vsi_ele = &sw_buf->elem[0];
1418 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1421 ice_aq_alloc_free_vsi_list_exit:
1422 ice_free(hw, sw_buf);
1427 * ice_aq_set_storm_ctrl - Sets storm control configuration
1428 * @hw: pointer to the HW struct
1429 * @bcast_thresh: represents the upper threshold for broadcast storm control
1430 * @mcast_thresh: represents the upper threshold for multicast storm control
1431 * @ctl_bitmask: storm control control knobs
1433 * Sets the storm control configuration (0x0280)
1436 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1439 struct ice_aqc_storm_cfg *cmd;
1440 struct ice_aq_desc desc;
1442 cmd = &desc.params.storm_conf;
1444 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1446 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1447 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1448 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1450 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1454 * ice_aq_get_storm_ctrl - gets storm control configuration
1455 * @hw: pointer to the HW struct
1456 * @bcast_thresh: represents the upper threshold for broadcast storm control
1457 * @mcast_thresh: represents the upper threshold for multicast storm control
1458 * @ctl_bitmask: storm control control knobs
1460 * Gets the storm control configuration (0x0281)
1463 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1466 enum ice_status status;
1467 struct ice_aq_desc desc;
1469 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1471 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1473 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1476 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1479 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1482 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1489 * ice_aq_sw_rules - add/update/remove switch rules
1490 * @hw: pointer to the HW struct
1491 * @rule_list: pointer to switch rule population list
1492 * @rule_list_sz: total size of the rule list in bytes
1493 * @num_rules: number of switch rules in the rule_list
1494 * @opc: switch rules population command type - pass in the command opcode
1495 * @cd: pointer to command details structure or NULL
1497 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1499 static enum ice_status
1500 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1501 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1503 struct ice_aq_desc desc;
1505 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1507 if (opc != ice_aqc_opc_add_sw_rules &&
1508 opc != ice_aqc_opc_update_sw_rules &&
1509 opc != ice_aqc_opc_remove_sw_rules)
1510 return ICE_ERR_PARAM;
1512 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1514 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1515 desc.params.sw_rules.num_rules_fltr_entry_index =
1516 CPU_TO_LE16(num_rules);
1517 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1521 * ice_aq_add_recipe - add switch recipe
1522 * @hw: pointer to the HW struct
1523 * @s_recipe_list: pointer to switch rule population list
1524 * @num_recipes: number of switch recipes in the list
1525 * @cd: pointer to command details structure or NULL
1530 ice_aq_add_recipe(struct ice_hw *hw,
1531 struct ice_aqc_recipe_data_elem *s_recipe_list,
1532 u16 num_recipes, struct ice_sq_cd *cd)
1534 struct ice_aqc_add_get_recipe *cmd;
1535 struct ice_aq_desc desc;
1538 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1539 cmd = &desc.params.add_get_recipe;
1540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1542 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1543 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1545 buf_size = num_recipes * sizeof(*s_recipe_list);
1547 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1551 * ice_aq_get_recipe - get switch recipe
1552 * @hw: pointer to the HW struct
1553 * @s_recipe_list: pointer to switch rule population list
1554 * @num_recipes: pointer to the number of recipes (input and output)
1555 * @recipe_root: root recipe number of recipe(s) to retrieve
1556 * @cd: pointer to command details structure or NULL
1560 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1561 * On output, *num_recipes will equal the number of entries returned in
1564 * The caller must supply enough space in s_recipe_list to hold all possible
1565 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1568 ice_aq_get_recipe(struct ice_hw *hw,
1569 struct ice_aqc_recipe_data_elem *s_recipe_list,
1570 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1572 struct ice_aqc_add_get_recipe *cmd;
1573 struct ice_aq_desc desc;
1574 enum ice_status status;
1577 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1578 return ICE_ERR_PARAM;
1580 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1581 cmd = &desc.params.add_get_recipe;
1582 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1584 cmd->return_index = CPU_TO_LE16(recipe_root);
1585 cmd->num_sub_recipes = 0;
1587 buf_size = *num_recipes * sizeof(*s_recipe_list);
1589 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1590 /* cppcheck-suppress constArgument */
1591 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1597 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1598 * @hw: pointer to the HW struct
1599 * @profile_id: package profile ID to associate the recipe with
1600 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1601 * @cd: pointer to command details structure or NULL
1602 * Recipe to profile association (0x0291)
1605 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1606 struct ice_sq_cd *cd)
1608 struct ice_aqc_recipe_to_profile *cmd;
1609 struct ice_aq_desc desc;
1611 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1612 cmd = &desc.params.recipe_to_profile;
1613 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1614 cmd->profile_id = CPU_TO_LE16(profile_id);
1615 /* Set the recipe ID bit in the bitmask to let the device know which
1616 * profile we are associating the recipe to
1618 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1619 ICE_NONDMA_TO_NONDMA);
1621 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1625 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1626 * @hw: pointer to the HW struct
1627 * @profile_id: package profile ID to associate the recipe with
1628 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1629 * @cd: pointer to command details structure or NULL
1630 * Associate profile ID with given recipe (0x0293)
1633 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1634 struct ice_sq_cd *cd)
1636 struct ice_aqc_recipe_to_profile *cmd;
1637 struct ice_aq_desc desc;
1638 enum ice_status status;
1640 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1641 cmd = &desc.params.recipe_to_profile;
1642 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1643 cmd->profile_id = CPU_TO_LE16(profile_id);
1645 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1647 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1648 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1654 * ice_alloc_recipe - add recipe resource
1655 * @hw: pointer to the hardware structure
1656 * @rid: recipe ID returned as response to AQ call
1658 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1660 struct ice_aqc_alloc_free_res_elem *sw_buf;
1661 enum ice_status status;
1664 buf_len = sizeof(*sw_buf);
1665 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1667 return ICE_ERR_NO_MEMORY;
1669 sw_buf->num_elems = CPU_TO_LE16(1);
1670 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1671 ICE_AQC_RES_TYPE_S) |
1672 ICE_AQC_RES_TYPE_FLAG_SHARED);
1673 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1674 ice_aqc_opc_alloc_res, NULL);
1676 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1677 ice_free(hw, sw_buf);
1682 /* ice_init_port_info - Initialize port_info with switch configuration data
1683 * @pi: pointer to port_info
1684 * @vsi_port_num: VSI number or port number
1685 * @type: Type of switch element (port or VSI)
1686 * @swid: switch ID of the switch the element is attached to
1687 * @pf_vf_num: PF or VF number
1688 * @is_vf: true if the element is a VF, false otherwise
1691 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1692 u16 swid, u16 pf_vf_num, bool is_vf)
1695 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1696 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1698 pi->pf_vf_num = pf_vf_num;
1700 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1701 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1704 ice_debug(pi->hw, ICE_DBG_SW,
1705 "incorrect VSI/port type received\n");
1710 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1711 * @hw: pointer to the hardware structure
1713 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1715 struct ice_aqc_get_sw_cfg_resp *rbuf;
1716 enum ice_status status;
1717 u16 num_total_ports;
1723 num_total_ports = 1;
1725 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1726 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1729 return ICE_ERR_NO_MEMORY;
1731 /* Multiple calls to ice_aq_get_sw_cfg may be required
1732 * to get all the switch configuration information. The need
1733 * for additional calls is indicated by ice_aq_get_sw_cfg
1734 * writing a non-zero value in req_desc
1737 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1738 &req_desc, &num_elems, NULL);
1743 for (i = 0; i < num_elems; i++) {
1744 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1745 u16 pf_vf_num, swid, vsi_port_num;
1749 ele = rbuf[i].elements;
1750 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1751 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1753 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1754 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1756 swid = LE16_TO_CPU(ele->swid);
1758 if (LE16_TO_CPU(ele->pf_vf_num) &
1759 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1762 type = LE16_TO_CPU(ele->vsi_port_num) >>
1763 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1766 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1767 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1768 if (j == num_total_ports) {
1769 ice_debug(hw, ICE_DBG_SW,
1770 "more ports than expected\n");
1771 status = ICE_ERR_CFG;
1774 ice_init_port_info(hw->port_info,
1775 vsi_port_num, type, swid,
1783 } while (req_desc && !status);
1787 ice_free(hw, (void *)rbuf);
1793 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1794 * @hw: pointer to the hardware structure
1795 * @fi: filter info structure to fill/update
1797 * This helper function populates the lb_en and lan_en elements of the provided
1798 * ice_fltr_info struct using the switch's type and characteristics of the
1799 * switch rule being configured.
1801 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1805 if ((fi->flag & ICE_FLTR_TX) &&
1806 (fi->fltr_act == ICE_FWD_TO_VSI ||
1807 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1808 fi->fltr_act == ICE_FWD_TO_Q ||
1809 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1810 /* Setting LB for prune actions will result in replicated
1811 * packets to the internal switch that will be dropped.
1813 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1816 /* Set lan_en to TRUE if
1817 * 1. The switch is a VEB AND
1819 * 2.1 The lookup is a directional lookup like ethertype,
1820 * promiscuous, ethertype-MAC, promiscuous-VLAN
1821 * and default-port OR
1822 * 2.2 The lookup is VLAN, OR
1823 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1824 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1828 * The switch is a VEPA.
1830 * In all other cases, the LAN enable has to be set to false.
1833 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1834 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1835 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1836 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1837 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1838 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1839 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1840 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1841 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1842 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1851 * ice_fill_sw_rule - Helper function to fill switch rule structure
1852 * @hw: pointer to the hardware structure
1853 * @f_info: entry containing packet forwarding information
1854 * @s_rule: switch rule structure to be filled in based on mac_entry
1855 * @opc: switch rules population command type - pass in the command opcode
1858 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1859 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1861 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1869 if (opc == ice_aqc_opc_remove_sw_rules) {
1870 s_rule->pdata.lkup_tx_rx.act = 0;
1871 s_rule->pdata.lkup_tx_rx.index =
1872 CPU_TO_LE16(f_info->fltr_rule_id);
1873 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1877 eth_hdr_sz = sizeof(dummy_eth_header);
1878 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1880 /* initialize the ether header with a dummy header */
1881 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1882 ice_fill_sw_info(hw, f_info);
1884 switch (f_info->fltr_act) {
1885 case ICE_FWD_TO_VSI:
1886 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1887 ICE_SINGLE_ACT_VSI_ID_M;
1888 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1889 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1890 ICE_SINGLE_ACT_VALID_BIT;
1892 case ICE_FWD_TO_VSI_LIST:
1893 act |= ICE_SINGLE_ACT_VSI_LIST;
1894 act |= (f_info->fwd_id.vsi_list_id <<
1895 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1896 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1897 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1898 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1899 ICE_SINGLE_ACT_VALID_BIT;
1902 act |= ICE_SINGLE_ACT_TO_Q;
1903 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1904 ICE_SINGLE_ACT_Q_INDEX_M;
1906 case ICE_DROP_PACKET:
1907 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1908 ICE_SINGLE_ACT_VALID_BIT;
1910 case ICE_FWD_TO_QGRP:
1911 q_rgn = f_info->qgrp_size > 0 ?
1912 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1913 act |= ICE_SINGLE_ACT_TO_Q;
1914 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1915 ICE_SINGLE_ACT_Q_INDEX_M;
1916 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1917 ICE_SINGLE_ACT_Q_REGION_M;
1924 act |= ICE_SINGLE_ACT_LB_ENABLE;
1926 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1928 switch (f_info->lkup_type) {
1929 case ICE_SW_LKUP_MAC:
1930 daddr = f_info->l_data.mac.mac_addr;
1932 case ICE_SW_LKUP_VLAN:
1933 vlan_id = f_info->l_data.vlan.vlan_id;
1934 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1935 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1936 act |= ICE_SINGLE_ACT_PRUNE;
1937 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1940 case ICE_SW_LKUP_ETHERTYPE_MAC:
1941 daddr = f_info->l_data.ethertype_mac.mac_addr;
1943 case ICE_SW_LKUP_ETHERTYPE:
1944 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1945 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1947 case ICE_SW_LKUP_MAC_VLAN:
1948 daddr = f_info->l_data.mac_vlan.mac_addr;
1949 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1951 case ICE_SW_LKUP_PROMISC_VLAN:
1952 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1954 case ICE_SW_LKUP_PROMISC:
1955 daddr = f_info->l_data.mac_vlan.mac_addr;
1961 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1962 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1963 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1965 /* Recipe set depending on lookup type */
1966 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1967 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1968 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1971 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1972 ICE_NONDMA_TO_NONDMA);
1974 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1975 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1976 *off = CPU_TO_BE16(vlan_id);
1979 /* Create the switch rule with the final dummy Ethernet header */
1980 if (opc != ice_aqc_opc_update_sw_rules)
1981 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1985 * ice_add_marker_act
1986 * @hw: pointer to the hardware structure
1987 * @m_ent: the management entry for which sw marker needs to be added
1988 * @sw_marker: sw marker to tag the Rx descriptor with
1989 * @l_id: large action resource ID
1991 * Create a large action to hold software marker and update the switch rule
1992 * entry pointed by m_ent with newly created large action
1994 static enum ice_status
1995 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1996 u16 sw_marker, u16 l_id)
1998 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1999 /* For software marker we need 3 large actions
2000 * 1. FWD action: FWD TO VSI or VSI LIST
2001 * 2. GENERIC VALUE action to hold the profile ID
2002 * 3. GENERIC VALUE action to hold the software marker ID
2004 const u16 num_lg_acts = 3;
2005 enum ice_status status;
2011 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2012 return ICE_ERR_PARAM;
2014 /* Create two back-to-back switch rules and submit them to the HW using
2015 * one memory buffer:
2019 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2020 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2021 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2023 return ICE_ERR_NO_MEMORY;
2025 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2027 /* Fill in the first switch rule i.e. large action */
2028 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2029 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2030 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2032 /* First action VSI forwarding or VSI list forwarding depending on how
2035 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2036 m_ent->fltr_info.fwd_id.hw_vsi_id;
2038 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2039 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2040 ICE_LG_ACT_VSI_LIST_ID_M;
2041 if (m_ent->vsi_count > 1)
2042 act |= ICE_LG_ACT_VSI_LIST;
2043 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2045 /* Second action descriptor type */
2046 act = ICE_LG_ACT_GENERIC;
2048 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2049 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2051 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2052 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2054 /* Third action Marker value */
2055 act |= ICE_LG_ACT_GENERIC;
2056 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2057 ICE_LG_ACT_GENERIC_VALUE_M;
2059 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2061 /* call the fill switch rule to fill the lookup Tx Rx structure */
2062 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2063 ice_aqc_opc_update_sw_rules);
2065 /* Update the action to point to the large action ID */
2066 rx_tx->pdata.lkup_tx_rx.act =
2067 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2068 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2069 ICE_SINGLE_ACT_PTR_VAL_M));
2071 /* Use the filter rule ID of the previously created rule with single
2072 * act. Once the update happens, hardware will treat this as large
2075 rx_tx->pdata.lkup_tx_rx.index =
2076 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2078 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2079 ice_aqc_opc_update_sw_rules, NULL);
2081 m_ent->lg_act_idx = l_id;
2082 m_ent->sw_marker_id = sw_marker;
2085 ice_free(hw, lg_act);
2090 * ice_add_counter_act - add/update filter rule with counter action
2091 * @hw: pointer to the hardware structure
2092 * @m_ent: the management entry for which counter needs to be added
2093 * @counter_id: VLAN counter ID returned as part of allocate resource
2094 * @l_id: large action resource ID
2096 static enum ice_status
2097 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2098 u16 counter_id, u16 l_id)
2100 struct ice_aqc_sw_rules_elem *lg_act;
2101 struct ice_aqc_sw_rules_elem *rx_tx;
2102 enum ice_status status;
2103 /* 2 actions will be added while adding a large action counter */
2104 const int num_acts = 2;
2111 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2112 return ICE_ERR_PARAM;
2114 /* Create two back-to-back switch rules and submit them to the HW using
2115 * one memory buffer:
2119 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2120 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2121 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2124 return ICE_ERR_NO_MEMORY;
2126 rx_tx = (struct ice_aqc_sw_rules_elem *)
2127 ((u8 *)lg_act + lg_act_size);
2129 /* Fill in the first switch rule i.e. large action */
2130 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2131 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2132 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2134 /* First action VSI forwarding or VSI list forwarding depending on how
2137 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2138 m_ent->fltr_info.fwd_id.hw_vsi_id;
2140 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2141 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2142 ICE_LG_ACT_VSI_LIST_ID_M;
2143 if (m_ent->vsi_count > 1)
2144 act |= ICE_LG_ACT_VSI_LIST;
2145 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2147 /* Second action counter ID */
2148 act = ICE_LG_ACT_STAT_COUNT;
2149 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2150 ICE_LG_ACT_STAT_COUNT_M;
2151 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2153 /* call the fill switch rule to fill the lookup Tx Rx structure */
2154 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2155 ice_aqc_opc_update_sw_rules);
2157 act = ICE_SINGLE_ACT_PTR;
2158 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2159 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2161 /* Use the filter rule ID of the previously created rule with single
2162 * act. Once the update happens, hardware will treat this as large
2165 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2166 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2168 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2169 ice_aqc_opc_update_sw_rules, NULL);
2171 m_ent->lg_act_idx = l_id;
2172 m_ent->counter_index = counter_id;
2175 ice_free(hw, lg_act);
2180 * ice_create_vsi_list_map
2181 * @hw: pointer to the hardware structure
2182 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2183 * @num_vsi: number of VSI handles in the array
2184 * @vsi_list_id: VSI list ID generated as part of allocate resource
2186 * Helper function to create a new entry of VSI list ID to VSI mapping
2187 * using the given VSI list ID
2189 static struct ice_vsi_list_map_info *
2190 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2193 struct ice_switch_info *sw = hw->switch_info;
2194 struct ice_vsi_list_map_info *v_map;
2197 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2202 v_map->vsi_list_id = vsi_list_id;
2204 for (i = 0; i < num_vsi; i++)
2205 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2207 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2212 * ice_update_vsi_list_rule
2213 * @hw: pointer to the hardware structure
2214 * @vsi_handle_arr: array of VSI handles to form a VSI list
2215 * @num_vsi: number of VSI handles in the array
2216 * @vsi_list_id: VSI list ID generated as part of allocate resource
2217 * @remove: Boolean value to indicate if this is a remove action
2218 * @opc: switch rules population command type - pass in the command opcode
2219 * @lkup_type: lookup type of the filter
2221 * Call AQ command to add a new switch rule or update existing switch rule
2222 * using the given VSI list ID
2224 static enum ice_status
2225 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2226 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2227 enum ice_sw_lkup_type lkup_type)
2229 struct ice_aqc_sw_rules_elem *s_rule;
2230 enum ice_status status;
2236 return ICE_ERR_PARAM;
2238 if (lkup_type == ICE_SW_LKUP_MAC ||
2239 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2240 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2241 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2242 lkup_type == ICE_SW_LKUP_PROMISC ||
2243 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2244 lkup_type == ICE_SW_LKUP_LAST)
2245 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2246 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2247 else if (lkup_type == ICE_SW_LKUP_VLAN)
2248 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2249 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2251 return ICE_ERR_PARAM;
2253 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2254 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2256 return ICE_ERR_NO_MEMORY;
2257 for (i = 0; i < num_vsi; i++) {
2258 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2259 status = ICE_ERR_PARAM;
2262 /* AQ call requires hw_vsi_id(s) */
2263 s_rule->pdata.vsi_list.vsi[i] =
2264 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2267 s_rule->type = CPU_TO_LE16(type);
2268 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2269 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2271 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2274 ice_free(hw, s_rule);
/**
2279 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2280 * @hw: pointer to the HW struct
2281 * @vsi_handle_arr: array of VSI handles to form a VSI list
2282 * @num_vsi: number of VSI handles in the array
2283 * @vsi_list_id: stores the ID of the VSI list to be created
2284 * @lkup_type: switch rule filter's lookup type
 *
 * Allocates a VSI list resource from firmware, then populates it with the
 * given VSIs via ice_update_vsi_list_rule(). On success *vsi_list_id holds
 * the firmware-assigned list ID.
 */
2286 static enum ice_status
2287 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2288 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2290 enum ice_status status;
/* reserve a VSI list resource; firmware writes the new ID to *vsi_list_id */
2292 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2293 ice_aqc_opc_alloc_res);
2297 /* Update the newly created VSI list to include the specified VSIs */
2298 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2299 *vsi_list_id, false,
2300 ice_aqc_opc_add_sw_rules, lkup_type);
/**
2304 * ice_create_pkt_fwd_rule
2305 * @hw: pointer to the hardware structure
2306 * @f_entry: entry containing packet forwarding information
 *
2308 * Create switch rule with given filter information and add an entry
2309 * to the corresponding filter management list to track this switch rule
 *
 * On success the firmware-assigned rule index is copied back into both the
 * caller's f_entry and the new management entry. The management entry is
 * linked onto the recipe's filt_rules list; the temporary AQ buffer is
 * always freed.
 */
2312 static enum ice_status
2313 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2314 struct ice_fltr_list_entry *f_entry)
2316 struct ice_fltr_mgmt_list_entry *fm_entry;
2317 struct ice_aqc_sw_rules_elem *s_rule;
2318 enum ice_sw_lkup_type l_type;
2319 struct ice_sw_recipe *recp;
2320 enum ice_status status;
/* AQ buffer sized for a lookup rule carrying the dummy ethernet header */
2322 s_rule = (struct ice_aqc_sw_rules_elem *)
2323 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2325 return ICE_ERR_NO_MEMORY;
2326 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2327 ice_malloc(hw, sizeof(*fm_entry));
2329 status = ICE_ERR_NO_MEMORY;
2330 goto ice_create_pkt_fwd_rule_exit;
/* book-keeping entry mirrors the caller's filter information */
2333 fm_entry->fltr_info = f_entry->fltr_info;
2335 /* Initialize all the fields for the management entry */
2336 fm_entry->vsi_count = 1;
2337 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2338 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2339 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2341 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2342 ice_aqc_opc_add_sw_rules);
2344 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2345 ice_aqc_opc_add_sw_rules, NULL);
/* on AQ failure the management entry must not be tracked — free it */
2347 ice_free(hw, fm_entry);
2348 goto ice_create_pkt_fwd_rule_exit;
/* propagate the firmware-assigned rule index to both copies */
2351 f_entry->fltr_info.fltr_rule_id =
2352 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2353 fm_entry->fltr_info.fltr_rule_id =
2354 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2356 /* The book keeping entries will get removed when base driver
2357 * calls remove filter AQ command
 */
2359 l_type = fm_entry->fltr_info.lkup_type;
2360 recp = &hw->switch_info->recp_list[l_type];
2361 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2363 ice_create_pkt_fwd_rule_exit:
2364 ice_free(hw, s_rule);
/**
2369 * ice_update_pkt_fwd_rule
2370 * @hw: pointer to the hardware structure
2371 * @f_info: filter information for switch rule
 *
2373 * Call AQ command to update a previously created switch rule with a
 * new forwarding action (e.g. changing FWD_TO_VSI to FWD_TO_VSI_LIST).
 * The existing rule is addressed by f_info->fltr_rule_id.
 */
2376 static enum ice_status
2377 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2379 struct ice_aqc_sw_rules_elem *s_rule;
2380 enum ice_status status;
2382 s_rule = (struct ice_aqc_sw_rules_elem *)
2383 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2385 return ICE_ERR_NO_MEMORY;
2387 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* address the rule that was created earlier, by its firmware index */
2389 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2391 /* Update switch rule with new rule set to forward VSI list */
2392 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2393 ice_aqc_opc_update_sw_rules, NULL);
2395 ice_free(hw, s_rule);
/**
2400 * ice_update_sw_rule_bridge_mode
2401 * @hw: pointer to the HW struct
 *
2403 * Updates unicast switch filter rules based on VEB/VEPA mode
 *
 * Walks the MAC recipe's rule list under its lock and re-issues an update
 * for every unicast Tx forwarding rule so it reflects the current bridge
 * mode. Stops at the first failure (status from the last update attempt
 * is returned).
 */
2405 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2407 struct ice_switch_info *sw = hw->switch_info;
2408 struct ice_fltr_mgmt_list_entry *fm_entry;
2409 enum ice_status status = ICE_SUCCESS;
2410 struct LIST_HEAD_TYPE *rule_head;
2411 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2413 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2414 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2416 ice_acquire_lock(rule_lock);
2417 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2419 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2420 u8 *addr = fi->l_data.mac.mac_addr;
2422 /* Update unicast Tx rules to reflect the selected
 * VEB/VEPA mode; only forwarding actions are affected.
 */
2425 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2426 (fi->fltr_act == ICE_FWD_TO_VSI ||
2427 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2428 fi->fltr_act == ICE_FWD_TO_Q ||
2429 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2430 status = ice_update_pkt_fwd_rule(hw, fi);
2436 ice_release_lock(rule_lock);
/**
2442 * ice_add_update_vsi_list
2443 * @hw: pointer to the hardware structure
2444 * @m_entry: pointer to current filter management list entry
2445 * @cur_fltr: filter information from the book keeping entry
2446 * @new_fltr: filter information with the new VSI to be added
 *
2448 * Call AQ command to add or update previously created VSI list with new VSI.
 *
2450 * Helper function to do book keeping associated with adding filter information
2451 * The algorithm to do the book keeping is described below :
2452 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2453 * if only one VSI has been added till now
2454 * Allocate a new VSI list and add two VSIs
2455 * to this list using switch rule command
2456 * Update the previously created switch rule with the
2457 * newly created VSI list ID
2458 * if a VSI list was previously created
2459 * Add the new VSI to the previously created VSI list set
2460 * using the update switch rule command
 */
2462 static enum ice_status
2463 ice_add_update_vsi_list(struct ice_hw *hw,
2464 struct ice_fltr_mgmt_list_entry *m_entry,
2465 struct ice_fltr_info *cur_fltr,
2466 struct ice_fltr_info *new_fltr)
2468 enum ice_status status = ICE_SUCCESS;
2469 u16 vsi_list_id = 0;
/* queue/queue-group forwarding actions cannot share a VSI list */
2471 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2472 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2473 return ICE_ERR_NOT_IMPL;
2475 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2476 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2477 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2478 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2479 return ICE_ERR_NOT_IMPL;
2481 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2482 /* Only one entry existed in the mapping and it was not already
2483 * a part of a VSI list. So, create a VSI list with the old and
 * new VSIs.
 */
2486 struct ice_fltr_info tmp_fltr;
2487 u16 vsi_handle_arr[2];
2489 /* A rule already exists with the new VSI being added */
2490 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2491 return ICE_ERR_ALREADY_EXISTS;
2493 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2494 vsi_handle_arr[1] = new_fltr->vsi_handle;
2495 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2497 new_fltr->lkup_type);
2501 tmp_fltr = *new_fltr;
2502 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2503 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2504 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2505 /* Update the previous switch rule of "MAC forward to VSI" to
2506 * "MAC fwd to VSI list"
 */
2508 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* book-keeping now tracks the list instead of the single VSI */
2512 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2513 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2514 m_entry->vsi_list_info =
2515 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2518 /* If this entry was large action then the large action needs
2519 * to be updated to point to FWD to VSI list
 */
2521 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2523 ice_add_marker_act(hw, m_entry,
2524 m_entry->sw_marker_id,
2525 m_entry->lg_act_idx);
/* else: a VSI list already exists — just add the new VSI to it */
2527 u16 vsi_handle = new_fltr->vsi_handle;
2528 enum ice_adminq_opc opcode;
2530 if (!m_entry->vsi_list_info)
2533 /* A rule already exists with the new VSI being added */
2534 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2537 /* Update the previously created VSI list set with
2538 * the new VSI ID passed in
 */
2540 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2541 opcode = ice_aqc_opc_update_sw_rules;
2543 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2544 vsi_list_id, false, opcode,
2545 new_fltr->lkup_type);
2546 /* update VSI list mapping info with new VSI ID */
2548 ice_set_bit(vsi_handle,
2549 m_entry->vsi_list_info->vsi_map);
2552 m_entry->vsi_count++;
/**
2557 * ice_find_rule_entry - Search a rule entry
2558 * @hw: pointer to the hardware structure
2559 * @recp_id: lookup type for which the specified rule needs to be searched
2560 * @f_info: rule information
 *
2562 * Helper function to search for a given rule entry
2563 * Returns pointer to entry storing the rule if found
 *
 * Matches on the lookup data (l_data) and the Rx/Tx direction flag only;
 * the forwarding action is not part of the match key. The caller must hold
 * the recipe's filt_rule_lock.
 */
2565 static struct ice_fltr_mgmt_list_entry *
2566 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2568 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2569 struct ice_switch_info *sw = hw->switch_info;
2570 struct LIST_HEAD_TYPE *list_head;
2572 list_head = &sw->recp_list[recp_id].filt_rules;
2573 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* byte-wise compare of the lookup data plus direction flag */
2575 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2576 sizeof(f_info->l_data)) &&
2577 f_info->flag == list_itr->fltr_info.flag) {
/**
2586 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2587 * @hw: pointer to the hardware structure
2588 * @recp_id: lookup type for which VSI lists needs to be searched
2589 * @vsi_handle: VSI handle to be found in VSI list
2590 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
2592 * Helper function to search a VSI list with single entry containing given VSI
2593 * handle element. This can be extended further to search VSI list with more
2594 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 *
 * Two traversal shapes: advanced-rule recipes use a different management
 * entry type (ice_adv_fltr_mgmt_list_entry) than legacy recipes.
 */
2596 static struct ice_vsi_list_map_info *
2597 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2600 struct ice_vsi_list_map_info *map_info = NULL;
2601 struct ice_switch_info *sw = hw->switch_info;
2602 struct LIST_HEAD_TYPE *list_head;
2604 list_head = &sw->recp_list[recp_id].filt_rules;
2605 if (sw->recp_list[recp_id].adv_rule) {
2606 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2608 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2609 ice_adv_fltr_mgmt_list_entry,
2611 if (list_itr->vsi_list_info) {
2612 map_info = list_itr->vsi_list_info;
2613 if (ice_is_bit_set(map_info->vsi_map,
2615 *vsi_list_id = map_info->vsi_list_id;
/* legacy (non-advanced) rule list */
2621 struct ice_fltr_mgmt_list_entry *list_itr;
2623 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2624 ice_fltr_mgmt_list_entry,
/* only single-VSI lists are candidates for reuse here */
2626 if (list_itr->vsi_count == 1 &&
2627 list_itr->vsi_list_info) {
2628 map_info = list_itr->vsi_list_info;
2629 if (ice_is_bit_set(map_info->vsi_map,
2631 *vsi_list_id = map_info->vsi_list_id;
/**
2641 * ice_add_rule_internal - add rule for a given lookup type
2642 * @hw: pointer to the hardware structure
2643 * @recp_id: lookup type (recipe ID) for which rule has to be added
2644 * @f_entry: structure containing MAC forwarding information
 *
2646 * Adds or updates the rule lists for a given recipe
 *
 * If no rule with the same lookup data exists yet, a fresh packet-forward
 * rule is created; otherwise the existing entry is extended via
 * ice_add_update_vsi_list(). Runs under the recipe's filt_rule_lock.
 */
2648 static enum ice_status
2649 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2650 struct ice_fltr_list_entry *f_entry)
2652 struct ice_switch_info *sw = hw->switch_info;
2653 struct ice_fltr_info *new_fltr, *cur_fltr;
2654 struct ice_fltr_mgmt_list_entry *m_entry;
2655 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2656 enum ice_status status = ICE_SUCCESS;
2658 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2659 return ICE_ERR_PARAM;
2661 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2662 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2663 f_entry->fltr_info.fwd_id.hw_vsi_id =
2664 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2666 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2668 ice_acquire_lock(rule_lock);
2669 new_fltr = &f_entry->fltr_info;
/* source is the physical port for Rx rules, the VSI for Tx rules */
2670 if (new_fltr->flag & ICE_FLTR_RX)
2671 new_fltr->src = hw->port_info->lport;
2672 else if (new_fltr->flag & ICE_FLTR_TX)
2674 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2676 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
/* no matching rule yet: create a brand new forwarding rule */
2678 status = ice_create_pkt_fwd_rule(hw, f_entry);
2679 goto exit_add_rule_internal;
/* matching rule found: fold the new VSI into its VSI list */
2682 cur_fltr = &m_entry->fltr_info;
2683 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2685 exit_add_rule_internal:
2686 ice_release_lock(rule_lock);
/**
2691 * ice_remove_vsi_list_rule
2692 * @hw: pointer to the hardware structure
2693 * @vsi_list_id: VSI list ID generated as part of allocate resource
2694 * @lkup_type: switch rule filter lookup type
 *
2696 * The VSI list should be emptied before this function is called to remove the
 * VSI list resource.
 *
 * NOTE(review): the s_rule element is allocated and filled but no
 * ice_aq_sw_rules() call is visible in this excerpt — only the resource
 * free via ice_aq_alloc_free_vsi_list(). Confirm against the full source.
 */
2699 static enum ice_status
2700 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2701 enum ice_sw_lkup_type lkup_type)
2703 struct ice_aqc_sw_rules_elem *s_rule;
2704 enum ice_status status;
/* zero trailing VSIs: fixed-size vsi_list element only */
2707 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2708 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2710 return ICE_ERR_NO_MEMORY;
2712 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2713 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2715 /* Free the vsi_list resource that we allocated. It is assumed that the
2716 * list is empty at this point.
 */
2718 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2719 ice_aqc_opc_free_res);
2721 ice_free(hw, s_rule);
/**
2726 * ice_rem_update_vsi_list
2727 * @hw: pointer to the hardware structure
2728 * @vsi_handle: VSI handle of the VSI to remove
2729 * @fm_list: filter management entry for which the VSI list management needs to
 * be done
 *
 * Removes one VSI from the entry's VSI list. When the list shrinks to a
 * single VSI (non-VLAN lookups) the rule is converted back to a direct
 * FWD_TO_VSI rule; when the list becomes redundant it is freed entirely.
 */
2732 static enum ice_status
2733 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2734 struct ice_fltr_mgmt_list_entry *fm_list)
2736 enum ice_sw_lkup_type lkup_type;
2737 enum ice_status status = ICE_SUCCESS;
/* only VSI-list-backed, non-empty entries can be shrunk */
2740 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2741 fm_list->vsi_count == 0)
2742 return ICE_ERR_PARAM;
2744 /* A rule with the VSI being removed does not exist */
2745 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2746 return ICE_ERR_DOES_NOT_EXIST;
2748 lkup_type = fm_list->fltr_info.lkup_type;
2749 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* clear this VSI from the firmware-side list (remove=true) */
2750 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2751 ice_aqc_opc_update_sw_rules,
2756 fm_list->vsi_count--;
2757 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2759 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2760 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2761 struct ice_vsi_list_map_info *vsi_list_info =
2762 fm_list->vsi_list_info;
/* find the single remaining VSI in the bitmap */
2765 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2767 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2768 return ICE_ERR_OUT_OF_RANGE;
2770 /* Make sure VSI list is empty before removing it below */
2771 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2773 ice_aqc_opc_update_sw_rules,
/* convert the rule back to direct forwarding to the last VSI */
2778 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2779 tmp_fltr_info.fwd_id.hw_vsi_id =
2780 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2781 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2782 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2784 ice_debug(hw, ICE_DBG_SW,
2785 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2786 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2790 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are freed only when fully empty; others when one VSI left */
2793 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2794 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2795 struct ice_vsi_list_map_info *vsi_list_info =
2796 fm_list->vsi_list_info;
2798 /* Remove the VSI list since it is no longer used */
2799 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2801 ice_debug(hw, ICE_DBG_SW,
2802 "Failed to remove VSI list %d, error %d\n",
2803 vsi_list_id, status);
2807 LIST_DEL(&vsi_list_info->list_entry);
2808 ice_free(hw, vsi_list_info);
2809 fm_list->vsi_list_info = NULL;
/**
2816 * ice_remove_rule_internal - Remove a filter rule of a given type
 *
2818 * @hw: pointer to the hardware structure
2819 * @recp_id: recipe ID for which the rule needs to removed
2820 * @f_entry: rule entry containing filter information
 *
 * Looks up the managed rule matching f_entry, updates or removes its VSI
 * list membership, and deletes the firmware rule plus the book-keeping
 * entry once no VSI references it anymore. Runs under filt_rule_lock.
 */
2822 static enum ice_status
2823 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2824 struct ice_fltr_list_entry *f_entry)
2826 struct ice_switch_info *sw = hw->switch_info;
2827 struct ice_fltr_mgmt_list_entry *list_elem;
2828 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2829 enum ice_status status = ICE_SUCCESS;
2830 bool remove_rule = false;
2833 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2834 return ICE_ERR_PARAM;
2835 f_entry->fltr_info.fwd_id.hw_vsi_id =
2836 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2838 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2839 ice_acquire_lock(rule_lock);
2840 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2842 status = ICE_ERR_DOES_NOT_EXIST;
/* direct-forward rules (not VSI-list backed) are removed outright */
2846 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2848 } else if (!list_elem->vsi_list_info) {
2849 status = ICE_ERR_DOES_NOT_EXIST;
2851 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2852 /* a ref_cnt > 1 indicates that the vsi_list is being
2853 * shared by multiple rules. Decrement the ref_cnt and
2854 * remove this rule, but do not modify the list, as it
2855 * is in-use by other rules.
 */
2857 list_elem->vsi_list_info->ref_cnt--;
2860 /* a ref_cnt of 1 indicates the vsi_list is only used
2861 * by one rule. However, the original removal request is only
2862 * for a single VSI. Update the vsi_list first, and only
2863 * remove the rule if there are no further VSIs in this list.
 */
2865 vsi_handle = f_entry->fltr_info.vsi_handle;
2866 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2869 /* if VSI count goes to zero after updating the VSI list */
2870 if (list_elem->vsi_count == 0)
2875 /* Remove the lookup rule */
2876 struct ice_aqc_sw_rules_elem *s_rule;
2878 s_rule = (struct ice_aqc_sw_rules_elem *)
2879 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2881 status = ICE_ERR_NO_MEMORY;
2885 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2886 ice_aqc_opc_remove_sw_rules);
2888 status = ice_aq_sw_rules(hw, s_rule,
2889 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2890 ice_aqc_opc_remove_sw_rules, NULL);
2892 /* Remove a book keeping from the list */
2893 ice_free(hw, s_rule);
2898 LIST_DEL(&list_elem->list_entry);
2899 ice_free(hw, list_elem);
2902 ice_release_lock(rule_lock);
/**
2907 * ice_aq_get_res_alloc - get allocated resources
2908 * @hw: pointer to the HW struct
2909 * @num_entries: pointer to u16 to store the number of resource entries returned
2910 * @buf: pointer to user-supplied buffer
2911 * @buf_size: size of buff
2912 * @cd: pointer to command details structure or NULL
 *
2914 * The user-supplied buffer must be large enough to store the resource
2915 * information for all resource types. Each resource type is an
2916 * ice_aqc_get_res_resp_data_elem structure.
 */
2919 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2920 u16 buf_size, struct ice_sq_cd *cd)
2922 struct ice_aqc_get_res_alloc *resp;
2923 enum ice_status status;
2924 struct ice_aq_desc desc;
/* NULL-buffer rejection (guard condition elided in this view) */
2927 return ICE_ERR_BAD_PTR;
2929 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2930 return ICE_ERR_INVAL_SIZE;
2932 resp = &desc.params.get_res;
2934 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2935 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; populated only on successful AQ completion */
2937 if (!status && num_entries)
2938 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
/**
2944 * ice_aq_get_res_descs - get allocated resource descriptors
2945 * @hw: pointer to the hardware structure
2946 * @num_entries: number of resource entries in buffer
2947 * @buf: Indirect buffer to hold data parameters and response
2948 * @buf_size: size of buffer for indirect commands
2949 * @res_type: resource type
2950 * @res_shared: is resource shared
2951 * @desc_id: input - first desc ID to start; output - next desc ID
2952 * @cd: pointer to command details structure or NULL
 */
2955 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2956 struct ice_aqc_get_allocd_res_desc_resp *buf,
2957 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2958 struct ice_sq_cd *cd)
2960 struct ice_aqc_get_allocd_res_desc *cmd;
2961 struct ice_aq_desc desc;
2962 enum ice_status status;
2964 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2966 cmd = &desc.params.get_res_desc;
/* NULL-buffer rejection (guard condition elided in this view) */
2969 return ICE_ERR_PARAM;
/* the indirect buffer must hold exactly num_entries response elements */
2971 if (buf_size != (num_entries * sizeof(*buf)))
2972 return ICE_ERR_PARAM;
2974 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* encode resource type plus shared/dedicated flag into one field */
2976 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2977 ICE_AQC_RES_TYPE_M) | (res_shared ?
2978 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2979 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2981 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* hand back the continuation cursor for the next query */
2983 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
/**
2989 * ice_add_mac - Add a MAC address based filter rule
2990 * @hw: pointer to the hardware structure
2991 * @m_list: list of MAC addresses and forwarding information
 *
2993 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2994 * multiple unicast addresses, the function assumes that all the
2995 * addresses are unique in a given add_mac call. It doesn't
2996 * check for duplicates in this case, removing duplicates from a given
2997 * list should be taken care of in the caller of this function.
 *
 * Three phases: (1) validate every entry and dispatch multicast /
 * shared-unicast addresses through ice_add_rule_internal(); (2) bulk-build
 * one switch-rule buffer for all remaining unicast addresses and submit it
 * in ICE_AQ_MAX_BUF_LEN-sized chunks; (3) walk the list again to record
 * firmware rule IDs and create book-keeping entries.
 */
3000 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3002 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3003 struct ice_fltr_list_entry *m_list_itr;
3004 struct LIST_HEAD_TYPE *rule_head;
3005 u16 elem_sent, total_elem_left;
3006 struct ice_switch_info *sw;
3007 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3008 enum ice_status status = ICE_SUCCESS;
3009 u16 num_unicast = 0;
/* NULL m_list/hw rejection (guard condition elided in this view) */
3013 return ICE_ERR_PARAM;
3015 sw = hw->switch_info;
3016 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Phase 1: validate entries; count unicast addrs for the bulk path */
3017 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3019 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3023 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3024 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3025 if (!ice_is_vsi_valid(hw, vsi_handle))
3026 return ICE_ERR_PARAM;
3027 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3028 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3029 /* update the src in case it is VSI num */
3030 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3031 return ICE_ERR_PARAM;
3032 m_list_itr->fltr_info.src = hw_vsi_id;
3033 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3034 IS_ZERO_ETHER_ADDR(add))
3035 return ICE_ERR_PARAM;
3036 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3037 /* Don't overwrite the unicast address */
3038 ice_acquire_lock(rule_lock);
3039 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3040 &m_list_itr->fltr_info)) {
3041 ice_release_lock(rule_lock);
3042 return ICE_ERR_ALREADY_EXISTS;
3044 ice_release_lock(rule_lock);
3046 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3047 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
/* shared/multicast entries go through the per-rule slow path */
3048 m_list_itr->status =
3049 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3051 if (m_list_itr->status)
3052 return m_list_itr->status;
3056 ice_acquire_lock(rule_lock);
3057 /* Exit if no suitable entries were found for adding bulk switch rule */
3059 status = ICE_SUCCESS;
3060 goto ice_add_mac_exit;
3063 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3065 /* Allocate switch rule buffer for the bulk update for unicast */
3066 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3067 s_rule = (struct ice_aqc_sw_rules_elem *)
3068 ice_calloc(hw, num_unicast, s_rule_size);
3070 status = ICE_ERR_NO_MEMORY;
3071 goto ice_add_mac_exit;
/* Phase 2: fill one rule element per unicast address */
3075 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3077 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3078 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3080 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3081 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3082 ice_aqc_opc_add_sw_rules);
3083 r_iter = (struct ice_aqc_sw_rules_elem *)
3084 ((u8 *)r_iter + s_rule_size);
3088 /* Call AQ bulk switch rule update for all unicast addresses */
3090 /* Call AQ switch rule in AQ_MAX chunk */
3091 for (total_elem_left = num_unicast; total_elem_left > 0;
3092 total_elem_left -= elem_sent) {
3093 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* cap each submission to what fits in one AQ buffer */
3095 elem_sent = min(total_elem_left,
3096 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3097 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3098 elem_sent, ice_aqc_opc_add_sw_rules,
3101 goto ice_add_mac_exit;
3102 r_iter = (struct ice_aqc_sw_rules_elem *)
3103 ((u8 *)r_iter + (elem_sent * s_rule_size));
3106 /* Fill up rule ID based on the value returned from FW */
/* Phase 3: harvest firmware indices and create tracking entries */
3108 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3110 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3111 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3112 struct ice_fltr_mgmt_list_entry *fm_entry;
3114 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3115 f_info->fltr_rule_id =
3116 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3117 f_info->fltr_act = ICE_FWD_TO_VSI;
3118 /* Create an entry to track this MAC address */
3119 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3120 ice_malloc(hw, sizeof(*fm_entry));
3122 status = ICE_ERR_NO_MEMORY;
3123 goto ice_add_mac_exit;
3125 fm_entry->fltr_info = *f_info;
3126 fm_entry->vsi_count = 1;
3127 /* The book keeping entries will get removed when
3128 * base driver calls remove filter AQ command
 */
3131 LIST_ADD(&fm_entry->list_entry, rule_head);
3132 r_iter = (struct ice_aqc_sw_rules_elem *)
3133 ((u8 *)r_iter + s_rule_size);
3138 ice_release_lock(rule_lock);
3140 ice_free(hw, s_rule);
/**
3145 * ice_add_vlan_internal - Add one VLAN based filter rule
3146 * @hw: pointer to the hardware structure
3147 * @f_entry: filter entry containing one VLAN information
 *
 * VLAN rules always forward to a VSI list (used for pruning). Three cases:
 * no rule exists yet (create, reusing a single-VSI list when one matches);
 * a rule exists with an unshared list (extend it in place); or the list is
 * shared, in which case a new two-VSI list is created and the rule is
 * repointed at it.
 */
3149 static enum ice_status
3150 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3152 struct ice_switch_info *sw = hw->switch_info;
3153 struct ice_fltr_mgmt_list_entry *v_list_itr;
3154 struct ice_fltr_info *new_fltr, *cur_fltr;
3155 enum ice_sw_lkup_type lkup_type;
3156 u16 vsi_list_id = 0, vsi_handle;
3157 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3158 enum ice_status status = ICE_SUCCESS;
3160 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3161 return ICE_ERR_PARAM;
3163 f_entry->fltr_info.fwd_id.hw_vsi_id =
3164 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3165 new_fltr = &f_entry->fltr_info;
3167 /* VLAN ID should only be 12 bits */
3168 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3169 return ICE_ERR_PARAM;
3171 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3172 return ICE_ERR_PARAM;
3174 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3175 lkup_type = new_fltr->lkup_type;
3176 vsi_handle = new_fltr->vsi_handle;
3177 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3178 ice_acquire_lock(rule_lock);
3179 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* case 1: no rule for this VLAN yet */
3181 struct ice_vsi_list_map_info *map_info = NULL;
3183 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3184 /* All VLAN pruning rules use a VSI list. Check if
3185 * there is already a VSI list containing VSI that we
3186 * want to add. If found, use the same vsi_list_id for
3187 * this new VLAN rule or else create a new list.
 */
3189 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3193 status = ice_create_vsi_list_rule(hw,
3201 /* Convert the action to forwarding to a VSI list. */
3202 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3203 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3206 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* re-find the entry just created to attach list-map info */
3208 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3211 status = ICE_ERR_DOES_NOT_EXIST;
3214 /* reuse VSI list for new rule and increment ref_cnt */
3216 v_list_itr->vsi_list_info = map_info;
3217 map_info->ref_cnt++;
3219 v_list_itr->vsi_list_info =
3220 ice_create_vsi_list_map(hw, &vsi_handle,
/* case 2: rule exists and its VSI list is not shared */
3224 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3225 /* Update existing VSI list to add new VSI ID only if it used
 * by one VLAN rule.
 */
3228 cur_fltr = &v_list_itr->fltr_info;
3229 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
/* case 3: rule exists but its VSI list is shared */
3232 /* If VLAN rule exists and VSI list being used by this rule is
3233 * referenced by more than 1 VLAN rule. Then create a new VSI
3234 * list appending previous VSI with new VSI and update existing
3235 * VLAN rule to point to new VSI list ID
 */
3237 struct ice_fltr_info tmp_fltr;
3238 u16 vsi_handle_arr[2];
3241 /* Current implementation only supports reusing VSI list with
3242 * one VSI count. We should never hit below condition
 */
3244 if (v_list_itr->vsi_count > 1 &&
3245 v_list_itr->vsi_list_info->ref_cnt > 1) {
3246 ice_debug(hw, ICE_DBG_SW,
3247 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3248 status = ICE_ERR_CFG;
/* the single VSI currently in the shared list */
3253 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3256 /* A rule already exists with the new VSI being added */
3257 if (cur_handle == vsi_handle) {
3258 status = ICE_ERR_ALREADY_EXISTS;
3262 vsi_handle_arr[0] = cur_handle;
3263 vsi_handle_arr[1] = vsi_handle;
3264 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3265 &vsi_list_id, lkup_type);
3269 tmp_fltr = v_list_itr->fltr_info;
3270 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3271 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3272 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3273 /* Update the previous switch rule to a new VSI list which
3274 * includes current VSI that is requested
 */
3276 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3280 /* before overriding VSI list map info. decrement ref_cnt of
 * previous VSI list
 */
3283 v_list_itr->vsi_list_info->ref_cnt--;
3285 /* now update to newly created list */
3286 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3287 v_list_itr->vsi_list_info =
3288 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3290 v_list_itr->vsi_count++;
3294 ice_release_lock(rule_lock);
/**
3299 * ice_add_vlan - Add VLAN based filter rule
3300 * @hw: pointer to the hardware structure
3301 * @v_list: list of VLAN entries and forwarding information
 *
 * Validates each entry's lookup type, forces the Tx direction flag, and
 * adds entries one at a time; stops and returns on the first per-entry
 * failure (recorded in that entry's status field).
 */
3304 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3306 struct ice_fltr_list_entry *v_list_itr;
/* NULL v_list/hw rejection (guard condition elided in this view) */
3309 return ICE_ERR_PARAM;
3311 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3313 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3314 return ICE_ERR_PARAM;
3315 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3316 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3317 if (v_list_itr->status)
3318 return v_list_itr->status;
/**
3324 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3325 * @hw: pointer to the hardware structure
3326 * @mv_list: list of MAC and VLAN filters
 *
3328 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3329 * pruning bits enabled, then it is the responsibility of the caller to make
3330 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3331 * VLAN won't be received on that VSI otherwise.
 *
 * Each entry's result is stored in its status field; the first failure
 * aborts the walk and is returned.
 */
3334 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3336 struct ice_fltr_list_entry *mv_list_itr;
3338 if (!mv_list || !hw)
3339 return ICE_ERR_PARAM;
3341 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3343 enum ice_sw_lkup_type l_type =
3344 mv_list_itr->fltr_info.lkup_type;
3346 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3347 return ICE_ERR_PARAM;
/* MAC-VLAN pairs are Tx-direction rules */
3348 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3349 mv_list_itr->status =
3350 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3352 if (mv_list_itr->status)
3353 return mv_list_itr->status;
/**
3359 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3360 * @hw: pointer to the hardware structure
3361 * @em_list: list of ether type MAC filter, MAC is optional
 *
3363 * This function requires the caller to populate the entries in
3364 * the filter list with the necessary fields (including flags to
3365 * indicate Tx or Rx rules).
 */
3368 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3370 struct ice_fltr_list_entry *em_list_itr;
3372 if (!em_list || !hw)
3373 return ICE_ERR_PARAM;
3375 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3377 enum ice_sw_lkup_type l_type =
3378 em_list_itr->fltr_info.lkup_type;
/* accepts both ethertype-only and ethertype+MAC lookups */
3380 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3381 l_type != ICE_SW_LKUP_ETHERTYPE)
3382 return ICE_ERR_PARAM;
3384 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3386 if (em_list_itr->status)
3387 return em_list_itr->status;
3393 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3394 * @hw: pointer to the hardware structure
3395 * @em_list: list of ethertype or ethertype MAC entries
3398 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3400 struct ice_fltr_list_entry *em_list_itr, *tmp;
3402 if (!em_list || !hw)
3403 return ICE_ERR_PARAM;
/* Safe-iterator walk; per-entry removal result is stored in ->status
 * and the first failure aborts the walk.
 */
3405 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3407 enum ice_sw_lkup_type l_type =
3408 em_list_itr->fltr_info.lkup_type;
3410 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3411 l_type != ICE_SW_LKUP_ETHERTYPE)
3412 return ICE_ERR_PARAM;
3414 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3416 if (em_list_itr->status)
3417 return em_list_itr->status;
3424 * ice_rem_sw_rule_info
3425 * @hw: pointer to the hardware structure
3426 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every basic (non-advanced) filter-management bookkeeping entry
 * on @rule_head. Software bookkeeping only; no AQ commands are issued here.
 */
3429 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3431 if (!LIST_EMPTY(rule_head)) {
3432 struct ice_fltr_mgmt_list_entry *entry;
3433 struct ice_fltr_mgmt_list_entry *tmp;
3435 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3436 ice_fltr_mgmt_list_entry, list_entry) {
3437 LIST_DEL(&entry->list_entry);
3438 ice_free(hw, entry);
3444 * ice_rem_adv_rule_info
3445 * @hw: pointer to the hardware structure
3446 * @rule_head: pointer to the switch list structure that we want to delete
/* Advanced-rule counterpart of ice_rem_sw_rule_info(): each entry owns a
 * separately allocated ->lkups array, which must be freed before the
 * entry itself.
 */
3449 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3451 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3452 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3454 if (LIST_EMPTY(rule_head))
3457 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3458 ice_adv_fltr_mgmt_list_entry, list_entry) {
3459 LIST_DEL(&lst_itr->list_entry);
3460 ice_free(hw, lst_itr->lkups);
3461 ice_free(hw, lst_itr);
3466 * ice_rem_all_sw_rules_info
3467 * @hw: pointer to the hardware structure
/* Releases the filter bookkeeping of every recipe: recipes flagged
 * adv_rule use the advanced cleanup path, all others the basic one.
 */
3469 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3471 struct ice_switch_info *sw = hw->switch_info;
3474 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3475 struct LIST_HEAD_TYPE *rule_head;
3477 rule_head = &sw->recp_list[i].filt_rules;
3478 if (!sw->recp_list[i].adv_rule)
3479 ice_rem_sw_rule_info(hw, rule_head);
3481 ice_rem_adv_rule_info(hw, rule_head);
3486 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3487 * @pi: pointer to the port_info structure
3488 * @vsi_handle: VSI handle to set as default
3489 * @set: true to add the above mentioned switch rule, false to remove it
3490 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3492 * add filter rule to set/unset given VSI as default VSI for the switch
3493 * (represented by swid)
3496 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3499 struct ice_aqc_sw_rules_elem *s_rule;
3500 struct ice_fltr_info f_info;
3501 struct ice_hw *hw = pi->hw;
3502 enum ice_adminq_opc opcode;
3503 enum ice_status status;
3507 if (!ice_is_vsi_valid(hw, vsi_handle))
3508 return ICE_ERR_PARAM;
3509 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* An "add" rule carries the dummy ethernet header payload, a "remove"
 * rule does not, so the AQ buffer size differs between the two cases.
 */
3511 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3512 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3513 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
/* NOTE(review): the numbered listing dropped line 3514 here; it is
 * presumably the "if (!s_rule)" allocation check guarding this return —
 * confirm against the full source.
 */
3515 return ICE_ERR_NO_MEMORY;
3517 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3519 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3520 f_info.flag = direction;
3521 f_info.fltr_act = ICE_FWD_TO_VSI;
3522 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced from the logical port; Tx default rules
 * are sourced from the VSI itself. On removal the cached rule ID from
 * port_info identifies the rule to delete.
 */
3524 if (f_info.flag & ICE_FLTR_RX) {
3525 f_info.src = pi->lport;
3526 f_info.src_id = ICE_SRC_ID_LPORT;
3528 f_info.fltr_rule_id =
3529 pi->dflt_rx_vsi_rule_id;
3530 } else if (f_info.flag & ICE_FLTR_TX) {
3531 f_info.src_id = ICE_SRC_ID_VSI;
3532 f_info.src = hw_vsi_id;
3534 f_info.fltr_rule_id =
3535 pi->dflt_tx_vsi_rule_id;
3539 opcode = ice_aqc_opc_add_sw_rules;
3541 opcode = ice_aqc_opc_remove_sw_rules;
3543 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3545 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3546 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, update the cached default-VSI bookkeeping in port_info:
 * record the hw VSI number and returned rule index when setting, or
 * reset them to the invalid sentinels when clearing.
 */
3549 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3551 if (f_info.flag & ICE_FLTR_TX) {
3552 pi->dflt_tx_vsi_num = hw_vsi_id;
3553 pi->dflt_tx_vsi_rule_id = index;
3554 } else if (f_info.flag & ICE_FLTR_RX) {
3555 pi->dflt_rx_vsi_num = hw_vsi_id;
3556 pi->dflt_rx_vsi_rule_id = index;
3559 if (f_info.flag & ICE_FLTR_TX) {
3560 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3561 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3562 } else if (f_info.flag & ICE_FLTR_RX) {
3563 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3564 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3569 ice_free(hw, s_rule);
3574 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3575 * @hw: pointer to the hardware structure
3576 * @recp_id: lookup type for which the specified rule needs to be searched
3577 * @f_info: rule information
3579 * Helper function to search for a unicast rule entry - this is to be used
3580 * to remove unicast MAC filter that is not shared with other VSIs on the
3583 * Returns pointer to entry storing the rule if found
3585 static struct ice_fltr_mgmt_list_entry *
3586 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3587 struct ice_fltr_info *f_info)
3589 struct ice_switch_info *sw = hw->switch_info;
3590 struct ice_fltr_mgmt_list_entry *list_itr;
3591 struct LIST_HEAD_TYPE *list_head;
3593 list_head = &sw->recp_list[recp_id].filt_rules;
/* Match requires lookup data, hw VSI ID, and direction flag to all be
 * equal. Caller is expected to hold the recipe's filt_rule_lock.
 * NOTE(review): the "return list_itr" / "return NULL" lines fall in a
 * gap of this numbered listing (3601-3606) — confirm in full source.
 */
3594 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3596 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3597 sizeof(f_info->l_data)) &&
3598 f_info->fwd_id.hw_vsi_id ==
3599 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3600 f_info->flag == list_itr->fltr_info.flag)
3607 * ice_remove_mac - remove a MAC address based filter rule
3608 * @hw: pointer to the hardware structure
3609 * @m_list: list of MAC addresses and forwarding information
3611 * This function removes either a MAC filter rule or a specific VSI from a
3612 * VSI list for a multicast MAC address.
3614 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3615 * ice_add_mac. Caller should be aware that this call will only work if all
3616 * the entries passed into m_list were added previously. It will not attempt to
3617 * do a partial remove of entries that were found.
3620 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3622 struct ice_fltr_list_entry *list_itr, *tmp;
3623 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3626 return ICE_ERR_PARAM;
3628 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3629 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3631 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3632 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3635 if (l_type != ICE_SW_LKUP_MAC)
3636 return ICE_ERR_PARAM;
3638 vsi_handle = list_itr->fltr_info.vsi_handle;
3639 if (!ice_is_vsi_valid(hw, vsi_handle))
3640 return ICE_ERR_PARAM;
3642 list_itr->fltr_info.fwd_id.hw_vsi_id =
3643 ice_get_hw_vsi_num(hw, vsi_handle);
3644 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3645 /* Don't remove the unicast address that belongs to
3646 * another VSI on the switch, since it is not being
/* When unicast addresses are not shared between VSIs, verify under the
 * rule lock that this exact rule (MAC + hw VSI + flag) was installed
 * for this VSI before attempting removal.
 */
3649 ice_acquire_lock(rule_lock);
3650 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3651 &list_itr->fltr_info)) {
3652 ice_release_lock(rule_lock);
3653 return ICE_ERR_DOES_NOT_EXIST;
3655 ice_release_lock(rule_lock);
3657 list_itr->status = ice_remove_rule_internal(hw,
3660 if (list_itr->status)
3661 return list_itr->status;
3667 * ice_remove_vlan - Remove VLAN based filter rule
3668 * @hw: pointer to the hardware structure
3669 * @v_list: list of VLAN entries and forwarding information
3672 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3674 struct ice_fltr_list_entry *v_list_itr, *tmp;
3677 return ICE_ERR_PARAM;
/* Only ICE_SW_LKUP_VLAN entries are accepted; the walk stops at the
 * first per-entry failure, which is also left in ->status.
 */
3679 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3681 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3683 if (l_type != ICE_SW_LKUP_VLAN)
3684 return ICE_ERR_PARAM;
3685 v_list_itr->status = ice_remove_rule_internal(hw,
3688 if (v_list_itr->status)
3689 return v_list_itr->status;
3695 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3696 * @hw: pointer to the hardware structure
3697 * @v_list: list of MAC VLAN entries and forwarding information
3700 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3702 struct ice_fltr_list_entry *v_list_itr, *tmp;
3705 return ICE_ERR_PARAM;
/* Mirrors ice_remove_vlan() but for combined MAC+VLAN lookup rules. */
3707 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3709 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3711 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3712 return ICE_ERR_PARAM;
3713 v_list_itr->status =
3714 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3716 if (v_list_itr->status)
3717 return v_list_itr->status;
3723 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3724 * @fm_entry: filter entry to inspect
3725 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose membership bitmap has this VSI's bit set.
 */
3728 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3730 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3731 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3732 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3733 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3738 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3739 * @hw: pointer to the hardware structure
3740 * @vsi_handle: VSI handle to remove filters from
3741 * @vsi_list_head: pointer to the list to add entry to
3742 * @fi: pointer to fltr_info of filter entry to copy & add
3744 * Helper function, used when creating a list of filters to remove from
3745 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3746 * original filter entry, with the exception of fltr_info.fltr_act and
3747 * fltr_info.fwd_id fields. These are set such that later logic can
3748 * extract which VSI to remove the fltr from, and pass on that information.
3750 static enum ice_status
3751 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3752 struct LIST_HEAD_TYPE *vsi_list_head,
3753 struct ice_fltr_info *fi)
3755 struct ice_fltr_list_entry *tmp;
3757 /* this memory is freed up in the caller function
3758 * once filters for this VSI are removed
3760 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3762 return ICE_ERR_NO_MEMORY;
3764 tmp->fltr_info = *fi;
3766 /* Overwrite these fields to indicate which VSI to remove filter from,
3767 * so find and remove logic can extract the information from the
3768 * list entries. Note that original entries will still have proper
/* Force the copy to a direct FWD_TO_VSI action targeting vsi_handle so
 * the removal path treats it as a single-VSI rule.
 */
3771 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3772 tmp->fltr_info.vsi_handle = vsi_handle;
3773 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3775 LIST_ADD(&tmp->list_entry, vsi_list_head);
3781 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3782 * @hw: pointer to the hardware structure
3783 * @vsi_handle: VSI handle to remove filters from
3784 * @lkup_list_head: pointer to the list that has certain lookup type filters
3785 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3787 * Locates all filters in lkup_list_head that are used by the given VSI,
3788 * and adds COPIES of those entries to vsi_list_head (intended to be used
3789 * to remove the listed filters).
3790 * Note that this means all entries in vsi_list_head must be explicitly
3791 * deallocated by the caller when done with list.
3793 static enum ice_status
3794 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3795 struct LIST_HEAD_TYPE *lkup_list_head,
3796 struct LIST_HEAD_TYPE *vsi_list_head)
3798 struct ice_fltr_mgmt_list_entry *fm_entry;
3799 enum ice_status status = ICE_SUCCESS;
3801 /* check to make sure VSI ID is valid and within boundary */
3802 if (!ice_is_vsi_valid(hw, vsi_handle))
3803 return ICE_ERR_PARAM;
3805 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3806 ice_fltr_mgmt_list_entry, list_entry) {
3807 struct ice_fltr_info *fi;
3809 fi = &fm_entry->fltr_info;
/* Skip entries this VSI does not use; copy the rest onto the caller's
 * removal list (allocation failure from the helper aborts the loop —
 * the break/exit lines fall in a listing gap; confirm in full source).
 */
3810 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3813 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3823 * ice_determine_promisc_mask
3824 * @fi: filter info to parse
3826 * Helper function to determine which ICE_PROMISC_ mask corresponds
3827 * to given filter into.
3829 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3831 u16 vid = fi->l_data.mac_vlan.vlan_id;
3832 u8 *macaddr = fi->l_data.mac.mac_addr;
3833 bool is_tx_fltr = false;
3834 u8 promisc_mask = 0;
3836 if (fi->flag == ICE_FLTR_TX)
/* Classify by destination MAC (broadcast / multicast / unicast, else
 * VLAN) and fold in the Tx/Rx direction to pick the ICE_PROMISC_* bit.
 * NOTE(review): vid is read but its use falls outside the visible
 * lines of this listing — confirm against the full source.
 */
3839 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3840 promisc_mask |= is_tx_fltr ?
3841 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3842 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3843 promisc_mask |= is_tx_fltr ?
3844 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3845 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3846 promisc_mask |= is_tx_fltr ?
3847 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3849 promisc_mask |= is_tx_fltr ?
3850 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3852 return promisc_mask;
3856 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3857 * @hw: pointer to the hardware structure
3858 * @vsi_handle: VSI handle to retrieve info from
3859 * @promisc_mask: pointer to mask to be filled in
3860 * @vid: VLAN ID of promisc VLAN VSI
3863 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3866 struct ice_switch_info *sw = hw->switch_info;
3867 struct ice_fltr_mgmt_list_entry *itr;
3868 struct LIST_HEAD_TYPE *rule_head;
3869 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3871 if (!ice_is_vsi_valid(hw, vsi_handle))
3872 return ICE_ERR_PARAM;
/* Walk the PROMISC recipe's rules under its lock, OR-ing in the
 * promisc bit of every rule this VSI uses.
 */
3876 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3877 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3879 ice_acquire_lock(rule_lock);
3880 LIST_FOR_EACH_ENTRY(itr, rule_head,
3881 ice_fltr_mgmt_list_entry, list_entry) {
3882 /* Continue if this filter doesn't apply to this VSI or the
3883 * VSI ID is not in the VSI map for this filter
3885 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3888 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3890 ice_release_lock(rule_lock);
3896 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3897 * @hw: pointer to the hardware structure
3898 * @vsi_handle: VSI handle to retrieve info from
3899 * @promisc_mask: pointer to mask to be filled in
3900 * @vid: VLAN ID of promisc VLAN VSI
3903 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3906 struct ice_switch_info *sw = hw->switch_info;
3907 struct ice_fltr_mgmt_list_entry *itr;
3908 struct LIST_HEAD_TYPE *rule_head;
3909 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3911 if (!ice_is_vsi_valid(hw, vsi_handle))
3912 return ICE_ERR_PARAM;
/* Same walk as ice_get_vsi_promisc() but over the PROMISC_VLAN recipe. */
3916 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3917 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3919 ice_acquire_lock(rule_lock);
3920 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3922 /* Continue if this filter doesn't apply to this VSI or the
3923 * VSI ID is not in the VSI map for this filter
3925 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3928 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3930 ice_release_lock(rule_lock);
3936 * ice_remove_promisc - Remove promisc based filter rules
3937 * @hw: pointer to the hardware structure
3938 * @recp_id: recipe ID for which the rule needs to removed
3939 * @v_list: list of promisc entries
3941 static enum ice_status
3942 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3943 struct LIST_HEAD_TYPE *v_list)
3945 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* No lookup-type validation here: the caller supplies the recipe ID
 * (PROMISC or PROMISC_VLAN) and a pre-filtered entry list.
 */
3947 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3949 v_list_itr->status =
3950 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3951 if (v_list_itr->status)
3952 return v_list_itr->status;
3958 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3959 * @hw: pointer to the hardware structure
3960 * @vsi_handle: VSI handle to clear mode
3961 * @promisc_mask: mask of promiscuous config bits to clear
3962 * @vid: VLAN ID to clear VLAN promiscuous
3965 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3968 struct ice_switch_info *sw = hw->switch_info;
3969 struct ice_fltr_list_entry *fm_entry, *tmp;
3970 struct LIST_HEAD_TYPE remove_list_head;
3971 struct ice_fltr_mgmt_list_entry *itr;
3972 struct LIST_HEAD_TYPE *rule_head;
3973 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3974 enum ice_status status = ICE_SUCCESS;
3977 if (!ice_is_vsi_valid(hw, vsi_handle))
3978 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise the plain
 * PROMISC recipe is used.
 */
3980 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3981 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3983 recipe_id = ICE_SW_LKUP_PROMISC;
3985 rule_head = &sw->recp_list[recipe_id].filt_rules;
3986 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3988 INIT_LIST_HEAD(&remove_list_head);
/* Pass 1 (under lock): collect COPIES of matching rules this VSI uses
 * onto remove_list_head. A rule qualifies only if its promisc bits are
 * fully covered by promisc_mask, and for VLAN recipes its VLAN ID
 * matches @vid.
 */
3990 ice_acquire_lock(rule_lock);
3991 LIST_FOR_EACH_ENTRY(itr, rule_head,
3992 ice_fltr_mgmt_list_entry, list_entry) {
3993 struct ice_fltr_info *fltr_info;
3994 u8 fltr_promisc_mask = 0;
3996 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3998 fltr_info = &itr->fltr_info;
4000 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4001 vid != fltr_info->l_data.mac_vlan.vlan_id)
4004 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4006 /* Skip if filter is not completely specified by given mask */
4007 if (fltr_promisc_mask & ~promisc_mask)
4010 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4014 ice_release_lock(rule_lock);
4015 goto free_fltr_list;
4018 ice_release_lock(rule_lock);
/* Pass 2 (lock released): remove the collected rules, then free every
 * copied list entry regardless of the removal outcome.
 */
4020 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4023 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4024 ice_fltr_list_entry, list_entry) {
4025 LIST_DEL(&fm_entry->list_entry);
4026 ice_free(hw, fm_entry);
4033 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4034 * @hw: pointer to the hardware structure
4035 * @vsi_handle: VSI handle to configure
4036 * @promisc_mask: mask of promiscuous config bits
4037 * @vid: VLAN ID to set VLAN promiscuous
4040 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4042 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4043 struct ice_fltr_list_entry f_list_entry;
4044 struct ice_fltr_info new_fltr;
4045 enum ice_status status = ICE_SUCCESS;
4051 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4053 if (!ice_is_vsi_valid(hw, vsi_handle))
4054 return ICE_ERR_PARAM;
4055 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4057 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc requests use the PROMISC_VLAN recipe keyed by @vid;
 * everything else uses the plain PROMISC recipe.
 */
4059 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4060 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4061 new_fltr.l_data.mac_vlan.vlan_id = vid;
4062 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4064 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4065 recipe_id = ICE_SW_LKUP_PROMISC;
4068 /* Separate filters must be set for each direction/packet type
4069 * combination, so we will loop over the mask value, store the
4070 * individual type, and clear it out in the input mask as it
4073 while (promisc_mask) {
/* Consume exactly one direction/packet-type bit per iteration; the
 * order below (ucast, mcast, bcast, then VLAN) fixes which rule is
 * programmed first.
 */
4079 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4080 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4081 pkt_type = UCAST_FLTR;
4082 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4083 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4084 pkt_type = UCAST_FLTR;
4086 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4087 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4088 pkt_type = MCAST_FLTR;
4089 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4090 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4091 pkt_type = MCAST_FLTR;
4093 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4094 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4095 pkt_type = BCAST_FLTR;
4096 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4097 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4098 pkt_type = BCAST_FLTR;
4102 /* Check for VLAN promiscuous flag */
4103 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4104 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4105 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4106 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4110 /* Set filter DA based on packet type */
4111 mac_addr = new_fltr.l_data.mac.mac_addr;
4112 if (pkt_type == BCAST_FLTR) {
4113 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4114 } else if (pkt_type == MCAST_FLTR ||
4115 pkt_type == UCAST_FLTR) {
4116 /* Use the dummy ether header DA */
4117 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4118 ICE_NONDMA_TO_NONDMA)
4119 if (pkt_type == MCAST_FLTR)
4120 mac_addr[0] |= 0x1; /* Set multicast bit */
4123 /* Need to reset this to zero for all iterations */
/* Tx rules are sourced from the target VSI; Rx rules from the logical
 * port.
 */
4126 new_fltr.flag |= ICE_FLTR_TX;
4127 new_fltr.src = hw_vsi_id;
4129 new_fltr.flag |= ICE_FLTR_RX;
4130 new_fltr.src = hw->port_info->lport;
4133 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4134 new_fltr.vsi_handle = vsi_handle;
4135 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4136 f_list_entry.fltr_info = new_fltr;
4138 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4139 if (status != ICE_SUCCESS)
4140 goto set_promisc_exit;
4148 * ice_set_vlan_vsi_promisc
4149 * @hw: pointer to the hardware structure
4150 * @vsi_handle: VSI handle to configure
4151 * @promisc_mask: mask of promiscuous config bits
4152 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4154 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4157 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4158 bool rm_vlan_promisc)
4160 struct ice_switch_info *sw = hw->switch_info;
4161 struct ice_fltr_list_entry *list_itr, *tmp;
4162 struct LIST_HEAD_TYPE vsi_list_head;
4163 struct LIST_HEAD_TYPE *vlan_head;
4164 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4165 enum ice_status status;
4168 INIT_LIST_HEAD(&vsi_list_head);
/* Snapshot (copy) all VLAN rules this VSI uses while holding the VLAN
 * recipe lock, then apply set/clear per VLAN with the lock released.
 */
4169 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4170 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4171 ice_acquire_lock(vlan_lock);
4172 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4174 ice_release_lock(vlan_lock);
4176 goto free_fltr_list;
4178 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4180 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4181 if (rm_vlan_promisc)
4182 status = ice_clear_vsi_promisc(hw, vsi_handle,
4183 promisc_mask, vlan_id);
4185 status = ice_set_vsi_promisc(hw, vsi_handle,
4186 promisc_mask, vlan_id);
/* Always free the copied entries, success or failure. */
4192 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4193 ice_fltr_list_entry, list_entry) {
4194 LIST_DEL(&list_itr->list_entry);
4195 ice_free(hw, list_itr);
4201 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4202 * @hw: pointer to the hardware structure
4203 * @vsi_handle: VSI handle to remove filters from
4204 * @lkup: switch rule filter lookup type
4207 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4208 enum ice_sw_lkup_type lkup)
4210 struct ice_switch_info *sw = hw->switch_info;
4211 struct ice_fltr_list_entry *fm_entry;
4212 struct LIST_HEAD_TYPE remove_list_head;
4213 struct LIST_HEAD_TYPE *rule_head;
4214 struct ice_fltr_list_entry *tmp;
4215 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4216 enum ice_status status;
4218 INIT_LIST_HEAD(&remove_list_head);
/* Collect copies of this VSI's rules for the given lookup type under
 * the recipe lock, then dispatch to the matching remove helper.
 */
4219 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4220 rule_head = &sw->recp_list[lkup].filt_rules;
4221 ice_acquire_lock(rule_lock);
4222 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4224 ice_release_lock(rule_lock);
4229 case ICE_SW_LKUP_MAC:
4230 ice_remove_mac(hw, &remove_list_head);
4232 case ICE_SW_LKUP_VLAN:
4233 ice_remove_vlan(hw, &remove_list_head);
4235 case ICE_SW_LKUP_PROMISC:
4236 case ICE_SW_LKUP_PROMISC_VLAN:
4237 ice_remove_promisc(hw, lkup, &remove_list_head);
4239 case ICE_SW_LKUP_MAC_VLAN:
4240 ice_remove_mac_vlan(hw, &remove_list_head);
4242 case ICE_SW_LKUP_ETHERTYPE:
4243 case ICE_SW_LKUP_ETHERTYPE_MAC:
4244 ice_remove_eth_mac(hw, &remove_list_head);
4246 case ICE_SW_LKUP_DFLT:
4247 ice_debug(hw, ICE_DBG_SW,
4248 "Remove filters for this lookup type hasn't been implemented yet\n");
4250 case ICE_SW_LKUP_LAST:
4251 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the copied entries regardless of per-type removal results. */
4255 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4256 ice_fltr_list_entry, list_entry) {
4257 LIST_DEL(&fm_entry->list_entry);
4258 ice_free(hw, fm_entry);
4263 * ice_remove_vsi_fltr - Remove all filters for a VSI
4264 * @hw: pointer to the hardware structure
4265 * @vsi_handle: VSI handle to remove filters from
/* Convenience wrapper: removes this VSI's filters for every supported
 * lookup type, one recipe at a time.
 */
4267 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4269 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4271 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4272 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4273 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4274 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4275 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4276 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4277 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4278 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4282 * ice_alloc_res_cntr - allocating resource counter
4283 * @hw: pointer to the hardware structure
4284 * @type: type of resource
4285 * @alloc_shared: if set it is shared else dedicated
4286 * @num_items: number of entries requested for FD resource type
4287 * @counter_id: counter index returned by AQ call
4290 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4293 struct ice_aqc_alloc_free_res_elem *buf;
4294 enum ice_status status;
4297 /* Allocate resource */
4298 buf_len = sizeof(*buf);
4299 buf = (struct ice_aqc_alloc_free_res_elem *)
4300 ice_malloc(hw, buf_len);
4302 return ICE_ERR_NO_MEMORY;
/* Issue an alloc-resource AQ command; on success the firmware-assigned
 * counter index is read back from the first response element.
 */
4304 buf->num_elems = CPU_TO_LE16(num_items);
4305 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4306 ICE_AQC_RES_TYPE_M) | alloc_shared);
4308 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4309 ice_aqc_opc_alloc_res, NULL);
4313 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4321 * ice_free_res_cntr - free resource counter
4322 * @hw: pointer to the hardware structure
4323 * @type: type of resource
4324 * @alloc_shared: if set it is shared else dedicated
4325 * @num_items: number of entries to be freed for FD resource type
4326 * @counter_id: counter ID resource which needs to be freed
4329 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4332 struct ice_aqc_alloc_free_res_elem *buf;
4333 enum ice_status status;
4337 buf_len = sizeof(*buf);
4338 buf = (struct ice_aqc_alloc_free_res_elem *)
4339 ice_malloc(hw, buf_len);
4341 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr(): same buffer layout, but the caller
 * supplies the counter ID to release and the free-resource opcode is
 * used. Failure is only logged, not escalated here.
 */
4343 buf->num_elems = CPU_TO_LE16(num_items);
4344 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4345 ICE_AQC_RES_TYPE_M) | alloc_shared);
4346 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4348 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4349 ice_aqc_opc_free_res, NULL);
4351 ice_debug(hw, ICE_DBG_SW,
4352 "counter resource could not be freed\n");
4359 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4360 * @hw: pointer to the hardware structure
4361 * @counter_id: returns counter index
/* Thin wrapper: allocates one dedicated VLAN counter resource. */
4363 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4365 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4366 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4371 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4372 * @hw: pointer to the hardware structure
4373 * @counter_id: counter index to be freed
/* Thin wrapper: releases one dedicated VLAN counter resource. */
4375 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4377 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4378 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4383 * ice_alloc_res_lg_act - add large action resource
4384 * @hw: pointer to the hardware structure
4385 * @l_id: large action ID to fill it in
4386 * @num_acts: number of actions to hold with a large action entry
4388 static enum ice_status
4389 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4391 struct ice_aqc_alloc_free_res_elem *sw_buf;
4392 enum ice_status status;
4395 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4396 return ICE_ERR_PARAM;
4398 /* Allocate resource for large action */
4399 buf_len = sizeof(*sw_buf);
4400 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4401 ice_malloc(hw, buf_len);
4403 return ICE_ERR_NO_MEMORY;
4405 sw_buf->num_elems = CPU_TO_LE16(1);
4407 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4408 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4409 * If num_acts is greater than 2, then use
4410 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4411 * The num_acts cannot exceed 4. This was ensured at the
4412 * beginning of the function.
/* NOTE(review): the comment above says WIDE_TABLE_3 for num_acts == 2
 * but the code below selects WIDE_TABLE_2 — one of the two is stale;
 * confirm against the AQ resource-type definitions.
 */
4415 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4416 else if (num_acts == 2)
4417 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4419 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4421 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4422 ice_aqc_opc_alloc_res, NULL);
4424 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4426 ice_free(hw, sw_buf);
4431 * ice_add_mac_with_sw_marker - add filter with sw marker
4432 * @hw: pointer to the hardware structure
4433 * @f_info: filter info structure containing the MAC filter information
4434 * @sw_marker: sw marker to tag the Rx descriptor with
4437 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4440 struct ice_switch_info *sw = hw->switch_info;
4441 struct ice_fltr_mgmt_list_entry *m_entry;
4442 struct ice_fltr_list_entry fl_info;
4443 struct LIST_HEAD_TYPE l_head;
4444 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4445 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker and VSI handle
 * are eligible for the software-marker large action.
 */
4449 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4450 return ICE_ERR_PARAM;
4452 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4453 return ICE_ERR_PARAM;
4455 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4456 return ICE_ERR_PARAM;
4458 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4459 return ICE_ERR_PARAM;
4460 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4462 /* Add filter if it doesn't exist so then the adding of large
4463 * action always results in update
4466 INIT_LIST_HEAD(&l_head);
4467 fl_info.fltr_info = *f_info;
4468 LIST_ADD(&fl_info.list_entry, &l_head);
4470 entry_exists = false;
/* ICE_ERR_ALREADY_EXISTS is tolerated here; entry_exists controls
 * whether the filter is rolled back on a later failure.
 */
4471 ret = ice_add_mac(hw, &l_head);
4472 if (ret == ICE_ERR_ALREADY_EXISTS)
4473 entry_exists = true;
4477 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4478 ice_acquire_lock(rule_lock);
4479 /* Get the book keeping entry for the filter */
4480 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4484 /* If counter action was enabled for this rule then don't enable
4485 * sw marker large action
4487 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4488 ret = ICE_ERR_PARAM;
4492 /* if same marker was added before */
4493 if (m_entry->sw_marker_id == sw_marker) {
4494 ret = ICE_ERR_ALREADY_EXISTS;
4498 /* Allocate a hardware table entry to hold large act. Three actions
4499 * for marker based large action
4501 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4505 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4508 /* Update the switch rule to add the marker action */
4509 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4511 ice_release_lock(rule_lock);
4516 ice_release_lock(rule_lock);
4517 /* only remove entry if it did not exist previously */
4519 ret = ice_remove_mac(hw, &l_head);
4525  * ice_add_mac_with_counter - add filter with counter enabled
4526  * @hw: pointer to the hardware structure
4527  * @f_info: pointer to filter info structure containing the MAC filter
/* Mirrors ice_add_mac_with_sw_marker(): add the MAC filter if absent, then
 * attach a counter via a two-action large action; undo the filter add on
 * failure only if this call created it.
 * NOTE(review): declarations of entry_exist/counter_id/lg_act_id and the
 * goto labels are outside this extract — confirm against the full file.
 */
4531 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4533 	struct ice_switch_info *sw = hw->switch_info;
4534 	struct ice_fltr_mgmt_list_entry *m_entry;
4535 	struct ice_fltr_list_entry fl_info;
4536 	struct LIST_HEAD_TYPE l_head;
4537 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4538 	enum ice_status ret;
/* Counters are only supported on VSI-forwarding MAC filters. */
4543 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
4544 		return ICE_ERR_PARAM;
4546 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4547 		return ICE_ERR_PARAM;
4549 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4550 		return ICE_ERR_PARAM;
4551 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4553 	entry_exist = false;
4555 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4557 	/* Add filter if it doesn't exist so then the adding of large
4558 	 * action always results in update
4560 	INIT_LIST_HEAD(&l_head);
4562 	fl_info.fltr_info = *f_info;
4563 	LIST_ADD(&fl_info.list_entry, &l_head);
4565 	ret = ice_add_mac(hw, &l_head);
4566 	if (ret == ICE_ERR_ALREADY_EXISTS)
4571 	ice_acquire_lock(rule_lock);
/* Book-keeping entry must exist after a successful ice_add_mac(). */
4572 	m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4574 		ret = ICE_ERR_BAD_PTR;
4578 	/* Don't enable counter for a filter for which sw marker was enabled */
4579 	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4580 		ret = ICE_ERR_PARAM;
4584 	/* If a counter was already enabled then don't need to add again */
4585 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4586 		ret = ICE_ERR_ALREADY_EXISTS;
4590 	/* Allocate a hardware table entry to VLAN counter */
4591 	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4595 	/* Allocate a hardware table entry to hold large act. Two actions for
4596 	 * counter based large action
4598 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4602 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4605 	/* Update the switch rule to add the counter action */
4606 	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4608 	ice_release_lock(rule_lock);
/* Failure path: drop the lock, then undo the filter add if we made it. */
4613 	ice_release_lock(rule_lock);
4614 	/* only remove entry if it did not exist previously */
4616 	ret = ice_remove_mac(hw, &l_head);
4621 /* This is mapping table entry that maps every word within a given protocol
4622  * structure to the real byte offset as per the specification of that
4624  * for example dst address is 3 words in ethertype header and corresponding
4625  * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4626  * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4627  * matching entry describing its field. This needs to be updated if new
4628  * structure is added to that union.
/* Each entry lists the byte offset of every 16-bit word of the protocol
 * header; indexed by enum ice_protocol_type in ice_fill_valid_words().
 */
4630 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4631 	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
4632 	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
4633 	{ ICE_ETYPE_OL,		{ 0 } },
4634 	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4635 	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4636 	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4637 				 26, 28, 30, 32, 34, 36, 38 } },
4638 	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4639 				 26, 28, 30, 32, 34, 36, 38 } },
4640 	{ ICE_TCP_IL,		{ 0, 2 } },
4641 	{ ICE_UDP_OF,		{ 0, 2 } },
4642 	{ ICE_UDP_ILOS,		{ 0, 2 } },
4643 	{ ICE_SCTP_IL,		{ 0, 2 } },
4644 	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
4645 	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
4646 	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
4647 	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
4648 	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
4649 	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
4650 	{ ICE_PROTOCOL_LAST,	{ 0 } }
4653 /* The following table describes preferred grouping of recipes.
4654  * If a recipe that needs to be programmed is a superset or matches one of the
4655  * following combinations, then the recipe needs to be chained as per the
/* Consumed by ice_is_recipe_subset()/ice_create_recipe_group(): each entry is
 * a pair count, the (protocol ID, offset) pairs, and per-pair masks.
 */
4658 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
4659 	{3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4660 	      { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
4661 	{4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4662 	      { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4663 		{ 0xffff, 0xffff, 0xffff, 0xffff } },
4664 	{2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4665 		{ 0xffff, 0xffff, 0xffff, 0xffff } },
4666 	{2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4667 		{ 0xffff, 0xffff, 0xffff, 0xffff } },
/* Maps software enum ice_protocol_type values to hardware protocol IDs.
 * Several UDP-based tunnels (VXLAN, GENEVE, VXLAN-GPE, GTP) intentionally
 * share ICE_UDP_OF_HW. Terminated by ICE_PROTOCOL_LAST; walked linearly by
 * ice_prot_type_to_id().
 */
4670 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4671 	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
4672 	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
4673 	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
4674 	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
4675 	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
4676 	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
4677 	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
4678 	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
4679 	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
4680 	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
4681 	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
4682 	{ ICE_VXLAN,		ICE_UDP_OF_HW },
4683 	{ ICE_GENEVE,		ICE_UDP_OF_HW },
4684 	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
4685 	{ ICE_NVGRE,		ICE_GRE_OF_HW },
4686 	{ ICE_GTP,		ICE_UDP_OF_HW },
4687 	{ ICE_PPPOE,		ICE_PPPOE_HW },
4688 	{ ICE_PROTOCOL_LAST,	0 }
4692  * ice_find_recp - find a recipe
4693  * @hw: pointer to the hardware structure
4694  * @lkup_exts: extension sequence to match
4696  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4698 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4700 	bool refresh_required = true;
4701 	struct ice_sw_recipe *recp;
4704 	/* Walk through existing recipes to find a match */
4705 	recp = hw->switch_info->recp_list;
4706 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4707 		/* If recipe was not created for this ID, in SW bookkeeping,
4708 		 * check if FW has an entry for this recipe. If the FW has an
4709 		 * entry update it in our SW bookkeeping and continue with the
4712 		if (!recp[i].recp_created)
4713 			if (ice_get_recp_frm_fw(hw,
4714 						hw->switch_info->recp_list, i,
4718 		/* if number of words we are looking for match */
4719 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4720 			struct ice_fv_word *a = lkup_exts->fv_words;
4721 			struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: every requested (prot_id, off) word must
 * appear somewhere in the candidate recipe's word list.
 */
4725 			for (p = 0; p < lkup_exts->n_val_words; p++) {
4726 				for (q = 0; q < recp[i].lkup_exts.n_val_words;
4728 					if (a[p].off == b[q].off &&
4729 					    a[p].prot_id == b[q].prot_id)
4730 						/* Found the "p"th word in the
4735 				/* After walking through all the words in the
4736 				 * "i"th recipe if "p"th word was not found then
4737 				 * this recipe is not what we are looking for.
4738 				 * So break out from this loop and try the next
4741 				if (q >= recp[i].lkup_exts.n_val_words) {
4746 		/* If for "i"th recipe the found was never set to false
4747 		 * then it means we found our match
4750 			return i; /* Return the recipe ID */
/* No existing recipe matched the requested extraction sequence. */
4753 	return ICE_MAX_NUM_RECIPES;
4757  * ice_prot_type_to_id - get protocol ID from protocol type
4758  * @type: protocol type
4759  * @id: pointer to variable that will receive the ID
4761  * Returns true if found, false otherwise
4763 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear search of ice_prot_id_tbl[]; *id is written only on a match. */
4767 	for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4768 		if (ice_prot_id_tbl[i].type == type) {
4769 			*id = ice_prot_id_tbl[i].protocol_id;
4776  * ice_find_valid_words - count valid words
4777  * @rule: advanced rule with lookup information
4778  * @lkup_exts: byte offset extractions of the words that are valid
4780  * calculate valid words in a lookup rule using mask value
4783 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4784 		     struct ice_prot_lkup_ext *lkup_exts)
/* Appends one fv_word per non-zero 16-bit mask word of the rule to
 * lkup_exts, and returns the number of words added.
 */
4790 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4793 	word = lkup_exts->n_val_words;
/* Scan the rule's mask union one 16-bit word at a time. */
4795 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4796 		if (((u16 *)&rule->m_u)[j] &&
4797 		    (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4798 			/* No more space to accommodate */
4799 			if (word >= ICE_MAX_CHAIN_WORDS)
4801 			lkup_exts->fv_words[word].off =
4802 				ice_prot_ext[rule->type].offs[j];
/* NOTE(review): prot_id obtained above is not used here; the table is
 * indexed directly by the enum value, which relies on ice_prot_id_tbl[]
 * order matching enum ice_protocol_type — verify against the full file.
 */
4803 			lkup_exts->fv_words[word].prot_id =
4804 				ice_prot_id_tbl[rule->type].protocol_id;
4805 			lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4809 	ret_val = word - lkup_exts->n_val_words;
4810 	lkup_exts->n_val_words = word;
4816  * ice_find_prot_off_ind - check for specific ID and offset in rule
4817  * @lkup_exts: an array of protocol header extractions
4818  * @prot_type: protocol type to check
4819  * @off: expected offset of the extraction
4821  * Check if the prot_ext has given protocol ID and offset
/* Returns the index of the matching word, or ICE_MAX_CHAIN_WORDS when the
 * (prot_type, off) pair is not present in lkup_exts.
 */
4824 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4829 	for (j = 0; j < lkup_exts->n_val_words; j++)
4830 		if (lkup_exts->fv_words[j].off == off &&
4831 		    lkup_exts->fv_words[j].prot_id == prot_type)
4834 	return ICE_MAX_CHAIN_WORDS;
4838  * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4839  * @lkup_exts: an array of protocol header extractions
4840  * @r_policy: preferred recipe grouping policy
4842  * Helper function to check if given recipe group is subset we need to check if
4843  * all the words described by the given recipe group exist in the advanced rule
4844  * look up information
4847 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4848 		     const struct ice_pref_recipe_group *r_policy)
4850 	u8 ind[ICE_NUM_WORDS_RECIPE];
4854 	/* check if everything in the r_policy is part of the entire rule */
4855 	for (i = 0; i < r_policy->n_val_pairs; i++) {
4858 		j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4859 					  r_policy->pairs[i].off);
4860 		if (j >= ICE_MAX_CHAIN_WORDS)
4863 		/* store the indexes temporarily found by the find function
4864 		 * this will be used to mark the words as 'done'
/* Only after ALL policy pairs matched do we commit the 'done' bits; the
 * ind[] staging array prevents partial marking on a failed match.
 */
4869 	/* If the entire policy recipe was a true match, then mark the fields
4870 	 * that are covered by the recipe as 'done' meaning that these words
4871 	 * will be clumped together in one recipe.
4872 	 * "Done" here means in our searching if certain recipe group
4873 	 * matches or is subset of the given rule, then we mark all
4874 	 * the corresponding offsets as found. So the remaining recipes should
4875 	 * be created with whatever words that were left.
4877 	for (i = 0; i < count; i++) {
4880 		ice_set_bit(in, lkup_exts->done);
4886  * ice_create_first_fit_recp_def - Create a recipe grouping
4887  * @hw: pointer to the hardware structure
4888  * @lkup_exts: an array of protocol header extractions
4889  * @rg_list: pointer to a list that stores new recipe groups
4890  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4892  * Using first fit algorithm, take all the words that are still not done
4893  * and start grouping them in 4-word groups. Each group makes up one
4896 static enum ice_status
4897 ice_create_first_fit_recp_def(struct ice_hw *hw,
4898 			      struct ice_prot_lkup_ext *lkup_exts,
4899 			      struct LIST_HEAD_TYPE *rg_list,
4902 	struct ice_pref_recipe_group *grp = NULL;
4907 	/* Walk through every word in the rule to check if it is not done. If so
4908 	 * then this word needs to be part of a new recipe.
4910 	for (j = 0; j < lkup_exts->n_val_words; j++)
4911 		if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none is open or the current one is full. */
4913 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4914 				struct ice_recp_grp_entry *entry;
4916 				entry = (struct ice_recp_grp_entry *)
4917 					ice_malloc(hw, sizeof(*entry));
4919 					return ICE_ERR_NO_MEMORY;
4920 				LIST_ADD(&entry->l_entry, rg_list);
4921 				grp = &entry->r_group;
/* Append this word's (prot_id, off, mask) triple to the open group. */
4925 			grp->pairs[grp->n_val_pairs].prot_id =
4926 				lkup_exts->fv_words[j].prot_id;
4927 			grp->pairs[grp->n_val_pairs].off =
4928 				lkup_exts->fv_words[j].off;
4929 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4937  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4938  * @hw: pointer to the hardware structure
4939  * @fv_list: field vector with the extraction sequence information
4940  * @rg_list: recipe groupings with protocol-offset pairs
4942  * Helper function to fill in the field vector indices for protocol-offset
4943  * pairs. These indexes are then ultimately programmed into a recipe.
4945 static enum ice_status
4946 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4947 		       struct LIST_HEAD_TYPE *rg_list)
4949 	struct ice_sw_fv_list_entry *fv;
4950 	struct ice_recp_grp_entry *rg;
4951 	struct ice_fv_word *fv_ext;
4953 	if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; its extraction
 * sequence words (ew) are searched for each recipe-group pair.
 */
4956 	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4957 	fv_ext = fv->fv_ptr->ew;
4959 	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4962 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4963 			struct ice_fv_word *pr;
4968 			pr = &rg->r_group.pairs[i];
4969 			mask = rg->r_group.mask[i];
4971 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4972 				if (fv_ext[j].prot_id == pr->prot_id &&
4973 				    fv_ext[j].off == pr->off) {
4976 					/* Store index of field vector */
4978 					/* Mask is given by caller as big
4979 					 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask for firmware consumption. */
4982 					rg->fv_mask[i] = mask << 8 | mask >> 8;
4986 			/* Protocol/offset could not be found, caller gave an
4990 				return ICE_ERR_PARAM;
4998  * ice_find_free_recp_res_idx - find free result indexes for recipe
4999  * @hw: pointer to hardware structure
5000  * @profiles: bitmap of profiles that will be associated with the new recipe
5001  * @free_idx: pointer to variable to receive the free index bitmap
5003  * The algorithm used here is:
5004  * 1. When creating a new recipe, create a set P which contains all
5005  *    Profiles that will be associated with our new recipe
5007  * 2. For each Profile p in set P:
5008  *    a. Add all recipes associated with Profile p into set R
5009  *    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5010  *	[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5011  *	i. Or just assume they all have the same possible indexes:
5013  *	i.e., PossibleIndexes = 0x0000F00000000000
5015  * 3. For each Recipe r in set R:
5016  *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5017  *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5019  * FreeIndexes will contain the bits indicating the indexes free for use,
5020  * then the code needs to update the recipe[r].used_result_idx_bits to
5021  * indicate which indexes were selected for use by this recipe.
5024 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5025 			   ice_bitmap_t *free_idx)
5027 	ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5028 	ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5029 	ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5033 	ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5034 	ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5035 	ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5036 	ice_init_possible_res_bm(possible_idx);
/* NOTE(review): possible_idx is populated both by ice_init_possible_res_bm()
 * above and from ICE_POSSIBLE_RES_IDX below — confirm the two sources agree.
 */
5038 	for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
5039 		if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
5040 			ice_set_bit(bit, possible_idx);
5042 	/* For each profile we are going to associate the recipe with, add the
5043 	 * recipes that are associated with that profile. This will give us
5044 	 * the set of recipes that our recipe may collide with.
5047 	while (ICE_MAX_NUM_PROFILES >
5048 	       (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5049 		ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5050 			      ICE_MAX_NUM_RECIPES);
5055 	/* For each recipe that our new recipe may collide with, determine
5056 	 * which indexes have been used.
5058 	for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5059 		if (ice_is_bit_set(recipes, bit))
5060 			ice_or_bitmap(used_idx, used_idx,
5061 				      hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used: bits possible but not yet taken. */
5064 	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5066 	/* return number of free indexes */
5068 	while (ICE_MAX_FV_WORDS >
5069 	       (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5078  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5079  * @hw: pointer to hardware structure
5080  * @rm: recipe management list entry
5081  * @match_tun: if field vector index for tunnel needs to be programmed
5082  * @profiles: bitmap of profiles that will be assocated.
5084 static enum ice_status
5085 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5086 		  bool match_tun, ice_bitmap_t *profiles)
5088 	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5089 	struct ice_aqc_recipe_data_elem *tmp;
5090 	struct ice_aqc_recipe_data_elem *buf;
5091 	struct ice_recp_grp_entry *entry;
5092 	enum ice_status status;
5098 	/* When more than one recipe are required, another recipe is needed to
5099 	 * chain them together. Matching a tunnel metadata ID takes up one of
5100 	 * the match fields in the chaining recipe reducing the number of
5101 	 * chained recipes by one.
5103 	/* check number of free result indices */
5104 	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5105 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5107 	if (rm->n_grp_count > 1) {
/* Each chained sub-recipe needs its own result index to pass data. */
5108 		if (rm->n_grp_count > free_res_idx)
5109 			return ICE_ERR_MAX_LIMIT;
5114 	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5115 							    ICE_MAX_NUM_RECIPES,
5118 		return ICE_ERR_NO_MEMORY;
5120 	buf = (struct ice_aqc_recipe_data_elem *)
5121 		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5123 		status = ICE_ERR_NO_MEMORY;
5127 	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5128 	recipe_count = ICE_MAX_NUM_RECIPES;
/* Fetch an existing recipe into tmp[]; tmp[0] is used below as the
 * template that each new recipe entry is initialized from.
 */
5129 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5131 	if (status || recipe_count == 0)
5134 	/* Allocate the recipe resources, and configure them according to the
5135 	 * match fields from protocol headers and extracted field vectors.
5137 	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5138 	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5141 		status = ice_alloc_recipe(hw, &entry->rid);
5145 		/* Clear the result index of the located recipe, as this will be
5146 		 * updated, if needed, later in the recipe creation process.
5148 		tmp[0].content.result_indx = 0;
5150 		buf[recps] = tmp[0];
5151 		buf[recps].recipe_indx = (u8)entry->rid;
5152 		/* if the recipe is a non-root recipe RID should be programmed
5153 		 * as 0 for the rules to be applied correctly.
5155 		buf[recps].content.rid = 0;
5156 		ice_memset(&buf[recps].content.lkup_indx, 0,
5157 			   sizeof(buf[recps].content.lkup_indx),
5160 		/* All recipes use look-up index 0 to match switch ID. */
5161 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5162 		buf[recps].content.mask[0] =
5163 			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5164 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5167 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5168 			buf[recps].content.lkup_indx[i] = 0x80;
5169 			buf[recps].content.mask[i] = 0;
/* Program the real lookup indices/masks for this group's pairs;
 * index 0 is reserved for the switch ID match above.
 */
5172 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5173 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5174 			buf[recps].content.mask[i + 1] =
5175 				CPU_TO_LE16(entry->fv_mask[i]);
5178 		if (rm->n_grp_count > 1) {
5179 			/* Checks to see if there really is a valid result index
5182 			if (chain_idx >= ICE_MAX_FV_WORDS) {
5183 				ice_debug(hw, ICE_DBG_SW,
5184 					  "No chain index available\n");
5185 				status = ICE_ERR_MAX_LIMIT;
5189 			entry->chain_idx = chain_idx;
5190 			buf[recps].content.result_indx =
5191 				ICE_AQ_RECIPE_RESULT_EN |
5192 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5193 				 ICE_AQ_RECIPE_RESULT_DATA_M);
/* Consume this result index and advance to the next free one. */
5194 			ice_clear_bit(chain_idx, result_idx_bm);
5195 			chain_idx = ice_find_first_bit(result_idx_bm,
5199 		/* fill recipe dependencies */
5200 		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5201 				ICE_MAX_NUM_RECIPES);
5202 		ice_set_bit(buf[recps].recipe_indx,
5203 			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
5204 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5208 	if (rm->n_grp_count == 1) {
/* Single-group recipe: the one entry is itself the root recipe. */
5209 		rm->root_rid = buf[0].recipe_indx;
5210 		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5211 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5212 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5213 			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5214 				   sizeof(buf[0].recipe_bitmap),
5215 				   ICE_NONDMA_TO_NONDMA);
5217 			status = ICE_ERR_BAD_PTR;
5220 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
5221 		 * the recipe which is getting created if specified
5222 		 * by user. Usually any advanced switch filter, which results
5223 		 * into new extraction sequence, ended up creating a new recipe
5224 		 * of type ROOT and usually recipes are associated with profiles
5225 		 * Switch rule referreing newly created recipe, needs to have
5226 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
5227 		 * evaluation will not happen correctly. In other words, if
5228 		 * switch rule to be evaluated on priority basis, then recipe
5229 		 * needs to have priority, otherwise it will be evaluated last.
5231 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
5233 		struct ice_recp_grp_entry *last_chain_entry;
5236 		/* Allocate the last recipe that will chain the outcomes of the
5237 		 * other recipes together
5239 		status = ice_alloc_recipe(hw, &rid);
5243 		buf[recps].recipe_indx = (u8)rid;
5244 		buf[recps].content.rid = (u8)rid;
5245 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5246 		/* the new entry created should also be part of rg_list to
5247 		 * make sure we have complete recipe
5249 		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5250 			sizeof(*last_chain_entry));
5251 		if (!last_chain_entry) {
5252 			status = ICE_ERR_NO_MEMORY;
5255 		last_chain_entry->rid = rid;
5256 		ice_memset(&buf[recps].content.lkup_indx, 0,
5257 			   sizeof(buf[recps].content.lkup_indx),
5259 		/* All recipes use look-up index 0 to match switch ID. */
5260 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5261 		buf[recps].content.mask[0] =
5262 			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5263 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5264 			buf[recps].content.lkup_indx[i] =
5265 				ICE_AQ_RECIPE_LKUP_IGNORE;
5266 			buf[recps].content.mask[i] = 0;
5270 		/* update r_bitmap with the recp that is used for chaining */
5271 		ice_set_bit(rid, rm->r_bitmap);
5272 		/* this is the recipe that chains all the other recipes so it
5273 		 * should not have a chaining ID to indicate the same
5275 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Point the chaining recipe at each sub-recipe's result index. */
5276 		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5278 			last_chain_entry->fv_idx[i] = entry->chain_idx;
5279 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
5280 			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5281 			ice_set_bit(entry->rid, rm->r_bitmap);
5283 		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5284 		if (sizeof(buf[recps].recipe_bitmap) >=
5285 		    sizeof(rm->r_bitmap)) {
5286 			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5287 				   sizeof(buf[recps].recipe_bitmap),
5288 				   ICE_NONDMA_TO_NONDMA);
5290 			status = ICE_ERR_BAD_PTR;
5293 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5295 		/* To differentiate among different UDP tunnels, a meta data ID
5299 			buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5300 			buf[recps].content.mask[i] =
5301 				CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5305 		rm->root_rid = (u8)rid;
5307 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
/* Commit all assembled recipe entries to firmware in one AQ call. */
5311 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5312 	ice_release_change_lock(hw);
5316 	/* Every recipe that just got created add it to the recipe
5319 	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5320 		struct ice_switch_info *sw = hw->switch_info;
5321 		bool is_root, idx_found = false;
5322 		struct ice_sw_recipe *recp;
5323 		u16 idx, buf_idx = 0;
5325 		/* find buffer index for copying some data */
5326 		for (idx = 0; idx < rm->n_grp_count; idx++)
5327 			if (buf[idx].recipe_indx == entry->rid) {
5333 			status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW book-keeping (recp_list). */
5337 		recp = &sw->recp_list[entry->rid];
5338 		is_root = (rm->root_rid == entry->rid);
5339 		recp->is_root = is_root;
5341 		recp->root_rid = entry->rid;
5342 		recp->big_recp = (is_root && rm->n_grp_count > 1);
5344 		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5345 			   entry->r_group.n_val_pairs *
5346 			   sizeof(struct ice_fv_word),
5347 			   ICE_NONDMA_TO_NONDMA);
5349 		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5350 			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5352 		/* Copy non-result fv index values and masks to recipe. This
5353 		 * call will also update the result recipe bitmask.
5355 		ice_collect_result_idx(&buf[buf_idx], recp);
5357 		/* for non-root recipes, also copy to the root, this allows
5358 		 * easier matching of a complete chained recipe
5361 			ice_collect_result_idx(&buf[buf_idx],
5362 					       &sw->recp_list[rm->root_rid]);
5364 		recp->n_ext_words = entry->r_group.n_val_pairs;
5365 		recp->chain_idx = entry->chain_idx;
5366 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5367 		recp->tun_type = rm->tun_type;
5368 		recp->recp_created = true;
5383  * ice_create_recipe_group - creates recipe group
5384  * @hw: pointer to hardware structure
5385  * @rm: recipe management list entry
5386  * @lkup_exts: lookup elements
5388 static enum ice_status
5389 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5390 			struct ice_prot_lkup_ext *lkup_exts)
5392 	struct ice_recp_grp_entry *entry;
5393 	struct ice_recp_grp_entry *tmp;
5394 	enum ice_status status;
5398 	rm->n_grp_count = 0;
5401 	if (lkup_exts->n_val_words > ICE_NUM_WORDS_RECIPE) {
5402 		/* Each switch recipe can match up to 5 words or metadata. One
5403 		 * word in each recipe is used to match the switch ID. Four
5404 		 * words are left for matching other values. If the new advanced
5405 		 * recipe requires more than 4 words, it needs to be split into
5406 		 * multiple recipes which are chained together using the
5407 		 * intermediate result that each produces as input to the other
5408 		 * recipes in the sequence.
5410 		groups = ARRAY_SIZE(ice_recipe_pack);
5412 		/* Check if any of the preferred recipes from the grouping
5415 		for (i = 0; i < groups; i++)
5416 			/* Check if the recipe from the preferred grouping
5417 			 * matches or is a subset of the fields that needs to be
5420 			if (ice_is_recipe_subset(lkup_exts,
5421 						 &ice_recipe_pack[i])) {
5422 				/* This recipe can be used by itself or grouped
5423 				 * with other recipes.
5425 				entry = (struct ice_recp_grp_entry *)
5426 					ice_malloc(hw, sizeof(*entry));
5428 					status = ICE_ERR_NO_MEMORY;
5431 				entry->r_group = ice_recipe_pack[i];
5432 				LIST_ADD(&entry->l_entry, &rm->rg_list);
5437 	/* Create recipes for words that are marked not done by packing them
/* First-fit pass covers whatever words the preferred groupings left over. */
5440 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5441 					       &rm->rg_list, &recp_count);
5443 	rm->n_grp_count += recp_count;
5444 	rm->n_ext_words = lkup_exts->n_val_words;
5445 	ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5446 		   sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5447 	ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5448 		   sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path: free every group entry queued on rg_list so far. */
5453 	LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5455 		LIST_DEL(&entry->l_entry);
5456 		ice_free(hw, entry);
5464  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5465  * @hw: pointer to hardware structure
5466  * @lkups: lookup elements or match criteria for the advanced recipe, one
5467  *	   structure per protocol header
5468  * @lkups_cnt: number of protocols
5469  * @bm: bitmap of field vectors to consider
5470  * @fv_list: pointer to a list that holds the returned field vectors
5472 static enum ice_status
5473 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5474 	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5476 	enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element. */
5480 	prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5482 		return ICE_ERR_NO_MEMORY;
5484 	for (i = 0; i < lkups_cnt; i++)
5485 		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5486 			status = ICE_ERR_CFG;
5490 	/* Find field vectors that include all specified protocol types */
5491 	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is freed on both success and error paths. */
5494 	ice_free(hw, prot_ids);
5499  * ice_add_special_words - Add words that are not protocols, such as metadata
5500  * @rinfo: other information regarding the rule e.g. priority and action info
5501  * @lkup_exts: lookup word structure
5503 static enum ice_status
5504 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5505 		      struct ice_prot_lkup_ext *lkup_exts)
5507 	/* If this is a tunneled packet, then add recipe index to match the
5508 	 * tunnel bit in the packet metadata flags.
5510 	if (rinfo->tun_type != ICE_NON_TUN) {
5511 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word matching the tunnel flag bit. */
5512 			u8 word = lkup_exts->n_val_words++;
5514 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5515 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5517 			lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No room left for the metadata word. */
5519 			return ICE_ERR_MAX_LIMIT;
5526 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5527  * @hw: pointer to hardware structure
5528  * @rinfo: other information regarding the rule e.g. priority and action info
5529  * @bm: pointer to memory for returning the bitmap of field vectors
5532 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
/* Translate the rule's tunnel type to a profile type, then delegate to
 * ice_get_sw_fv_bitmap() to fill @bm with the compatible field vectors.
 */
5535 	enum ice_prof_type type;
5537 	switch (rinfo->tun_type) {
5539 		type = ICE_PROF_NON_TUN;
5541 	case ICE_ALL_TUNNELS:
5542 		type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnel types share the UDP tunnel profile. */
5544 	case ICE_SW_TUN_VXLAN_GPE:
5545 	case ICE_SW_TUN_GENEVE:
5546 	case ICE_SW_TUN_VXLAN:
5547 	case ICE_SW_TUN_UDP:
5548 	case ICE_SW_TUN_GTP:
5549 		type = ICE_PROF_TUN_UDP;
5551 	case ICE_SW_TUN_NVGRE:
5552 		type = ICE_PROF_TUN_GRE;
5554 	case ICE_SW_TUN_PPPOE:
5555 		type = ICE_PROF_TUN_PPPOE;
5557 	case ICE_SW_TUN_AND_NON_TUN:
5559 		type = ICE_PROF_ALL;
5563 	ice_get_sw_fv_bitmap(hw, type, bm);
5567 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5568 * @hw: pointer to hardware structure
5569 * @lkups: lookup elements or match criteria for the advanced recipe, one
5570 * structure per protocol header
5571 * @lkups_cnt: number of protocols
5572 * @rinfo: other information regarding the rule e.g. priority and action info
5573 * @rid: return the recipe ID of the recipe created
5575 static enum ice_status
/* ice_add_adv_recipe - create (or find) a recipe for the given lookup criteria
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria, one structure per protocol header
 * @lkups_cnt: number of protocols in @lkups
 * @rinfo: other rule information (tunnel type, priority, switch action)
 * @rid: on success receives the recipe ID to use for programming the rule
 *
 * NOTE(review): several lines (opening brace, guard conditions, loop/if
 * closers) appear to be missing from this extract; comments below annotate
 * only the visible code.
 */
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
	/* scratch bitmaps: candidate field vectors and associated profiles */
	ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
	ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *rm;
	bool match_tun = false;
		/* NOTE(review): guard condition for this early exit is not
		 * visible in this extract
		 */
		return ICE_ERR_PARAM;
	lkup_exts = (struct ice_prot_lkup_ext *)
		ice_malloc(hw, sizeof(*lkup_exts));
		/* allocation-failure path (condition not visible here) */
		return ICE_ERR_NO_MEMORY;
	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
		count = ice_fill_valid_words(&lkups[i], lkup_exts);
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
	rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
		status = ICE_ERR_NO_MEMORY;
		goto err_free_lkup_exts;
	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);
	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	 */
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
	/* Group match words into recipes using preferred recipe grouping
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);
	/* There is only profile for UDP tunnels. So, it is necessary to use a
	 * metadata ID flag to differentiate different tunnel types. A separate
	 * recipe needs to be used for the metadata.
	 */
	if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
	     rinfo->tun_type == ICE_SW_TUN_GENEVE ||
	     rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
	/* set the recipe priority if specified */
	rm->priority = rinfo->priority ? rinfo->priority : 0;
	/* Find offsets from the field vector. Pick the first one for all the
	 */
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	/* get bitmap of all profiles the recipe will be associated with */
	ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		ice_set_bit((u16)fvit->profile_id, profiles);
	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, lkup_exts);
		goto err_free_lkup_exts;
	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria */
	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
		ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* read current recipe-to-profile association from FW */
		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);
		/* merge the new recipes into the existing association */
		ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		/* change lock must be held around the map update */
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
		ice_release_change_lock(hw);
	*rid = rm->root_rid;
	/* cache the lookup extraction info for later rule removal/lookup */
	ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
		   lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
	/* free the temporary recipe-group entries */
	LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
				 ice_recp_grp_entry, l_entry) {
		LIST_DEL(&r_entry->l_entry);
		ice_free(hw, r_entry);
	/* free the field-vector list built for this request */
	LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
		LIST_DEL(&fvit->list_entry);
	ice_free(hw, rm->root_buf);
	ice_free(hw, lkup_exts);
/**
 * ice_find_dummy_packet - find dummy packet by tunnel type
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @tun_type: tunnel type from the match criteria
 * @pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * NOTE(review): the @pkt_len parameter line and several flag
 * assignments / early returns are missing from this extract.
 */
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
		      enum ice_sw_tunnel_type tun_type, const u8 **pkt,
		      const struct ice_dummy_pkt_offsets **offsets)
	bool tcp = false, udp = false, ipv6 = false;
	/* tunnel types with a single fixed dummy packet are handled first */
	if (tun_type == ICE_SW_TUN_GTP) {
		*pkt = dummy_udp_gtp_packet;
		*pkt_len = sizeof(dummy_udp_gtp_packet);
		*offsets = dummy_udp_gtp_packet_offsets;
	if (tun_type == ICE_SW_TUN_PPPOE) {
		*pkt = dummy_pppoe_packet;
		*pkt_len = sizeof(dummy_pppoe_packet);
		*offsets = dummy_pppoe_packet_offsets;
	/* scan the lookups to learn which L3/L4 protocols are matched
	 * (flag-setting statements not visible in this extract)
	 */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type == ICE_UDP_ILOS)
		else if (lkups[i].type == ICE_TCP_IL)
		else if (lkups[i].type == ICE_IPV6_OFOS)
	if (tun_type == ICE_ALL_TUNNELS) {
		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;
	if (tun_type == ICE_SW_TUN_NVGRE) {
		/* GRE tunnel: choose TCP or UDP inner dummy packet */
		*pkt = dummy_gre_tcp_packet;
		*pkt_len = sizeof(dummy_gre_tcp_packet);
		*offsets = dummy_gre_tcp_packet_offsets;
		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;
	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
	    tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
		/* UDP-based tunnel: choose TCP or UDP inner dummy packet */
		*pkt = dummy_udp_tun_tcp_packet;
		*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
		*offsets = dummy_udp_tun_tcp_packet_offsets;
		*pkt = dummy_udp_tun_udp_packet;
		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
		*offsets = dummy_udp_tun_udp_packet_offsets;
	/* non-tunneled: pick the dummy packet matching the L3/L4 flags */
	*pkt = dummy_udp_packet;
	*pkt_len = sizeof(dummy_udp_packet);
	*offsets = dummy_udp_packet_offsets;
	} else if (udp && ipv6) {
		*pkt = dummy_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_udp_ipv6_packet);
		*offsets = dummy_udp_ipv6_packet_offsets;
	} else if ((tcp && ipv6) || ipv6) {
		*pkt = dummy_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
		*offsets = dummy_tcp_ipv6_packet_offsets;
	/* default fallback: plain IPv4/TCP dummy packet */
	*pkt = dummy_tcp_packet;
	*pkt_len = sizeof(dummy_tcp_packet);
	*offsets = dummy_tcp_packet_offsets;
/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
 *
 * NOTE(review): case labels of the protocol switch and some loop/if closers
 * are missing from this extract.
 */
static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;
	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
		/* this should never happen in a correct calling sequence */
			return ICE_ERR_PARAM;
		/* determine the header length for this protocol so only the
		 * corresponding bytes of the dummy packet are rewritten
		 * (case labels not visible in this extract)
		 */
		switch (lkups[i].type) {
			len = sizeof(struct ice_ether_hdr);
			len = sizeof(struct ice_ethtype_hdr);
			len = sizeof(struct ice_ipv4_hdr);
			len = sizeof(struct ice_ipv6_hdr);
			len = sizeof(struct ice_l4_hdr);
			len = sizeof(struct ice_sctp_hdr);
			len = sizeof(struct ice_nvgre);
			len = sizeof(struct ice_udp_tnl_hdr);
			len = sizeof(struct ice_udp_gtp_hdr);
			return ICE_ERR_PARAM;
		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	/* record the final header length in the switch rule */
	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advance rule entry
 * Returns pointer to entry storing the rule if found
 *
 * NOTE(review): the final match/return lines are missing from this extract.
 */
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u8 recp_id,
			struct ice_adv_rule_info *rinfo)
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;
	/* walk all rules of this recipe looking for identical lookups */
	LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
			    ice_adv_fltr_mgmt_list_entry, list_entry) {
		bool lkups_matched = true;

		/* quick reject on differing lookup counts */
		if (lkups_cnt != list_itr->lkups_cnt)
		/* full byte-wise comparison of every lookup element */
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				lkups_matched = false;
		/* lookups match; rule also has to match flag and tunnel type */
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    rinfo->tun_type == list_itr->rule_info.tun_type &&
/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 *
 * NOTE(review): some error-return lines are missing from this extract.
 */
static enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
	enum ice_status status;
	u16 vsi_list_id = 0;

	/* queue/queue-group destinations cannot share a VSI list */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
		return ICE_ERR_NOT_IMPL;
	/* a DROP rule is VSI-independent: a second DROP is a duplicate */
	if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
	    new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return ICE_ERR_ALREADY_EXISTS;
	/* mixing queue forwarding with VSI forwarding is unsupported */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;
	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;
		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "forward to VSI" to
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* book keeping: the rule now forwards to the VSI list */
		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,
		/* update VSI list mapping info with new VSI ID */
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	/* one more VSI now subscribes to this filter */
	m_entry->vsi_count++;
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *  together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *  ignored is case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 *
 * NOTE(review): a number of guard conditions, case labels and break
 * statements are missing from this extract; comments annotate visible code.
 */
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
		/* NOTE(review): guard condition not visible here */
		return ICE_ERR_PARAM;
	/* get # of words we need to match */
	for (i = 0; i < lkups_cnt; i++) {
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
	/* zero or too many valid mask words cannot form a rule */
	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return ICE_ERR_PARAM;
	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	/* only these four switch actions are supported for advanced rules */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
	/* find or create the recipe for this lookup combination */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* we have to add VSI to VSI_LIST and increment vsi_count.
	 * Also Update VSI list so that we can change forwarding rule
	 * if the rule already exists, we will check if it exists with
	 * same vsi_id, if not then add it to the VSI list if it already
	 * exists if not then create a VSI list and add the existing VSI
	 * ID and the new VSI ID to the list
	 * We will add that VSI to the list
	 */
	status = ice_adv_add_update_vsi_list(hw, m_entry,
					     &m_entry->rule_info,
		added_entry->rid = rid;
		added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	/* new rule: build the switch-rule buffer (header + dummy packet) */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
	/* encode the caller's switch action into the rule's 'act' word
	 * (some case labels / breaks not visible in this extract)
	 */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
	case ICE_FWD_TO_QGRP:
		/* queue-group size is encoded as a power-of-two region */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 */
	s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
	s_rule->pdata.lkup_tx_rx.src =
		CPU_TO_LE16(hw->port_info->lport);
	s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	/* copy the dummy packet and overlay the match values per mask */
	ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
	/* program the rule into hardware via admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
		goto err_ice_add_adv_rule;
	/* build the book-keeping entry for the newly added rule */
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	if (!adv_fltr->lkups) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
		struct ice_fltr_info tmp_fltr;

		tmp_fltr.fltr_rule_id =
			LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
		tmp_fltr.vsi_handle = vsi_handle;
		/* Update the previous switch rule of "forward to VSI" to
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
			goto err_ice_add_adv_rule;
		adv_fltr->vsi_count = 1;
	/* Add rule entry to book keeping list */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
	added_entry->rid = rid;
	added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
	added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
err_ice_add_adv_rule:
	/* on failure release the partially built book-keeping entry */
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);
	ice_free(hw, s_rule);
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *
 * Removes @vsi_handle from the rule's VSI list; when only one VSI remains,
 * converts the rule back to a direct "forward to VSI" and deletes the list.
 *
 * NOTE(review): some error-return lines are missing from this extract.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;

	/* only applicable to rules that currently forward to a VSI list */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;
	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;
	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* remove the VSI from the HW VSI list (remove=true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
	/* update book keeping to match the HW state */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		/* a single subscriber remains: collapse the VSI list back to
		 * a direct forward-to-VSI rule
		 */
		struct ice_fltr_info tmp_fltr;

		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;
		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
	if (fm_list->vsi_count == 1) {
		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
		/* drop the now-unused VSI list map entry */
		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *  together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 *
 * NOTE(review): several guard/branch lines are missing from this extract.
 */
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	const struct ice_dummy_pkt_offsets *offsets;
	struct ice_prot_lkup_ext lkup_exts;
	u16 rule_buf_sz, pkt_len, i, rid;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	const u8 *pkt = NULL;

	/* rebuild the lookup extraction info to locate the recipe */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type >= ICE_PROTOCOL_LAST)
		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	rid = ice_find_recp(hw, &lkup_exts);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;
	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
	} else if (list_elem->vsi_count > 1) {
		/* more subscribers remain: only drop this VSI from the list */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		/* last list subscriber: update list then remove the rule */
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
			ice_release_lock(rule_lock);
		if (list_elem->vsi_count == 0)
	ice_release_lock(rule_lock);
		struct ice_aqc_sw_rules_elem *s_rule;

		/* size the removal buffer using the rule's dummy packet */
		ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
				      &pkt_len, &offsets);
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
			(struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status == ICE_SUCCESS) {
			/* rule removed in HW: drop the book-keeping entry */
			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
		ice_free(hw, s_rule);
/**
 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
 * @hw: pointer to the hardware structure
 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
 *
 * This function is used to remove 1 rule at a time. The removal is based on
 * the remove_entry parameter. This function will remove rule for a given
 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 */
ice_rem_adv_rule_by_id(struct ice_hw *hw,
		       struct ice_rule_query_data *remove_entry)
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_switch_info *sw;

	sw = hw->switch_info;
	/* the recipe must exist for its rule list to be meaningful */
	if (!sw->recp_list[remove_entry->rid].recp_created)
		return ICE_ERR_PARAM;
	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
		if (list_itr->rule_info.fltr_rule_id ==
		    remove_entry->rule_id) {
			/* found the rule; remove it for the caller's VSI */
			rinfo = list_itr->rule_info;
			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
			return ice_rem_adv_rule(hw, list_itr->lkups,
						list_itr->lkups_cnt, &rinfo);
	/* no rule with the requested ID was found */
	return ICE_ERR_PARAM;
/**
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 * given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
 *
 * This function is used to remove all the rules for a given VSI and as soon
 * as removing a rule fails, it will return immediately with the error code,
 * else it will return ICE_SUCCESS
 *
 * NOTE(review): 'continue' / status-check lines are missing from this
 * extract.
 */
ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_vsi_list_map_info *map_info;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_switch_info *sw;
	enum ice_status status;
	u16 vsi_list_id = 0;

	sw = hw->switch_info;
	/* scan every recipe that holds advanced rules */
	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
		if (!sw->recp_list[rid].recp_created)
		if (!sw->recp_list[rid].adv_rule)
		list_head = &sw->recp_list[rid].filt_rules;
		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry, list_entry) {
			/* only remove rules this VSI subscribes to */
			map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
			rinfo = list_itr->rule_info;
			rinfo.sw_act.vsi_handle = vsi_handle;
			status = ice_rem_adv_rule(hw, list_itr->lkups,
						  list_itr->lkups_cnt, &rinfo);
/**
 * ice_replay_fltr - Replay all the filters stored by a specific list head
 * @hw: pointer to the hardware structure
 * @list_head: list for which filters needs to be replayed
 * @recp_id: Recipe ID for which rules need to be replayed
 *
 * NOTE(review): the per-VSI inner loop header and some variable
 * declarations are missing from this extract.
 */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE l_head;
	enum ice_status status = ICE_SUCCESS;

	/* nothing to replay for an empty list */
	if (LIST_EMPTY(list_head))
	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise when trying to re-add the same
	 * filter, the function will return already exists
	 */
	LIST_REPLACE_INIT(list_head, &l_head);
	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			/* single-VSI rule: re-add it directly */
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status != ICE_SUCCESS)
		/* Add a filter per VSI separately */
			ice_find_first_bit(itr->vsi_list_info->vsi_map,
			if (!ice_is_vsi_valid(hw, vsi_handle))
			/* clear the bit so the logic can re-add the VSI */
			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.vsi_handle = vsi_handle;
			f_entry.fltr_info.fwd_id.hw_vsi_id =
				ice_get_hw_vsi_num(hw, vsi_handle);
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, &f_entry);
				status = ice_add_rule_internal(hw, recp_id,
			if (status != ICE_SUCCESS)
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
/**
 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 *
 * NOTE: This function does not clean up partially added filters on error.
 * It is up to caller of the function to issue a reset or fail early.
 */
enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = ICE_SUCCESS;

	/* replay each recipe's rule list; stop on first failure */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;

		status = ice_replay_fltr(hw, i, head);
		if (status != ICE_SUCCESS)
/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct LIST_HEAD_TYPE *list_head)
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;

	/* nothing to replay for an empty list */
	if (LIST_EMPTY(list_head))
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status != ICE_SUCCESS)
		/* skip entries whose VSI list does not include this VSI */
		if (!itr->vsi_list_info ||
		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
		/* Clearing it so that the logic can add it back */
		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status != ICE_SUCCESS)
/**
 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @list_head: list for which filters need to be replayed
 *
 * Replay the advanced rule for the given VSI.
 */
static enum ice_status
ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
			struct LIST_HEAD_TYPE *list_head)
	struct ice_rule_query_data added_entry = { 0 };
	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
	enum ice_status status = ICE_SUCCESS;

	/* nothing to replay for an empty list */
	if (LIST_EMPTY(list_head))
	LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
		u16 lk_cnt = adv_fltr->lkups_cnt;

		/* only replay rules that belong to the requested VSI */
		if (vsi_handle != rinfo->sw_act.vsi_handle)
		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
/**
 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 *
 * Replays filters for requested VSI via vsi_handle.
 */
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status;

	/* Update the recipes that were created */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct LIST_HEAD_TYPE *head;

		head = &sw->recp_list[i].filt_replay_rules;
		/* advanced-rule recipes use the advanced replay path */
		if (!sw->recp_list[i].adv_rule)
			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
		if (status != ICE_SUCCESS)
6815 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6816 * @hw: pointer to the HW struct
6818 * Deletes the filter replay rules.
6820 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6822 struct ice_switch_info *sw = hw->switch_info;
6828 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6829 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6830 struct LIST_HEAD_TYPE *l_head;
6832 l_head = &sw->recp_list[i].filt_replay_rules;
6833 if (!sw->recp_list[i].adv_rule)
6834 ice_rem_sw_rule_info(hw, l_head);
6836 ice_rem_adv_rule_info(hw, l_head);