1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of the DA / EtherType / VLAN-TCI fields within the
 * dummy Ethernet header defined below.
 */
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN IDs are 12 bits wide, so 0xFFF is the largest legal value */
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
/* NOTE(review): the remainder of this initializer is elided in this listing */
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size in bytes of a RX/TX switch-rule AQ element whose payload carries the
 * DUMMY_ETH_HDR_LEN-byte dummy Ethernet header above.  The trailing "- 1"
 * presumably offsets a one-byte placeholder already counted inside
 * ice_sw_rule_lkup_rx_tx -- confirm against that struct's definition.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Same as above, but for a rule that carries no packet-header payload */
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element holding (n) action entries */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element holding (n) VSI numbers */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Describes where each protocol header begins inside a dummy packet;
 * the list is terminated by an ICE_PROTOCOL_LAST entry.
 */
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Header offsets for the NVGRE + inner-TCP dummy packet below */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
/* Template: outer MAC/IPv4 (proto 0x2F = GRE), NVGRE, inner MAC/IPv4/TCP */
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
/* Header offsets for the NVGRE + inner-UDP dummy packet below */
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
/* Template: outer MAC/IPv4 (proto 0x2F = GRE), NVGRE, inner MAC/IPv4/UDP */
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
/* Header offsets for the VXLAN + inner-TCP dummy packet below */
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
162 { ICE_PROTOCOL_LAST, 0 },
/* Template: outer MAC/IPv4/UDP (dport 0x12B5 = 4789 VXLAN), VXLAN,
 * inner MAC/IPv4/TCP
 */
166 u8 dummy_udp_tun_tcp_packet[] = {
167 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
168 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
171 0x08, 0x00, /* ICE_ETYPE_OL 12 */
173 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
174 0x00, 0x01, 0x00, 0x00,
175 0x40, 0x11, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
180 0x00, 0x46, 0x00, 0x00,
182 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
186 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
190 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
191 0x00, 0x01, 0x00, 0x00,
192 0x40, 0x06, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
199 0x50, 0x02, 0x20, 0x00,
200 0x00, 0x00, 0x00, 0x00
/* Header offsets for the VXLAN + inner-UDP dummy packet below */
204 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
206 { ICE_ETYPE_OL, 12 },
207 { ICE_IPV4_OFOS, 14 },
212 { ICE_UDP_ILOS, 84 },
213 { ICE_PROTOCOL_LAST, 0 },
/* Template: outer MAC/IPv4/UDP (dport 0x12B5 = 4789 VXLAN), VXLAN,
 * inner MAC/IPv4/UDP
 */
217 u8 dummy_udp_tun_udp_packet[] = {
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_OL 12 */
224 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
225 0x00, 0x01, 0x00, 0x00,
226 0x00, 0x11, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
231 0x00, 0x3a, 0x00, 0x00,
233 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
234 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
237 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00,
241 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
242 0x00, 0x01, 0x00, 0x00,
243 0x00, 0x11, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
248 0x00, 0x08, 0x00, 0x00,
/* Header offsets for the plain IPv4/UDP dummy packet below */
252 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
254 { ICE_ETYPE_OL, 12 },
255 { ICE_IPV4_OFOS, 14 },
256 { ICE_UDP_ILOS, 34 },
257 { ICE_PROTOCOL_LAST, 0 },
/* Template: MAC / IPv4 (proto 0x11 = UDP) / UDP */
261 dummy_udp_packet[] = {
262 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x08, 0x00, /* ICE_ETYPE_OL 12 */
268 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
269 0x00, 0x01, 0x00, 0x00,
270 0x00, 0x11, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
275 0x00, 0x08, 0x00, 0x00,
277 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets for the plain IPv4/TCP dummy packet below */
281 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
283 { ICE_ETYPE_OL, 12 },
284 { ICE_IPV4_OFOS, 14 },
286 { ICE_PROTOCOL_LAST, 0 },
/* Template: MAC / IPv4 (proto 0x06 = TCP) / TCP */
290 dummy_tcp_packet[] = {
291 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
292 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00,
295 0x08, 0x00, /* ICE_ETYPE_OL 12 */
297 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
298 0x00, 0x01, 0x00, 0x00,
299 0x00, 0x06, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
306 0x50, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets for the IPv6/TCP dummy packet below */
313 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
315 { ICE_ETYPE_OL, 12 },
316 { ICE_IPV6_OFOS, 14 },
318 { ICE_PROTOCOL_LAST, 0 },
/* Template: MAC / IPv6 (ethertype 0x86DD) / TCP */
322 dummy_tcp_ipv6_packet[] = {
323 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
327 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
329 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
330 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
341 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00,
343 0x50, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets for the IPv6/UDP dummy packet below */
350 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
352 { ICE_ETYPE_OL, 12 },
353 { ICE_IPV6_OFOS, 14 },
354 { ICE_UDP_ILOS, 54 },
355 { ICE_PROTOCOL_LAST, 0 },
/* Template: MAC / IPv6 (ethertype 0x86DD) / UDP */
359 dummy_udp_ipv6_packet[] = {
360 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
366 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
367 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
378 0x00, 0x08, 0x00, 0x00,
380 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets for the GTP-U dummy packet below */
384 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
386 { ICE_IPV4_OFOS, 14 },
389 { ICE_PROTOCOL_LAST, 0 },
/* Template: MAC / IPv4 / UDP (dport 0x0868 = 2152 GTP-U) / GTP header
 * with PDU-session extension
 */
393 dummy_udp_gtp_packet[] = {
394 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
399 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x11, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
406 0x00, 0x1c, 0x00, 0x00,
408 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
409 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, 0x00, 0x85,
412 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
413 0x00, 0x00, 0x00, 0x00,
/* Header offsets for the PPPoE dummy packet below */
417 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
420 { ICE_PROTOCOL_LAST, 0 },
/* Template: MAC / PPPoE session header / IPv4 PDU */
424 dummy_pppoe_packet[] = {
425 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
426 0x00, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
430 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 14 */
431 0x00, 0x4e, 0x00, 0x21,
433 0x45, 0x00, 0x00, 0x30, /* PDU */
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x11, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
440 /* this is a recipe to profile association bitmap */
441 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
442 ICE_MAX_NUM_PROFILES);
444 /* this is a profile to recipe association bitmap */
445 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
446 ICE_MAX_NUM_RECIPES);
/* forward declaration: both maps above are (re)filled from FW by this */
448 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
451 * ice_collect_result_idx - copy result index values
452 * @buf: buffer that contains the result index
453 * @recp: the recipe struct to copy data into
455 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
456 struct ice_sw_recipe *recp)
458 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
459 ice_set_bit(buf->content.result_indx &
460 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
464 * ice_init_possible_res_bm - initialize possible result bitmap
465 * @pos_result_bm: pointer to the bitmap to initialize
467 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
471 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
473 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
474 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
475 ice_set_bit(bit, pos_result_bm);
479 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
480 * @hw: pointer to hardware structure
481 * @recps: struct that we need to populate
482 * @rid: recipe ID that we are populating
483 * @refresh_required: true if we should get recipe to profile mapping from FW
485 * This function is used to populate all the necessary entries into our
486 * bookkeeping so that we have a current list of all the recipes that are
487 * programmed in the firmware.
/* NOTE(review): several lines of this function (braces, error-path
 * statements) are elided in this listing; all tokens below are unchanged.
 */
489 static enum ice_status
490 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
491 bool *refresh_required)
493 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
494 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
495 struct ice_aqc_recipe_data_elem *tmp;
496 u16 num_recps = ICE_MAX_NUM_RECIPES;
497 struct ice_prot_lkup_ext *lkup_exts;
498 u16 i, sub_recps, fv_word_idx = 0;
499 enum ice_status status;
/* bitmap of field-vector slots that can legally hold a result index */
501 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
502 ice_init_possible_res_bm(possible_idx);
504 /* we need a buffer big enough to accommodate all the recipes */
505 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
506 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
508 return ICE_ERR_NO_MEMORY;
/* ask FW for the recipe chain rooted at 'rid' */
510 tmp[0].recipe_indx = rid;
511 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
512 /* non-zero status meaning recipe doesn't exist */
516 /* Get recipe to profile map so that we can get the fv from lkups that
517 * we read for a recipe from FW. Since we want to minimize the number of
518 * times we make this FW call, just make one call and cache the copy
519 * until a new recipe is added. This operation is only required the
520 * first time to get the changes from FW. Then to search existing
521 * entries we don't need to update the cache again until another recipe
524 if (*refresh_required) {
525 ice_get_recp_to_prof_map(hw);
526 *refresh_required = false;
529 /* Start populating all the entries for recps[rid] based on lkups from
530 * firmware. Note that we are only creating the root recipe in our
533 lkup_exts = &recps[rid].lkup_exts;
/* walk every sub-recipe element FW returned for this chain */
535 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
536 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
537 struct ice_recp_grp_entry *rg_entry;
538 u8 prof_id, idx, prot = 0;
542 rg_entry = (struct ice_recp_grp_entry *)
543 ice_malloc(hw, sizeof(*rg_entry));
545 status = ICE_ERR_NO_MEMORY;
549 idx = root_bufs.recipe_indx;
550 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
552 /* Mark all result indices in this chain */
553 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
554 ice_set_bit(root_bufs.content.result_indx &
555 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
557 /* get the first profile that is associated with rid */
558 prof_id = ice_find_first_bit(recipe_to_profile[idx],
559 ICE_MAX_NUM_PROFILES);
560 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
561 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
563 rg_entry->fv_idx[i] = lkup_indx;
564 rg_entry->fv_mask[i] =
565 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
567 /* If the recipe is a chained recipe then all its
568 * child recipe's result will have a result index.
569 * To fill fv_words we should not use those result
570 * index, we only need the protocol ids and offsets.
571 * We will skip all the fv_idx which stores result
572 * index in them. We also need to skip any fv_idx which
573 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
574 * valid offset value.
576 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
577 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
578 rg_entry->fv_idx[i] == 0)
/* translate the FV index into a (protocol id, offset) pair */
581 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
582 rg_entry->fv_idx[i], &prot, &off);
583 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
584 lkup_exts->fv_words[fv_word_idx].off = off;
587 /* populate rg_list with the data from the child entry of this
590 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
592 /* Propagate some data to the recipe database */
593 recps[idx].is_root = is_root;
594 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
595 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
596 recps[idx].chain_idx = root_bufs.content.result_indx &
597 ~ICE_AQ_RECIPE_RESULT_EN;
599 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
604 /* Only do the following for root recipes entries */
605 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
606 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
607 recps[idx].root_rid = root_bufs.content.rid &
608 ~ICE_AQ_RECIPE_ID_IS_ROOT;
609 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
610 recps[idx].big_recp = (recps[rid].n_grp_count > 1);
613 /* Complete initialization of the root recipe entry */
614 lkup_exts->n_val_words = fv_word_idx;
615 recps[rid].n_grp_count = num_recps;
/* keep a copy of the raw FW buffer for later replay/inspection */
616 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
617 ice_calloc(hw, recps[rid].n_grp_count,
618 sizeof(struct ice_aqc_recipe_data_elem));
619 if (!recps[rid].root_buf)
622 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
623 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
625 /* Copy result indexes */
626 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
627 ICE_NONDMA_TO_NONDMA);
628 recps[rid].recp_created = true;
636 * ice_get_recp_to_prof_map - updates recipe to profile mapping
637 * @hw: pointer to hardware structure
639 * This function is used to populate recipe_to_profile matrix where index to
640 * this array is the recipe ID and the element is the mapping of which profiles
641 * is this recipe mapped to.
644 ice_get_recp_to_prof_map(struct ice_hw *hw)
646 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
649 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
652 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
653 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
654 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
656 ice_memcpy(profile_to_recipe[i], r_bitmap,
657 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
658 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
659 if (ice_is_bit_set(r_bitmap, j))
660 ice_set_bit(i, recipe_to_profile[j]);
665 * ice_init_def_sw_recp - initialize the recipe book keeping tables
666 * @hw: pointer to the HW struct
668 * Allocate memory for the entire recipe table and initialize the structures/
669 * entries corresponding to basic recipes.
671 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
673 struct ice_sw_recipe *recps;
676 recps = (struct ice_sw_recipe *)
677 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
679 return ICE_ERR_NO_MEMORY;
681 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
682 recps[i].root_rid = i;
683 INIT_LIST_HEAD(&recps[i].filt_rules);
684 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
685 INIT_LIST_HEAD(&recps[i].rg_list);
686 ice_init_lock(&recps[i].filt_rule_lock);
689 hw->switch_info->recp_list = recps;
695 * ice_aq_get_sw_cfg - get switch configuration
696 * @hw: pointer to the hardware structure
697 * @buf: pointer to the result buffer
698 * @buf_size: length of the buffer available for response
699 * @req_desc: pointer to requested descriptor
700 * @num_elems: pointer to number of elements
701 * @cd: pointer to command details structure or NULL
703 * Get switch configuration (0x0200) to be placed in 'buff'.
704 * This admin command returns information such as initial VSI/port number
705 * and switch ID it belongs to.
707 * NOTE: *req_desc is both an input/output parameter.
708 * The caller of this function first calls this function with *request_desc set
709 * to 0. If the response from f/w has *req_desc set to 0, all the switch
710 * configuration information has been returned; if non-zero (meaning not all
711 * the information was returned), the caller should call this function again
712 * with *req_desc set to the previous value returned by f/w to get the
713 * next block of switch configuration information.
715 * *num_elems is output only parameter. This reflects the number of elements
716 * in response buffer. The caller of this function to use *num_elems while
717 * parsing the response buffer.
719 static enum ice_status
720 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
721 u16 buf_size, u16 *req_desc, u16 *num_elems,
722 struct ice_sq_cd *cd)
724 struct ice_aqc_get_sw_cfg *cmd;
725 enum ice_status status;
726 struct ice_aq_desc desc;
728 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
729 cmd = &desc.params.get_sw_conf;
730 cmd->element = CPU_TO_LE16(*req_desc);
732 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
734 *req_desc = LE16_TO_CPU(cmd->element);
735 *num_elems = LE16_TO_CPU(cmd->num_elems);
743 * ice_alloc_sw - allocate resources specific to switch
744 * @hw: pointer to the HW struct
745 * @ena_stats: true to turn on VEB stats
746 * @shared_res: true for shared resource, false for dedicated resource
747 * @sw_id: switch ID returned
748 * @counter_id: VEB counter ID returned
750 * allocates switch resources (SWID and VEB counter) (0x0208)
/* NOTE(review): the return-type line, several braces and conditional lines
 * of this function are elided in this listing; tokens below are unchanged.
 */
753 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
756 struct ice_aqc_alloc_free_res_elem *sw_buf;
757 struct ice_aqc_res_elem *sw_ele;
758 enum ice_status status;
761 buf_len = sizeof(*sw_buf);
762 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
763 ice_malloc(hw, buf_len);
765 return ICE_ERR_NO_MEMORY;
767 /* Prepare buffer for switch ID.
768 * The number of resource entries in buffer is passed as 1 since only a
769 * single switch/VEB instance is allocated, and hence a single sw_id
772 sw_buf->num_elems = CPU_TO_LE16(1);
/* resource type: SWID, flagged shared or dedicated per caller request */
774 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
775 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
776 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
778 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
779 ice_aqc_opc_alloc_res, NULL);
782 goto ice_alloc_sw_exit;
/* FW returns the allocated switch ID in the first response element */
784 sw_ele = &sw_buf->elem[0];
785 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
788 /* Prepare buffer for VEB Counter */
789 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
790 struct ice_aqc_alloc_free_res_elem *counter_buf;
791 struct ice_aqc_res_elem *counter_ele;
793 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
794 ice_malloc(hw, buf_len);
796 status = ICE_ERR_NO_MEMORY;
797 goto ice_alloc_sw_exit;
800 /* The number of resource entries in buffer is passed as 1 since
801 * only a single switch/VEB instance is allocated, and hence a
802 * single VEB counter is requested.
804 counter_buf->num_elems = CPU_TO_LE16(1);
805 counter_buf->res_type =
806 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
807 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
808 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* counter allocation failed: release local buffer before bailing out */
812 ice_free(hw, counter_buf);
813 goto ice_alloc_sw_exit;
815 counter_ele = &counter_buf->elem[0];
816 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
817 ice_free(hw, counter_buf);
821 ice_free(hw, sw_buf);
826 * ice_free_sw - free resources specific to switch
827 * @hw: pointer to the HW struct
828 * @sw_id: switch ID returned
829 * @counter_id: VEB counter ID returned
831 * free switch resources (SWID and VEB counter) (0x0209)
833 * NOTE: This function frees multiple resources. It continues
834 * releasing other resources even after it encounters error.
835 * The error code returned is the last error it encountered.
/* NOTE(review): braces and some conditional lines of this function are
 * elided in this listing; tokens below are unchanged.
 */
837 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
839 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
840 enum ice_status status, ret_status;
843 buf_len = sizeof(*sw_buf);
844 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
845 ice_malloc(hw, buf_len);
847 return ICE_ERR_NO_MEMORY;
849 /* Prepare buffer to free for switch ID res.
850 * The number of resource entries in buffer is passed as 1 since only a
851 * single switch/VEB instance is freed, and hence a single sw_id
854 sw_buf->num_elems = CPU_TO_LE16(1);
855 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
856 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* free the SWID first; keep going on failure and report the last error */
858 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
859 ice_aqc_opc_free_res, NULL);
862 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
864 /* Prepare buffer to free for VEB Counter resource */
865 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
866 ice_malloc(hw, buf_len);
868 ice_free(hw, sw_buf);
869 return ICE_ERR_NO_MEMORY;
872 /* The number of resource entries in buffer is passed as 1 since only a
873 * single switch/VEB instance is freed, and hence a single VEB counter
876 counter_buf->num_elems = CPU_TO_LE16(1);
877 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
878 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
880 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
881 ice_aqc_opc_free_res, NULL);
883 ice_debug(hw, ICE_DBG_SW,
884 "VEB counter resource could not be freed\n");
888 ice_free(hw, counter_buf);
889 ice_free(hw, sw_buf);
895 * @hw: pointer to the HW struct
896 * @vsi_ctx: pointer to a VSI context struct
897 * @cd: pointer to command details structure or NULL
899 * Add a VSI context to the hardware (0x0210)
902 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
903 struct ice_sq_cd *cd)
905 struct ice_aqc_add_update_free_vsi_resp *res;
906 struct ice_aqc_add_get_update_free_vsi *cmd;
907 struct ice_aq_desc desc;
908 enum ice_status status;
910 cmd = &desc.params.vsi_cmd;
911 res = &desc.params.add_update_free_vsi_res;
913 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
915 if (!vsi_ctx->alloc_from_pool)
916 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
917 ICE_AQ_VSI_IS_VALID);
919 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
921 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
923 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
924 sizeof(vsi_ctx->info), cd);
927 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
928 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
929 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
937 * @hw: pointer to the HW struct
938 * @vsi_ctx: pointer to a VSI context struct
939 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
940 * @cd: pointer to command details structure or NULL
942 * Free VSI context info from hardware (0x0213)
945 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
946 bool keep_vsi_alloc, struct ice_sq_cd *cd)
948 struct ice_aqc_add_update_free_vsi_resp *resp;
949 struct ice_aqc_add_get_update_free_vsi *cmd;
950 struct ice_aq_desc desc;
951 enum ice_status status;
953 cmd = &desc.params.vsi_cmd;
954 resp = &desc.params.add_update_free_vsi_res;
956 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
958 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
960 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
962 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
964 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
965 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
973 * @hw: pointer to the HW struct
974 * @vsi_ctx: pointer to a VSI context struct
975 * @cd: pointer to command details structure or NULL
977 * Update VSI context in the hardware (0x0211)
980 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
981 struct ice_sq_cd *cd)
983 struct ice_aqc_add_update_free_vsi_resp *resp;
984 struct ice_aqc_add_get_update_free_vsi *cmd;
985 struct ice_aq_desc desc;
986 enum ice_status status;
988 cmd = &desc.params.vsi_cmd;
989 resp = &desc.params.add_update_free_vsi_res;
991 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
993 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
995 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
997 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
998 sizeof(vsi_ctx->info), cd);
1001 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1002 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1009 * ice_is_vsi_valid - check whether the VSI is valid or not
1010 * @hw: pointer to the HW struct
1011 * @vsi_handle: VSI handle
1013 * check whether the VSI is valid or not
1015 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1017 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1021 * ice_get_hw_vsi_num - return the HW VSI number
1022 * @hw: pointer to the HW struct
1023 * @vsi_handle: VSI handle
1025 * return the HW VSI number
1026 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1028 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1030 return hw->vsi_ctx[vsi_handle]->vsi_num;
1034 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1035 * @hw: pointer to the HW struct
1036 * @vsi_handle: VSI handle
1038 * return the VSI context entry for a given VSI handle
1040 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1042 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1046 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1047 * @hw: pointer to the HW struct
1048 * @vsi_handle: VSI handle
1049 * @vsi: VSI context pointer
1051 * save the VSI context entry for a given VSI handle
1054 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1056 hw->vsi_ctx[vsi_handle] = vsi;
1060 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1061 * @hw: pointer to the HW struct
1062 * @vsi_handle: VSI handle
1064 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1066 struct ice_vsi_ctx *vsi;
1069 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1072 ice_for_each_traffic_class(i) {
1073 if (vsi->lan_q_ctx[i]) {
1074 ice_free(hw, vsi->lan_q_ctx[i]);
1075 vsi->lan_q_ctx[i] = NULL;
1081 * ice_clear_vsi_ctx - clear the VSI context entry
1082 * @hw: pointer to the HW struct
1083 * @vsi_handle: VSI handle
1085 * clear the VSI context entry
1087 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1089 struct ice_vsi_ctx *vsi;
1091 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1093 ice_clear_vsi_q_ctx(hw, vsi_handle);
1095 hw->vsi_ctx[vsi_handle] = NULL;
1100 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1101 * @hw: pointer to the HW struct
1103 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1107 for (i = 0; i < ICE_MAX_VSI; i++)
1108 ice_clear_vsi_ctx(hw, i);
1112 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1113 * @hw: pointer to the HW struct
1114 * @vsi_handle: unique VSI handle provided by drivers
1115 * @vsi_ctx: pointer to a VSI context struct
1116 * @cd: pointer to command details structure or NULL
1118 * Add a VSI context to the hardware also add it into the VSI handle list.
1119 * If this function gets called after reset for existing VSIs then update
1120 * with the new HW VSI number in the corresponding VSI handle list entry.
1123 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1124 struct ice_sq_cd *cd)
1126 struct ice_vsi_ctx *tmp_vsi_ctx;
1127 enum ice_status status;
1129 if (vsi_handle >= ICE_MAX_VSI)
1130 return ICE_ERR_PARAM;
1131 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1134 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1136 /* Create a new VSI context */
1137 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1138 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1140 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1141 return ICE_ERR_NO_MEMORY;
1143 *tmp_vsi_ctx = *vsi_ctx;
1145 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1147 /* update with new HW VSI num */
1148 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1155 * ice_free_vsi- free VSI context from hardware and VSI handle list
1156 * @hw: pointer to the HW struct
1157 * @vsi_handle: unique VSI handle
1158 * @vsi_ctx: pointer to a VSI context struct
1159 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1160 * @cd: pointer to command details structure or NULL
1162 * Free VSI context info from hardware as well as from VSI handle list
1165 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1166 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1168 enum ice_status status;
1170 if (!ice_is_vsi_valid(hw, vsi_handle))
1171 return ICE_ERR_PARAM;
1172 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1173 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1175 ice_clear_vsi_ctx(hw, vsi_handle);
1181 * @hw: pointer to the HW struct
1182 * @vsi_handle: unique VSI handle
1183 * @vsi_ctx: pointer to a VSI context struct
1184 * @cd: pointer to command details structure or NULL
1186 * Update VSI context in the hardware
1189 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1190 struct ice_sq_cd *cd)
/* Validate the handle, translate it to the HW VSI number, then issue the
 * AQ update with the caller-supplied context.
 */
1192 if (!ice_is_vsi_valid(hw, vsi_handle))
1193 return ICE_ERR_PARAM;
1194 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1195 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1199 * ice_aq_get_vsi_params
1200 * @hw: pointer to the HW struct
1201 * @vsi_ctx: pointer to a VSI context struct
1202 * @cd: pointer to command details structure or NULL
1204 * Get VSI context info from hardware (0x0212)
/* Query VSI parameters from firmware into vsi_ctx->info and copy back the
 * response counters.  NOTE(review): listing elides lines (e.g. the success
 * check before the response copy-back); code kept byte-identical.
 */
1207 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1208 struct ice_sq_cd *cd)
1210 struct ice_aqc_add_get_update_free_vsi *cmd;
1211 struct ice_aqc_get_vsi_resp *resp;
1212 struct ice_aq_desc desc;
1213 enum ice_status status;
/* cmd and resp alias the same descriptor parameter area (request/response) */
1215 cmd = &desc.params.vsi_cmd;
1216 resp = &desc.params.get_vsi_resp;
1218 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
/* Mark the VSI number as valid so firmware treats it as a lookup key */
1220 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1222 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1223 sizeof(vsi_ctx->info), cd);
1225 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1227 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1228 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1235 * ice_aq_add_update_mir_rule - add/update a mirror rule
1236 * @hw: pointer to the HW struct
1237 * @rule_type: Rule Type
1238 * @dest_vsi: VSI number to which packets will be mirrored
1239 * @count: length of the list
1240 * @mr_buf: buffer for list of mirrored VSI numbers
1241 * @cd: pointer to command details structure or NULL
1244 * Add/Update Mirror Rule (0x260).
/* Add or update a mirror rule.  For VPORT (per-VSI) rule types a list of
 * mirrored VSI numbers is built in mr_list; for PPORT (physical port) types
 * no list is allowed.  *rule_id is in/out: pass ICE_INVAL_MIRROR_RULE_ID to
 * add, an existing ID to update; on success the firmware-assigned ID is
 * written back.  NOTE(review): listing elides lines; code byte-identical.
 */
1247 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1248 u16 count, struct ice_mir_rule_buf *mr_buf,
1249 struct ice_sq_cd *cd, u16 *rule_id)
1251 struct ice_aqc_add_update_mir_rule *cmd;
1252 struct ice_aq_desc desc;
1253 enum ice_status status;
1254 __le16 *mr_list = NULL;
1257 switch (rule_type) {
1258 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1259 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1260 /* Make sure count and mr_buf are set for these rule_types */
1261 if (!(count && mr_buf))
1262 return ICE_ERR_PARAM;
1264 buf_size = count * sizeof(__le16);
1265 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1267 return ICE_ERR_NO_MEMORY;
1269 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1270 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1271 /* Make sure count and mr_buf are not set for these
1274 if (count || mr_buf)
1275 return ICE_ERR_PARAM;
1278 ice_debug(hw, ICE_DBG_SW,
1279 "Error due to unsupported rule_type %u\n", rule_type);
1280 return ICE_ERR_OUT_OF_RANGE;
1283 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1285 /* Pre-process 'mr_buf' items for add/update of virtual port
1286 * ingress/egress mirroring (but not physical port ingress/egress
1292 for (i = 0; i < count; i++) {
1295 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1297 /* Validate specified VSI number, make sure it is less
1298 * than ICE_MAX_VSI, if not return with error.
1300 if (id >= ICE_MAX_VSI) {
1301 ice_debug(hw, ICE_DBG_SW,
1302 "Error VSI index (%u) out-of-range\n",
/* Free the partially-built list before bailing out */
1304 ice_free(hw, mr_list);
1305 return ICE_ERR_OUT_OF_RANGE;
1308 /* add VSI to mirror rule */
1311 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1312 else /* remove VSI from mirror rule */
1313 mr_list[i] = CPU_TO_LE16(id);
/* A valid incoming rule ID means "update existing rule" */
1317 cmd = &desc.params.add_update_rule;
1318 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1319 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1320 ICE_AQC_RULE_ID_VALID_M);
1321 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1322 cmd->num_entries = CPU_TO_LE16(count);
1323 cmd->dest = CPU_TO_LE16(dest_vsi);
1325 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* Return the rule ID firmware assigned/confirmed */
1327 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1329 ice_free(hw, mr_list);
1335 * ice_aq_delete_mir_rule - delete a mirror rule
1336 * @hw: pointer to the HW struct
1337 * @rule_id: Mirror rule ID (to be deleted)
1338 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1339 * otherwise it is returned to the shared pool
1340 * @cd: pointer to command details structure or NULL
1342 * Delete Mirror Rule (0x261).
/* Delete a mirror rule by ID.  keep_allocd keeps the resource in this PF's
 * pool instead of returning it to the shared pool.
 * NOTE(review): listing elides lines; code byte-identical.
 */
1345 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1346 struct ice_sq_cd *cd)
1348 struct ice_aqc_delete_mir_rule *cmd;
1349 struct ice_aq_desc desc;
1351 /* rule_id should be in the range 0...63 */
1352 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1353 return ICE_ERR_OUT_OF_RANGE;
1355 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1357 cmd = &desc.params.del_rule;
/* Firmware requires the valid bit set alongside the rule ID */
1358 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1359 cmd->rule_id = CPU_TO_LE16(rule_id);
1362 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1364 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1368 * ice_aq_alloc_free_vsi_list
1369 * @hw: pointer to the HW struct
1370 * @vsi_list_id: VSI list ID returned or used for lookup
1371 * @lkup_type: switch rule filter lookup type
1372 * @opc: switch rules population command type - pass in the command opcode
1374 * allocates or free a VSI list resource
/* Allocate or free a VSI list resource via the shared alloc/free-resource AQ
 * path.  The resource type depends on the lookup type: VLAN lookups use a
 * prune list, everything else a replication list.  On alloc, the new list ID
 * is written to *vsi_list_id; on free, *vsi_list_id names the list to free.
 * NOTE(review): listing elides lines; code byte-identical.
 */
1376 static enum ice_status
1377 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1378 enum ice_sw_lkup_type lkup_type,
1379 enum ice_adminq_opc opc)
1381 struct ice_aqc_alloc_free_res_elem *sw_buf;
1382 struct ice_aqc_res_elem *vsi_ele;
1383 enum ice_status status;
1386 buf_len = sizeof(*sw_buf);
1387 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1388 ice_malloc(hw, buf_len);
1390 return ICE_ERR_NO_MEMORY;
1391 sw_buf->num_elems = CPU_TO_LE16(1);
/* Pick replication vs prune resource type from the lookup type */
1393 if (lkup_type == ICE_SW_LKUP_MAC ||
1394 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1395 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1396 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1397 lkup_type == ICE_SW_LKUP_PROMISC ||
1398 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1399 lkup_type == ICE_SW_LKUP_LAST) {
1400 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1401 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1403 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1405 status = ICE_ERR_PARAM;
1406 goto ice_aq_alloc_free_vsi_list_exit;
/* For a free, tell firmware which list ID to release */
1409 if (opc == ice_aqc_opc_free_res)
1410 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1412 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1414 goto ice_aq_alloc_free_vsi_list_exit;
/* For an alloc, hand the firmware-assigned list ID back to the caller */
1416 if (opc == ice_aqc_opc_alloc_res) {
1417 vsi_ele = &sw_buf->elem[0];
1418 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1421 ice_aq_alloc_free_vsi_list_exit:
1422 ice_free(hw, sw_buf);
1427 * ice_aq_set_storm_ctrl - Sets storm control configuration
1428 * @hw: pointer to the HW struct
1429 * @bcast_thresh: represents the upper threshold for broadcast storm control
1430 * @mcast_thresh: represents the upper threshold for multicast storm control
1431 * @ctl_bitmask: storm control control knobs
1433 * Sets the storm control configuration (0x0280)
1436 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
/* Direct (no-buffer) AQ command: thresholds are masked to the valid
 * threshold field width before being placed in the descriptor.
 */
1439 struct ice_aqc_storm_cfg *cmd;
1440 struct ice_aq_desc desc;
1442 cmd = &desc.params.storm_conf;
1444 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1446 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1447 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1448 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1450 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1454 * ice_aq_get_storm_ctrl - gets storm control configuration
1455 * @hw: pointer to the HW struct
1456 * @bcast_thresh: represents the upper threshold for broadcast storm control
1457 * @mcast_thresh: represents the upper threshold for multicast storm control
1458 * @ctl_bitmask: storm control control knobs
1460 * Gets the storm control configuration (0x0281)
1463 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
/* Read back the storm-control configuration; output pointers are filled
 * from the response descriptor.  NOTE(review): listing elides the success
 * check and the NULL-pointer guards around the copy-back.
 */
1466 enum ice_status status;
1467 struct ice_aq_desc desc;
1469 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1471 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1473 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1476 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1479 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1482 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1489 * ice_aq_sw_rules - add/update/remove switch rules
1490 * @hw: pointer to the HW struct
1491 * @rule_list: pointer to switch rule population list
1492 * @rule_list_sz: total size of the rule list in bytes
1493 * @num_rules: number of switch rules in the rule_list
1494 * @opc: switch rules population command type - pass in the command opcode
1495 * @cd: pointer to command details structure or NULL
1497 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
/* Send a batch of switch rules (add/update/remove) to firmware in one AQ
 * command.  rule_list is the caller-built buffer of rule_list_sz bytes
 * holding num_rules back-to-back ice_aqc_sw_rules_elem entries.
 */
1499 static enum ice_status
1500 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1501 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1503 struct ice_aq_desc desc;
1505 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three switch-rule opcodes are legal here */
1507 if (opc != ice_aqc_opc_add_sw_rules &&
1508 opc != ice_aqc_opc_update_sw_rules &&
1509 opc != ice_aqc_opc_remove_sw_rules)
1510 return ICE_ERR_PARAM;
1512 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: the buffer carries command data to firmware */
1514 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1515 desc.params.sw_rules.num_rules_fltr_entry_index =
1516 CPU_TO_LE16(num_rules);
1517 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1521 * ice_aq_add_recipe - add switch recipe
1522 * @hw: pointer to the HW struct
1523 * @s_recipe_list: pointer to switch rule population list
1524 * @num_recipes: number of switch recipes in the list
1525 * @cd: pointer to command details structure or NULL
1530 ice_aq_add_recipe(struct ice_hw *hw,
1531 struct ice_aqc_recipe_data_elem *s_recipe_list,
1532 u16 num_recipes, struct ice_sq_cd *cd)
/* Add num_recipes switch recipes from s_recipe_list in a single AQ call. */
1534 struct ice_aqc_add_get_recipe *cmd;
1535 struct ice_aq_desc desc;
1538 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1539 cmd = &desc.params.add_get_recipe;
1540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1542 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: buffer carries recipe data to firmware */
1543 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1545 buf_size = num_recipes * sizeof(*s_recipe_list);
1547 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1551 * ice_aq_get_recipe - get switch recipe
1552 * @hw: pointer to the HW struct
1553 * @s_recipe_list: pointer to switch rule population list
1554 * @num_recipes: pointer to the number of recipes (input and output)
1555 * @recipe_root: root recipe number of recipe(s) to retrieve
1556 * @cd: pointer to command details structure or NULL
1560 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1561 * On output, *num_recipes will equal the number of entries returned in
1564 * The caller must supply enough space in s_recipe_list to hold all possible
1565 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1568 ice_aq_get_recipe(struct ice_hw *hw,
1569 struct ice_aqc_recipe_data_elem *s_recipe_list,
1570 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
/* Retrieve the recipe(s) rooted at recipe_root.  *num_recipes is in/out:
 * it must come in as ICE_MAX_NUM_RECIPES (caller supplies worst-case
 * buffer) and goes out as the number of entries firmware returned.
 */
1572 struct ice_aqc_add_get_recipe *cmd;
1573 struct ice_aq_desc desc;
1574 enum ice_status status;
1577 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1578 return ICE_ERR_PARAM;
1580 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1581 cmd = &desc.params.add_get_recipe;
1582 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1584 cmd->return_index = CPU_TO_LE16(recipe_root);
1585 cmd->num_sub_recipes = 0;
1587 buf_size = *num_recipes * sizeof(*s_recipe_list);
1589 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1590 /* cppcheck-suppress constArgument */
/* Firmware writes the actual count into the descriptor; pass it back */
1591 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1597 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1598 * @hw: pointer to the HW struct
1599 * @profile_id: package profile ID to associate the recipe with
1600 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1601 * @cd: pointer to command details structure or NULL
1602 * Recipe to profile association (0x0291)
1605 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1606 struct ice_sq_cd *cd)
/* Associate the recipes set in r_bitmap with the given package profile.
 * Direct command: the bitmap travels inside the descriptor, no buffer.
 */
1608 struct ice_aqc_recipe_to_profile *cmd;
1609 struct ice_aq_desc desc;
1611 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1612 cmd = &desc.params.recipe_to_profile;
1613 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1614 cmd->profile_id = CPU_TO_LE16(profile_id);
1615 /* Set the recipe ID bit in the bitmask to let the device know which
1616 * profile we are associating the recipe to
1618 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1619 ICE_NONDMA_TO_NONDMA);
1621 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1625 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1626 * @hw: pointer to the HW struct
1627 * @profile_id: package profile ID to associate the recipe with
1628 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1629 * @cd: pointer to command details structure or NULL
1630 * Associate profile ID with given recipe (0x0293)
1633 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1634 struct ice_sq_cd *cd)
/* Read back which recipes are associated with profile_id into r_bitmap.
 * Mirror of ice_aq_map_recipe_to_profile: bitmap comes back in the
 * descriptor's recipe_assoc field.
 */
1636 struct ice_aqc_recipe_to_profile *cmd;
1637 struct ice_aq_desc desc;
1638 enum ice_status status;
1640 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1641 cmd = &desc.params.recipe_to_profile;
1642 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1643 cmd->profile_id = CPU_TO_LE16(profile_id);
1645 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1647 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1648 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1654 * ice_alloc_recipe - add recipe resource
1655 * @hw: pointer to the hardware structure
1656 * @rid: recipe ID returned as response to AQ call
/* Allocate one shared recipe resource; on success *rid receives the recipe
 * ID firmware assigned.  NOTE(review): listing elides lines (e.g. the
 * NULL check after ice_malloc and the success check before reading rid).
 */
1658 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1660 struct ice_aqc_alloc_free_res_elem *sw_buf;
1661 enum ice_status status;
1664 buf_len = sizeof(*sw_buf);
1665 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1667 return ICE_ERR_NO_MEMORY;
1669 sw_buf->num_elems = CPU_TO_LE16(1);
/* Request a RECIPE-type resource, flagged shared across functions */
1670 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1671 ICE_AQC_RES_TYPE_S) |
1672 ICE_AQC_RES_TYPE_FLAG_SHARED);
1673 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1674 ice_aqc_opc_alloc_res, NULL);
1676 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1677 ice_free(hw, sw_buf);
1682 /* ice_init_port_info - Initialize port_info with switch configuration data
1683 * @pi: pointer to port_info
1684 * @vsi_port_num: VSI number or port number
1685 * @type: Type of switch element (port or VSI)
1686 * @swid: switch ID of the switch the element is attached to
1687 * @pf_vf_num: PF or VF number
1688 * @is_vf: true if the element is a VF, false otherwise
1691 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1692 u16 swid, u16 pf_vf_num, bool is_vf)
/* Populate port_info for a physical port element from get-switch-config
 * data; other element types log a debug message.  NOTE(review): listing
 * elides lines (switch statement opening, assignments of swid/is_vf).
 */
1695 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
/* Low bits of vsi_port_num carry the logical port number */
1696 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1698 pi->pf_vf_num = pf_vf_num;
/* No default Tx/Rx VSI until explicitly configured */
1700 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1701 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1704 ice_debug(pi->hw, ICE_DBG_SW,
1705 "incorrect VSI/port type received\n");
1710 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1711 * @hw: pointer to the hardware structure
/* Fetch the initial switch configuration from firmware and initialize
 * hw->port_info for the port element(s) found.  ice_aq_get_sw_cfg may need
 * to be called repeatedly: a non-zero req_desc on return means more data
 * is pending.  NOTE(review): listing elides lines; code byte-identical.
 */
1713 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1715 struct ice_aqc_get_sw_cfg_resp *rbuf;
1716 enum ice_status status;
1717 u16 num_total_ports;
/* This function only expects a single (non-SR-IOV aggregated) port */
1723 num_total_ports = 1;
1725 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1726 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1729 return ICE_ERR_NO_MEMORY;
1731 /* Multiple calls to ice_aq_get_sw_cfg may be required
1732 * to get all the switch configuration information. The need
1733 * for additional calls is indicated by ice_aq_get_sw_cfg
1734 * writing a non-zero value in req_desc
1737 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1738 &req_desc, &num_elems, NULL);
1743 for (i = 0; i < num_elems; i++) {
1744 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1745 u16 pf_vf_num, swid, vsi_port_num;
/* Decode the packed element: port number, owning function, switch ID */
1749 ele = rbuf[i].elements;
1750 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1751 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1753 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1754 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1756 swid = LE16_TO_CPU(ele->swid);
1758 if (LE16_TO_CPU(ele->pf_vf_num) &
1759 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
/* High bits of vsi_port_num encode the element type */
1762 type = LE16_TO_CPU(ele->vsi_port_num) >>
1763 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1766 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1767 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1768 if (j == num_total_ports) {
1769 ice_debug(hw, ICE_DBG_SW,
1770 "more ports than expected\n");
1771 status = ICE_ERR_CFG;
1774 ice_init_port_info(hw->port_info,
1775 vsi_port_num, type, swid,
1783 } while (req_desc && !status);
1787 ice_free(hw, (void *)rbuf);
1793 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1794 * @hw: pointer to the hardware structure
1795 * @fi: filter info structure to fill/update
1797 * This helper function populates the lb_en and lan_en elements of the provided
1798 * ice_fltr_info struct using the switch's type and characteristics of the
1799 * switch rule being configured.
/* Derive the lb_en/lan_en action flags for a filter from the switch's
 * characteristics and the filter's lookup/action type (see the in-body
 * comment for the full decision table).  NOTE(review): listing elides
 * lines, including the actual lb_en/lan_en assignments.
 */
1801 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1805 if ((fi->flag & ICE_FLTR_TX) &&
1806 (fi->fltr_act == ICE_FWD_TO_VSI ||
1807 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1808 fi->fltr_act == ICE_FWD_TO_Q ||
1809 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1810 /* Setting LB for prune actions will result in replicated
1811 * packets to the internal switch that will be dropped.
1813 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1816 /* Set lan_en to TRUE if
1817 * 1. The switch is a VEB AND
1819 * 2.1 The lookup is a directional lookup like ethertype,
1820 * promiscuous, ethertype-MAC, promiscuous-VLAN
1821 * and default-port OR
1822 * 2.2 The lookup is VLAN, OR
1823 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1824 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1828 * The switch is a VEPA.
1830 * In all other cases, the LAN enable has to be set to false.
1833 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1834 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1835 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1836 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1837 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1838 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1839 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1840 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1841 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1842 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1851 * ice_fill_sw_rule - Helper function to fill switch rule structure
1852 * @hw: pointer to the hardware structure
1853 * @f_info: entry containing packet forwarding information
1854 * @s_rule: switch rule structure to be filled in based on mac_entry
1855 * @opc: switch rules population command type - pass in the command opcode
/* Build one lookup Tx/Rx switch rule element from f_info.  Fills the
 * action word from the forward action, then patches the dummy Ethernet
 * header (DA, ethertype, VLAN TCI) according to the lookup type.
 * NOTE(review): listing elides lines (break statements, brace closures,
 * some masks); code is byte-identical to the listing.
 */
1858 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1859 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
1861 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* A remove only needs the rule index; no header or action required */
1869 if (opc == ice_aqc_opc_remove_sw_rules) {
1870 s_rule->pdata.lkup_tx_rx.act = 0;
1871 s_rule->pdata.lkup_tx_rx.index =
1872 CPU_TO_LE16(f_info->fltr_rule_id);
1873 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1877 eth_hdr_sz = sizeof(dummy_eth_header);
1878 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1880 /* initialize the ether header with a dummy header */
1881 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1882 ice_fill_sw_info(hw, f_info);
/* Encode the forward action into the rule's action word */
1884 switch (f_info->fltr_act) {
1885 case ICE_FWD_TO_VSI:
1886 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1887 ICE_SINGLE_ACT_VSI_ID_M;
1888 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1889 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1890 ICE_SINGLE_ACT_VALID_BIT;
1892 case ICE_FWD_TO_VSI_LIST:
1893 act |= ICE_SINGLE_ACT_VSI_LIST;
1894 act |= (f_info->fwd_id.vsi_list_id <<
1895 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1896 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1897 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1898 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1899 ICE_SINGLE_ACT_VALID_BIT;
1902 act |= ICE_SINGLE_ACT_TO_Q;
1903 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1904 ICE_SINGLE_ACT_Q_INDEX_M;
1906 case ICE_DROP_PACKET:
1907 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1908 ICE_SINGLE_ACT_VALID_BIT;
1910 case ICE_FWD_TO_QGRP:
/* Queue-group region size is log2 of the group size */
1911 q_rgn = f_info->qgrp_size > 0 ?
1912 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1913 act |= ICE_SINGLE_ACT_TO_Q;
1914 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1915 ICE_SINGLE_ACT_Q_INDEX_M;
1916 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1917 ICE_SINGLE_ACT_Q_REGION_M;
1924 act |= ICE_SINGLE_ACT_LB_ENABLE;
1926 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Gather header-patch inputs (DA, VLAN ID, ethertype) per lookup type */
1928 switch (f_info->lkup_type) {
1929 case ICE_SW_LKUP_MAC:
1930 daddr = f_info->l_data.mac.mac_addr;
1932 case ICE_SW_LKUP_VLAN:
1933 vlan_id = f_info->l_data.vlan.vlan_id;
1934 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1935 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1936 act |= ICE_SINGLE_ACT_PRUNE;
1937 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1940 case ICE_SW_LKUP_ETHERTYPE_MAC:
1941 daddr = f_info->l_data.ethertype_mac.mac_addr;
1943 case ICE_SW_LKUP_ETHERTYPE:
1944 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1945 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1947 case ICE_SW_LKUP_MAC_VLAN:
1948 daddr = f_info->l_data.mac_vlan.mac_addr;
1949 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1951 case ICE_SW_LKUP_PROMISC_VLAN:
1952 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1954 case ICE_SW_LKUP_PROMISC:
1955 daddr = f_info->l_data.mac_vlan.mac_addr;
/* Rx vs Tx rule type comes from the filter's direction flag */
1961 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1962 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1963 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1965 /* Recipe set depending on lookup type */
1966 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1967 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1968 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1971 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1972 ICE_NONDMA_TO_NONDMA);
/* Only program the VLAN TCI if a valid VLAN ID was set above */
1974 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1975 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1976 *off = CPU_TO_BE16(vlan_id);
1979 /* Create the switch rule with the final dummy Ethernet header */
1980 if (opc != ice_aqc_opc_update_sw_rules)
1981 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1985 * ice_add_marker_act
1986 * @hw: pointer to the hardware structure
1987 * @m_ent: the management entry for which sw marker needs to be added
1988 * @sw_marker: sw marker to tag the Rx descriptor with
1989 * @l_id: large action resource ID
1991 * Create a large action to hold software marker and update the switch rule
1992 * entry pointed by m_ent with newly created large action
/* Attach a software marker to an existing MAC rule: build a 3-entry large
 * action (forward + profile-index generic + marker generic) plus an updated
 * lookup rule pointing at it, and submit both in one AQ buffer.
 * NOTE(review): listing elides lines; code byte-identical.
 */
1994 static enum ice_status
1995 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1996 u16 sw_marker, u16 l_id)
1998 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1999 /* For software marker we need 3 large actions
2000 * 1. FWD action: FWD TO VSI or VSI LIST
2001 * 2. GENERIC VALUE action to hold the profile ID
2002 * 3. GENERIC VALUE action to hold the software marker ID
2004 const u16 num_lg_acts = 3;
2005 enum ice_status status;
/* Markers are only supported on MAC lookup rules */
2011 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2012 return ICE_ERR_PARAM;
2014 /* Create two back-to-back switch rules and submit them to the HW using
2015 * one memory buffer:
2019 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2020 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2021 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2023 return ICE_ERR_NO_MEMORY;
/* Second rule starts right after the large action in the same buffer */
2025 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2027 /* Fill in the first switch rule i.e. large action */
2028 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2029 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2030 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2032 /* First action VSI forwarding or VSI list forwarding depending on how
2035 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2036 m_ent->fltr_info.fwd_id.hw_vsi_id;
2038 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2039 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2040 ICE_LG_ACT_VSI_LIST_ID_M;
2041 if (m_ent->vsi_count > 1)
2042 act |= ICE_LG_ACT_VSI_LIST;
2043 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2045 /* Second action descriptor type */
2046 act = ICE_LG_ACT_GENERIC;
2048 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2049 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* Marker lands at the Rx-descriptor profile-index offset */
2051 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2052 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2054 /* Third action Marker value */
2055 act |= ICE_LG_ACT_GENERIC;
2056 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2057 ICE_LG_ACT_GENERIC_VALUE_M;
2059 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2061 /* call the fill switch rule to fill the lookup Tx Rx structure */
2062 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2063 ice_aqc_opc_update_sw_rules);
2065 /* Update the action to point to the large action ID */
2066 rx_tx->pdata.lkup_tx_rx.act =
2067 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2068 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2069 ICE_SINGLE_ACT_PTR_VAL_M));
2071 /* Use the filter rule ID of the previously created rule with single
2072 * act. Once the update happens, hardware will treat this as large
2075 rx_tx->pdata.lkup_tx_rx.index =
2076 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2078 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2079 ice_aqc_opc_update_sw_rules, NULL);
/* Record the marker/large-action bookkeeping on the management entry */
2081 m_ent->lg_act_idx = l_id;
2082 m_ent->sw_marker_id = sw_marker;
2085 ice_free(hw, lg_act);
2090 * ice_add_counter_act - add/update filter rule with counter action
2091 * @hw: pointer to the hardware structure
2092 * @m_ent: the management entry for which counter needs to be added
2093 * @counter_id: VLAN counter ID returned as part of allocate resource
2094 * @l_id: large action resource ID
/* Attach a statistics counter to an existing MAC rule: build a 2-entry
 * large action (forward + counter) plus an updated lookup rule pointing at
 * it, and submit both in one AQ buffer.  Structure parallels
 * ice_add_marker_act.  NOTE(review): listing elides lines.
 */
2096 static enum ice_status
2097 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2098 u16 counter_id, u16 l_id)
2100 struct ice_aqc_sw_rules_elem *lg_act;
2101 struct ice_aqc_sw_rules_elem *rx_tx;
2102 enum ice_status status;
2103 /* 2 actions will be added while adding a large action counter */
2104 const int num_acts = 2;
/* Counters are only supported on MAC lookup rules */
2111 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2112 return ICE_ERR_PARAM;
2114 /* Create two back-to-back switch rules and submit them to the HW using
2115 * one memory buffer:
2119 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2120 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2121 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2124 return ICE_ERR_NO_MEMORY;
2126 rx_tx = (struct ice_aqc_sw_rules_elem *)
2127 ((u8 *)lg_act + lg_act_size);
2129 /* Fill in the first switch rule i.e. large action */
2130 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2131 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2132 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2134 /* First action VSI forwarding or VSI list forwarding depending on how
2137 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2138 m_ent->fltr_info.fwd_id.hw_vsi_id;
2140 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2141 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2142 ICE_LG_ACT_VSI_LIST_ID_M;
2143 if (m_ent->vsi_count > 1)
2144 act |= ICE_LG_ACT_VSI_LIST;
2145 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2147 /* Second action counter ID */
2148 act = ICE_LG_ACT_STAT_COUNT;
2149 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2150 ICE_LG_ACT_STAT_COUNT_M;
2151 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2153 /* call the fill switch rule to fill the lookup Tx Rx structure */
2154 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2155 ice_aqc_opc_update_sw_rules);
/* Repoint the lookup rule's action at the large action by index */
2157 act = ICE_SINGLE_ACT_PTR;
2158 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2159 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2161 /* Use the filter rule ID of the previously created rule with single
2162 * act. Once the update happens, hardware will treat this as large
2165 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2166 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2168 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2169 ice_aqc_opc_update_sw_rules, NULL);
/* Record the counter/large-action bookkeeping on the management entry */
2171 m_ent->lg_act_idx = l_id;
2172 m_ent->counter_index = counter_id;
2175 ice_free(hw, lg_act);
2180 * ice_create_vsi_list_map
2181 * @hw: pointer to the hardware structure
2182 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2183 * @num_vsi: number of VSI handles in the array
2184 * @vsi_list_id: VSI list ID generated as part of allocate resource
2186 * Helper function to create a new entry of VSI list ID to VSI mapping
2187 * using the given VSI list ID
/* Allocate a VSI-list-ID -> VSI-handle-bitmap entry, mark the given handles
 * in it, and link it into the switch's vsi_list_map_head list.  Returns the
 * new map entry (NULL-on-alloc-failure handling appears elided in this
 * listing).
 */
2189 static struct ice_vsi_list_map_info *
2190 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2193 struct ice_switch_info *sw = hw->switch_info;
2194 struct ice_vsi_list_map_info *v_map;
2197 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2202 v_map->vsi_list_id = vsi_list_id;
/* One bit per VSI handle that belongs to this list */
2204 for (i = 0; i < num_vsi; i++)
2205 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2207 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2212 * ice_update_vsi_list_rule
2213 * @hw: pointer to the hardware structure
2214 * @vsi_handle_arr: array of VSI handles to form a VSI list
2215 * @num_vsi: number of VSI handles in the array
2216 * @vsi_list_id: VSI list ID generated as part of allocate resource
2217 * @remove: Boolean value to indicate if this is a remove action
2218 * @opc: switch rules population command type - pass in the command opcode
2219 * @lkup_type: lookup type of the filter
2221 * Call AQ command to add a new switch rule or update existing switch rule
2222 * using the given VSI list ID
/* Build and submit a VSI-list switch rule that sets or clears membership of
 * the given VSI handles in vsi_list_id.  Rule type (replication vs prune,
 * set vs clear) is chosen from lkup_type and the remove flag.
 * NOTE(review): listing elides lines; code byte-identical.
 */
2224 static enum ice_status
2225 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2226 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2227 enum ice_sw_lkup_type lkup_type)
2229 struct ice_aqc_sw_rules_elem *s_rule;
2230 enum ice_status status;
2236 return ICE_ERR_PARAM;
/* Same replication-vs-prune split as ice_aq_alloc_free_vsi_list */
2238 if (lkup_type == ICE_SW_LKUP_MAC ||
2239 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2240 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2241 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2242 lkup_type == ICE_SW_LKUP_PROMISC ||
2243 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2244 lkup_type == ICE_SW_LKUP_LAST)
2245 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2246 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2247 else if (lkup_type == ICE_SW_LKUP_VLAN)
2248 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2249 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2251 return ICE_ERR_PARAM;
2253 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2254 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2256 return ICE_ERR_NO_MEMORY;
2257 for (i = 0; i < num_vsi; i++) {
2258 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2259 status = ICE_ERR_PARAM;
2262 /* AQ call requires hw_vsi_id(s) */
2263 s_rule->pdata.vsi_list.vsi[i] =
2264 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2267 s_rule->type = CPU_TO_LE16(type);
2268 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2269 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2271 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2274 ice_free(hw, s_rule);
2279 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2280 * @hw: pointer to the HW struct
2281 * @vsi_handle_arr: array of VSI handles to form a VSI list
2282 * @num_vsi: number of VSI handles in the array
2283 * @vsi_list_id: stores the ID of the VSI list to be created
2284 * @lkup_type: switch rule filter's lookup type
/* Allocate a new VSI list resource (ID returned in *vsi_list_id), then
 * populate it with the given VSI handles via an add-switch-rules call.
 */
2286 static enum ice_status
2287 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2288 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2290 enum ice_status status;
2292 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2293 ice_aqc_opc_alloc_res);
2297 /* Update the newly created VSI list to include the specified VSIs */
2298 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2299 *vsi_list_id, false,
2300 ice_aqc_opc_add_sw_rules, lkup_type);
2304 * ice_create_pkt_fwd_rule
2305 * @hw: pointer to the hardware structure
2306 * @f_entry: entry containing packet forwarding information
2308 * Create switch rule with given filter information and add an entry
2309 * to the corresponding filter management list to track this switch rule
2312 static enum ice_status
2313 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2314 struct ice_fltr_list_entry *f_entry)
2316 struct ice_fltr_mgmt_list_entry *fm_entry;
2317 struct ice_aqc_sw_rules_elem *s_rule;
2318 enum ice_sw_lkup_type l_type;
2319 struct ice_sw_recipe *recp;
2320 enum ice_status status;
/* ETH_HDR-sized element: the rule carries the dummy ethernet header */
2322 s_rule = (struct ice_aqc_sw_rules_elem *)
2323 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2325 return ICE_ERR_NO_MEMORY;
2326 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2327 ice_malloc(hw, sizeof(*fm_entry));
2329 status = ICE_ERR_NO_MEMORY;
2330 goto ice_create_pkt_fwd_rule_exit;
2333 fm_entry->fltr_info = f_entry->fltr_info;
2335 /* Initialize all the fields for the management entry */
2336 fm_entry->vsi_count = 1;
2337 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2338 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2339 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2341 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2342 ice_aqc_opc_add_sw_rules);
2344 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2345 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure free the tracking entry and bail (s_rule freed at exit) */
2347 ice_free(hw, fm_entry);
2348 goto ice_create_pkt_fwd_rule_exit;
/* FW returned the rule index; record it in both the caller's entry and
 * the management entry so the rule can be updated/removed later.
 */
2351 f_entry->fltr_info.fltr_rule_id =
2352 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2353 fm_entry->fltr_info.fltr_rule_id =
2354 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2356 /* The book keeping entries will get removed when base driver
2357 * calls remove filter AQ command
2359 l_type = fm_entry->fltr_info.lkup_type;
2360 recp = &hw->switch_info->recp_list[l_type];
2361 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2363 ice_create_pkt_fwd_rule_exit:
2364 ice_free(hw, s_rule);
2369 * ice_update_pkt_fwd_rule
2370 * @hw: pointer to the hardware structure
2371 * @f_info: filter information for switch rule
2373 * Call AQ command to update a previously created switch rule with a
2376 static enum ice_status
2377 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2379 struct ice_aqc_sw_rules_elem *s_rule;
2380 enum ice_status status;
2382 s_rule = (struct ice_aqc_sw_rules_elem *)
2383 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2385 return ICE_ERR_NO_MEMORY;
2387 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its FW-assigned rule ID */
2389 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2391 /* Update switch rule with new rule set to forward VSI list */
2392 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2393 ice_aqc_opc_update_sw_rules, NULL);
2395 ice_free(hw, s_rule);
2400 * ice_update_sw_rule_bridge_mode
2401 * @hw: pointer to the HW struct
2403 * Updates unicast switch filter rules based on VEB/VEPA mode
2405 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2407 struct ice_switch_info *sw = hw->switch_info;
2408 struct ice_fltr_mgmt_list_entry *fm_entry;
2409 enum ice_status status = ICE_SUCCESS;
2410 struct LIST_HEAD_TYPE *rule_head;
2411 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC-lookup rules are affected by a bridge-mode change */
2413 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2414 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2416 ice_acquire_lock(rule_lock);
2417 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2419 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2420 u8 *addr = fi->l_data.mac.mac_addr;
2422 /* Update unicast Tx rules to reflect the selected
2425 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2426 (fi->fltr_act == ICE_FWD_TO_VSI ||
2427 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2428 fi->fltr_act == ICE_FWD_TO_Q ||
2429 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* Re-issue the rule so FW applies the current bridge mode */
2430 status = ice_update_pkt_fwd_rule(hw, fi);
2436 ice_release_lock(rule_lock);
2442 * ice_add_update_vsi_list
2443 * @hw: pointer to the hardware structure
2444 * @m_entry: pointer to current filter management list entry
2445 * @cur_fltr: filter information from the book keeping entry
2446 * @new_fltr: filter information with the new VSI to be added
2448 * Call AQ command to add or update previously created VSI list with new VSI.
2450 * Helper function to do book keeping associated with adding filter information
2451 * The algorithm to do the book keeping is described below :
2452 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2453 * if only one VSI has been added till now
2454 * Allocate a new VSI list and add two VSIs
2455 * to this list using switch rule command
2456 * Update the previously created switch rule with the
2457 * newly created VSI list ID
2458 * if a VSI list was previously created
2459 * Add the new VSI to the previously created VSI list set
2460 * using the update switch rule command
2462 static enum ice_status
2463 ice_add_update_vsi_list(struct ice_hw *hw,
2464 struct ice_fltr_mgmt_list_entry *m_entry,
2465 struct ice_fltr_info *cur_fltr,
2466 struct ice_fltr_info *new_fltr)
2468 enum ice_status status = ICE_SUCCESS;
2469 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding rules cannot be merged into VSI lists */
2471 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2472 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2473 return ICE_ERR_NOT_IMPL;
2475 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2476 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2477 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2478 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2479 return ICE_ERR_NOT_IMPL;
2481 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2482 /* Only one entry existed in the mapping and it was not already
2483 * a part of a VSI list. So, create a VSI list with the old and
2486 struct ice_fltr_info tmp_fltr;
2487 u16 vsi_handle_arr[2];
2489 /* A rule already exists with the new VSI being added */
2490 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2491 return ICE_ERR_ALREADY_EXISTS;
2493 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2494 vsi_handle_arr[1] = new_fltr->vsi_handle;
2495 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2497 new_fltr->lkup_type);
2501 tmp_fltr = *new_fltr;
2502 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2503 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2504 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2505 /* Update the previous switch rule of "MAC forward to VSI" to
2506 * "MAC fwd to VSI list"
2508 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keep the new forwarding target on the tracked filter */
2512 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2513 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2514 m_entry->vsi_list_info =
2515 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2518 /* If this entry was large action then the large action needs
2519 * to be updated to point to FWD to VSI list
2521 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2523 ice_add_marker_act(hw, m_entry,
2524 m_entry->sw_marker_id,
2525 m_entry->lg_act_idx);
/* else: a VSI list already exists — just add the new VSI to it */
2527 u16 vsi_handle = new_fltr->vsi_handle;
2528 enum ice_adminq_opc opcode;
2530 if (!m_entry->vsi_list_info)
2533 /* A rule already exists with the new VSI being added */
2534 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2537 /* Update the previously created VSI list set with
2538 * the new VSI ID passed in
2540 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2541 opcode = ice_aqc_opc_update_sw_rules;
2543 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2544 vsi_list_id, false, opcode,
2545 new_fltr->lkup_type);
2546 /* update VSI list mapping info with new VSI ID */
2548 ice_set_bit(vsi_handle,
2549 m_entry->vsi_list_info->vsi_map);
2552 m_entry->vsi_count++;
2557 * ice_find_rule_entry - Search a rule entry
2558 * @hw: pointer to the hardware structure
2559 * @recp_id: lookup type for which the specified rule needs to be searched
2560 * @f_info: rule information
2562 * Helper function to search for a given rule entry
2563 * Returns pointer to entry storing the rule if found
2565 static struct ice_fltr_mgmt_list_entry *
2566 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2568 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2569 struct ice_switch_info *sw = hw->switch_info;
2570 struct LIST_HEAD_TYPE *list_head;
2572 list_head = &sw->recp_list[recp_id].filt_rules;
2573 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the lookup data (MAC/VLAN/ethertype payload) plus the
 * Rx/Tx flag; other fltr_info fields are not compared.
 */
2575 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2576 sizeof(f_info->l_data)) &&
2577 f_info->flag == list_itr->fltr_info.flag) {
2586 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2587 * @hw: pointer to the hardware structure
2588 * @recp_id: lookup type for which VSI lists needs to be searched
2589 * @vsi_handle: VSI handle to be found in VSI list
2590 * @vsi_list_id: VSI list ID found containing vsi_handle
2592 * Helper function to search a VSI list with single entry containing given VSI
2593 * handle element. This can be extended further to search VSI list with more
2594 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2596 static struct ice_vsi_list_map_info *
2597 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2600 struct ice_vsi_list_map_info *map_info = NULL;
2601 struct ice_switch_info *sw = hw->switch_info;
2602 struct LIST_HEAD_TYPE *list_head;
2604 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different entry type in filt_rules, so
 * the walk is duplicated for the two entry layouts.
 */
2605 if (sw->recp_list[recp_id].adv_rule) {
2606 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2608 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2609 ice_adv_fltr_mgmt_list_entry,
2611 if (list_itr->vsi_list_info) {
2612 map_info = list_itr->vsi_list_info;
2613 if (ice_is_bit_set(map_info->vsi_map,
2615 *vsi_list_id = map_info->vsi_list_id;
2621 struct ice_fltr_mgmt_list_entry *list_itr;
2623 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2624 ice_fltr_mgmt_list_entry,
/* Legacy path additionally requires a single-VSI entry */
2626 if (list_itr->vsi_count == 1 &&
2627 list_itr->vsi_list_info) {
2628 map_info = list_itr->vsi_list_info;
2629 if (ice_is_bit_set(map_info->vsi_map,
2631 *vsi_list_id = map_info->vsi_list_id;
2641 * ice_add_rule_internal - add rule for a given lookup type
2642 * @hw: pointer to the hardware structure
2643 * @recp_id: lookup type (recipe ID) for which rule has to be added
2644 * @f_entry: structure containing MAC forwarding information
2646 * Adds or updates the rule lists for a given recipe
2648 static enum ice_status
2649 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2650 struct ice_fltr_list_entry *f_entry)
2652 struct ice_switch_info *sw = hw->switch_info;
2653 struct ice_fltr_info *new_fltr, *cur_fltr;
2654 struct ice_fltr_mgmt_list_entry *m_entry;
2655 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2656 enum ice_status status = ICE_SUCCESS;
2658 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2659 return ICE_ERR_PARAM;
2661 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2662 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2663 f_entry->fltr_info.fwd_id.hw_vsi_id =
2664 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2666 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2668 ice_acquire_lock(rule_lock);
2669 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the port; Tx rules from the VSI */
2670 if (new_fltr->flag & ICE_FLTR_RX)
2671 new_fltr->src = hw->port_info->lport;
2672 else if (new_fltr->flag & ICE_FLTR_TX)
2674 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule for this lookup data: create a new forwarding rule;
 * otherwise fold the new VSI into the existing rule's VSI list.
 */
2676 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2678 status = ice_create_pkt_fwd_rule(hw, f_entry);
2679 goto exit_add_rule_internal;
2682 cur_fltr = &m_entry->fltr_info;
2683 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2685 exit_add_rule_internal:
2686 ice_release_lock(rule_lock);
2691 * ice_remove_vsi_list_rule
2692 * @hw: pointer to the hardware structure
2693 * @vsi_list_id: VSI list ID generated as part of allocate resource
2694 * @lkup_type: switch rule filter lookup type
2696 * The VSI list should be emptied before this function is called to remove the
2699 static enum ice_status
2700 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2701 enum ice_sw_lkup_type lkup_type)
2703 struct ice_aqc_sw_rules_elem *s_rule;
2704 enum ice_status status;
/* Zero-VSI element: only the header/index is needed for a CLEAR */
2707 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2708 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2710 return ICE_ERR_NO_MEMORY;
2712 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2713 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2715 /* Free the vsi_list resource that we allocated. It is assumed that the
2716 * list is empty at this point.
2718 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2719 ice_aqc_opc_free_res);
2721 ice_free(hw, s_rule);
2726 * ice_rem_update_vsi_list
2727 * @hw: pointer to the hardware structure
2728 * @vsi_handle: VSI handle of the VSI to remove
2729 * @fm_list: filter management entry for which the VSI list management needs to
2732 static enum ice_status
2733 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2734 struct ice_fltr_mgmt_list_entry *fm_list)
2736 enum ice_sw_lkup_type lkup_type;
2737 enum ice_status status = ICE_SUCCESS;
/* Only applicable when the rule currently forwards to a VSI list */
2740 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2741 fm_list->vsi_count == 0)
2742 return ICE_ERR_PARAM;
2744 /* A rule with the VSI being removed does not exist */
2745 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2746 return ICE_ERR_DOES_NOT_EXIST;
2748 lkup_type = fm_list->fltr_info.lkup_type;
2749 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove=true: clear this VSI from the HW VSI list */
2750 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2751 ice_aqc_opc_update_sw_rules,
2756 fm_list->vsi_count--;
2757 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* When one VSI remains (non-VLAN lookups), collapse the rule back to a
 * direct "forward to VSI" and drop the now-redundant VSI list.
 */
2759 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2760 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2761 struct ice_vsi_list_map_info *vsi_list_info =
2762 fm_list->vsi_list_info;
2765 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2767 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2768 return ICE_ERR_OUT_OF_RANGE;
2770 /* Make sure VSI list is empty before removing it below */
2771 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2773 ice_aqc_opc_update_sw_rules,
2778 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2779 tmp_fltr_info.fwd_id.hw_vsi_id =
2780 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2781 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2782 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2784 ice_debug(hw, ICE_DBG_SW,
2785 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2786 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2790 fm_list->fltr_info = tmp_fltr_info;
/* Tear down the VSI list itself once it is no longer referenced:
 * non-VLAN rules keep it until one VSI remains, VLAN rules until empty.
 */
2793 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2794 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2795 struct ice_vsi_list_map_info *vsi_list_info =
2796 fm_list->vsi_list_info;
2798 /* Remove the VSI list since it is no longer used */
2799 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2801 ice_debug(hw, ICE_DBG_SW,
2802 "Failed to remove VSI list %d, error %d\n",
2803 vsi_list_id, status);
2807 LIST_DEL(&vsi_list_info->list_entry);
2808 ice_free(hw, vsi_list_info);
2809 fm_list->vsi_list_info = NULL;
2816 * ice_remove_rule_internal - Remove a filter rule of a given type
2818 * @hw: pointer to the hardware structure
2819 * @recp_id: recipe ID for which the rule needs to removed
2820 * @f_entry: rule entry containing filter information
2822 static enum ice_status
2823 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2824 struct ice_fltr_list_entry *f_entry)
2826 struct ice_switch_info *sw = hw->switch_info;
2827 struct ice_fltr_mgmt_list_entry *list_elem;
2828 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2829 enum ice_status status = ICE_SUCCESS;
2830 bool remove_rule = false;
2833 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2834 return ICE_ERR_PARAM;
2835 f_entry->fltr_info.fwd_id.hw_vsi_id =
2836 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2838 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2839 ice_acquire_lock(rule_lock);
2840 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2842 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be deleted, or only the VSI
 * list it forwards to needs shrinking.
 */
2846 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2848 } else if (!list_elem->vsi_list_info) {
2849 status = ICE_ERR_DOES_NOT_EXIST;
2851 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2852 /* a ref_cnt > 1 indicates that the vsi_list is being
2853 * shared by multiple rules. Decrement the ref_cnt and
2854 * remove this rule, but do not modify the list, as it
2855 * is in-use by other rules.
2857 list_elem->vsi_list_info->ref_cnt--;
2860 /* a ref_cnt of 1 indicates the vsi_list is only used
2861 * by one rule. However, the original removal request is only
2862 * for a single VSI. Update the vsi_list first, and only
2863 * remove the rule if there are no further VSIs in this list.
2865 vsi_handle = f_entry->fltr_info.vsi_handle;
2866 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2869 /* if VSI count goes to zero after updating the VSI list */
2870 if (list_elem->vsi_count == 0)
2875 /* Remove the lookup rule */
2876 struct ice_aqc_sw_rules_elem *s_rule;
/* NO_HDR size: a remove element needs no dummy packet header */
2878 s_rule = (struct ice_aqc_sw_rules_elem *)
2879 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2881 status = ICE_ERR_NO_MEMORY;
2885 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2886 ice_aqc_opc_remove_sw_rules);
2888 status = ice_aq_sw_rules(hw, s_rule,
2889 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2890 ice_aqc_opc_remove_sw_rules, NULL);
2892 /* Remove a book keeping from the list */
2893 ice_free(hw, s_rule);
2898 LIST_DEL(&list_elem->list_entry);
2899 ice_free(hw, list_elem);
2902 ice_release_lock(rule_lock);
2907 * ice_aq_get_res_alloc - get allocated resources
2908 * @hw: pointer to the HW struct
2909 * @num_entries: pointer to u16 to store the number of resource entries returned
2910 * @buf: pointer to user-supplied buffer
2911 * @buf_size: size of buff
2912 * @cd: pointer to command details structure or NULL
2914 * The user-supplied buffer must be large enough to store the resource
2915 * information for all resource types. Each resource type is an
2916 * ice_aqc_get_res_resp_data_elem structure.
2919 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2920 u16 buf_size, struct ice_sq_cd *cd)
2922 struct ice_aqc_get_res_alloc *resp;
2923 enum ice_status status;
2924 struct ice_aq_desc desc;
2927 return ICE_ERR_BAD_PTR;
2929 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2930 return ICE_ERR_INVAL_SIZE;
2932 resp = &desc.params.get_res;
2934 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2935 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on AQ success */
2937 if (!status && num_entries)
2938 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2944 * ice_aq_get_res_descs - get allocated resource descriptors
2945 * @hw: pointer to the hardware structure
2946 * @num_entries: number of resource entries in buffer
2947 * @buf: Indirect buffer to hold data parameters and response
2948 * @buf_size: size of buffer for indirect commands
2949 * @res_type: resource type
2950 * @res_shared: is resource shared
2951 * @desc_id: input - first desc ID to start; output - next desc ID
2952 * @cd: pointer to command details structure or NULL
2955 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2956 struct ice_aqc_get_allocd_res_desc_resp *buf,
2957 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2958 struct ice_sq_cd *cd)
2960 struct ice_aqc_get_allocd_res_desc *cmd;
2961 struct ice_aq_desc desc;
2962 enum ice_status status;
2964 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2966 cmd = &desc.params.get_res_desc;
2969 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements */
2971 if (buf_size != (num_entries * sizeof(*buf)))
2972 return ICE_ERR_PARAM;
2974 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and the shared flag into one field */
2976 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2977 ICE_AQC_RES_TYPE_M) | (res_shared ?
2978 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2979 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2981 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2983 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Return the continuation cursor for the next call */
2985 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2991 * ice_add_mac - Add a MAC address based filter rule
2992 * @hw: pointer to the hardware structure
2993 * @m_list: list of MAC addresses and forwarding information
2995 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2996 * multiple unicast addresses, the function assumes that all the
2997 * addresses are unique in a given add_mac call. It doesn't
2998 * check for duplicates in this case, removing duplicates from a given
2999 * list should be taken care of in the caller of this function.
3002 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3004 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3005 struct ice_fltr_list_entry *m_list_itr;
3006 struct LIST_HEAD_TYPE *rule_head;
3007 u16 elem_sent, total_elem_left;
3008 struct ice_switch_info *sw;
3009 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3010 enum ice_status status = ICE_SUCCESS;
3011 u16 num_unicast = 0;
3015 return ICE_ERR_PARAM;
3017 sw = hw->switch_info;
3018 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; multicast (and shared unicast) MACs are
 * added immediately via the common path, exclusive unicast MACs are
 * counted for a bulk AQ add below.
 */
3019 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3021 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3025 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3026 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3027 if (!ice_is_vsi_valid(hw, vsi_handle))
3028 return ICE_ERR_PARAM;
3029 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3030 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3031 /* update the src in case it is VSI num */
3032 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3033 return ICE_ERR_PARAM;
3034 m_list_itr->fltr_info.src = hw_vsi_id;
3035 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3036 IS_ZERO_ETHER_ADDR(add))
3037 return ICE_ERR_PARAM;
3038 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3039 /* Don't overwrite the unicast address */
3040 ice_acquire_lock(rule_lock);
3041 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3042 &m_list_itr->fltr_info)) {
3043 ice_release_lock(rule_lock);
3044 return ICE_ERR_ALREADY_EXISTS;
3046 ice_release_lock(rule_lock);
3048 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3049 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3050 m_list_itr->status =
3051 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3053 if (m_list_itr->status)
3054 return m_list_itr->status;
3058 ice_acquire_lock(rule_lock);
3059 /* Exit if no suitable entries were found for adding bulk switch rule */
3061 status = ICE_SUCCESS;
3062 goto ice_add_mac_exit;
3065 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3067 /* Allocate switch rule buffer for the bulk update for unicast */
3068 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3069 s_rule = (struct ice_aqc_sw_rules_elem *)
3070 ice_calloc(hw, num_unicast, s_rule_size);
3072 status = ICE_ERR_NO_MEMORY;
3073 goto ice_add_mac_exit;
/* Pass 2: fill one rule element per exclusive unicast MAC */
3077 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3079 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3080 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3082 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3083 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3084 ice_aqc_opc_add_sw_rules);
3085 r_iter = (struct ice_aqc_sw_rules_elem *)
3086 ((u8 *)r_iter + s_rule_size);
3090 /* Call AQ bulk switch rule update for all unicast addresses */
3092 /* Call AQ switch rule in AQ_MAX chunk */
3093 for (total_elem_left = num_unicast; total_elem_left > 0;
3094 total_elem_left -= elem_sent) {
3095 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ transaction at ICE_AQ_MAX_BUF_LEN worth of rules */
3097 elem_sent = min(total_elem_left,
3098 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3099 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3100 elem_sent, ice_aqc_opc_add_sw_rules,
3103 goto ice_add_mac_exit;
3104 r_iter = (struct ice_aqc_sw_rules_elem *)
3105 ((u8 *)r_iter + (elem_sent * s_rule_size));
3108 /* Fill up rule ID based on the value returned from FW */
3110 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3112 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3113 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3114 struct ice_fltr_mgmt_list_entry *fm_entry;
3116 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3117 f_info->fltr_rule_id =
3118 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3119 f_info->fltr_act = ICE_FWD_TO_VSI;
3120 /* Create an entry to track this MAC address */
3121 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3122 ice_malloc(hw, sizeof(*fm_entry));
3124 status = ICE_ERR_NO_MEMORY;
3125 goto ice_add_mac_exit;
3127 fm_entry->fltr_info = *f_info;
3128 fm_entry->vsi_count = 1;
3129 /* The book keeping entries will get removed when
3130 * base driver calls remove filter AQ command
3133 LIST_ADD(&fm_entry->list_entry, rule_head);
3134 r_iter = (struct ice_aqc_sw_rules_elem *)
3135 ((u8 *)r_iter + s_rule_size);
3140 ice_release_lock(rule_lock);
3142 ice_free(hw, s_rule);
3147 * ice_add_vlan_internal - Add one VLAN based filter rule
3148 * @hw: pointer to the hardware structure
3149 * @f_entry: filter entry containing one VLAN information
3151 static enum ice_status
3152 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3154 struct ice_switch_info *sw = hw->switch_info;
3155 struct ice_fltr_mgmt_list_entry *v_list_itr;
3156 struct ice_fltr_info *new_fltr, *cur_fltr;
3157 enum ice_sw_lkup_type lkup_type;
3158 u16 vsi_list_id = 0, vsi_handle;
3159 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3160 enum ice_status status = ICE_SUCCESS;
3162 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3163 return ICE_ERR_PARAM;
3165 f_entry->fltr_info.fwd_id.hw_vsi_id =
3166 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3167 new_fltr = &f_entry->fltr_info;
3169 /* VLAN ID should only be 12 bits */
3170 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3171 return ICE_ERR_PARAM;
3173 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3174 return ICE_ERR_PARAM;
3176 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3177 lkup_type = new_fltr->lkup_type;
3178 vsi_handle = new_fltr->vsi_handle;
3179 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3180 ice_acquire_lock(rule_lock);
3181 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* No rule yet for this VLAN: either reuse an existing single-VSI list
 * containing this VSI, or create a brand-new list, then add the rule.
 */
3183 struct ice_vsi_list_map_info *map_info = NULL;
3185 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3186 /* All VLAN pruning rules use a VSI list. Check if
3187 * there is already a VSI list containing VSI that we
3188 * want to add. If found, use the same vsi_list_id for
3189 * this new VLAN rule or else create a new list.
3191 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3195 status = ice_create_vsi_list_rule(hw,
3203 /* Convert the action to forwarding to a VSI list. */
3204 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3205 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3208 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Look up the entry just created to attach VSI-list tracking */
3210 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3213 status = ICE_ERR_DOES_NOT_EXIST;
3216 /* reuse VSI list for new rule and increment ref_cnt */
3218 v_list_itr->vsi_list_info = map_info;
3219 map_info->ref_cnt++;
3221 v_list_itr->vsi_list_info =
3222 ice_create_vsi_list_map(hw, &vsi_handle,
3226 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3227 /* Update existing VSI list to add new VSI ID only if it used
3230 cur_fltr = &v_list_itr->fltr_info;
3231 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3234 /* If VLAN rule exists and VSI list being used by this rule is
3235 * referenced by more than 1 VLAN rule. Then create a new VSI
3236 * list appending previous VSI with new VSI and update existing
3237 * VLAN rule to point to new VSI list ID
3239 struct ice_fltr_info tmp_fltr;
3240 u16 vsi_handle_arr[2];
3243 /* Current implementation only supports reusing VSI list with
3244 * one VSI count. We should never hit below condition
3246 if (v_list_itr->vsi_count > 1 &&
3247 v_list_itr->vsi_list_info->ref_cnt > 1) {
3248 ice_debug(hw, ICE_DBG_SW,
3249 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3250 status = ICE_ERR_CFG;
/* The shared list holds exactly one VSI; fetch its handle */
3255 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3258 /* A rule already exists with the new VSI being added */
3259 if (cur_handle == vsi_handle) {
3260 status = ICE_ERR_ALREADY_EXISTS;
3264 vsi_handle_arr[0] = cur_handle;
3265 vsi_handle_arr[1] = vsi_handle;
3266 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3267 &vsi_list_id, lkup_type);
3271 tmp_fltr = v_list_itr->fltr_info;
3272 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3273 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3274 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3275 /* Update the previous switch rule to a new VSI list which
3276 * includes current VSI that is requested
3278 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3282 /* before overriding VSI list map info. decrement ref_cnt of
3285 v_list_itr->vsi_list_info->ref_cnt--;
3287 /* now update to newly created list */
3288 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3289 v_list_itr->vsi_list_info =
3290 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3292 v_list_itr->vsi_count++;
3296 ice_release_lock(rule_lock);
3301 * ice_add_vlan - Add VLAN based filter rule
3302 * @hw: pointer to the hardware structure
3303 * @v_list: list of VLAN entries and forwarding information
3306 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3308 struct ice_fltr_list_entry *v_list_itr;
3311 return ICE_ERR_PARAM;
/* Add each entry in turn; per-entry status is recorded and the first
 * failure aborts the walk.
 */
3313 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3315 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3316 return ICE_ERR_PARAM;
3317 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3318 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3319 if (v_list_itr->status)
3320 return v_list_itr->status;
3326 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3327 * @hw: pointer to the hardware structure
3328 * @mv_list: list of MAC and VLAN filters
3330 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3331 * pruning bits enabled, then it is the responsibility of the caller to make
3332 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3333 * VLAN won't be received on that VSI otherwise.
3336 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3338 struct ice_fltr_list_entry *mv_list_itr;
3340 if (!mv_list || !hw)
3341 return ICE_ERR_PARAM;
3343 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3345 enum ice_sw_lkup_type l_type =
3346 mv_list_itr->fltr_info.lkup_type;
3348 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3349 return ICE_ERR_PARAM;
3350 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
/* Common add path handles rule creation / VSI-list merging */
3351 mv_list_itr->status =
3352 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3354 if (mv_list_itr->status)
3355 return mv_list_itr->status;
3361 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3362 * @hw: pointer to the hardware structure
3363 * @em_list: list of ether type MAC filter, MAC is optional
3365 * This function requires the caller to populate the entries in
3366 * the filter list with the necessary fields (including flags to
3367 * indicate Tx or Rx rules).
3370 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3372 struct ice_fltr_list_entry *em_list_itr;
3374 if (!em_list || !hw)
3375 return ICE_ERR_PARAM;
3377 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3379 enum ice_sw_lkup_type l_type =
3380 em_list_itr->fltr_info.lkup_type;
/* Accept both plain ethertype and ethertype+MAC lookups */
3382 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3383 l_type != ICE_SW_LKUP_ETHERTYPE)
3384 return ICE_ERR_PARAM;
3386 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3388 if (em_list_itr->status)
3389 return em_list_itr->status;
3395 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3396 * @hw: pointer to the hardware structure
3397 * @em_list: list of ethertype or ethertype MAC entries
3400 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3402 struct ice_fltr_list_entry *em_list_itr, *tmp;
3404 if (!em_list || !hw)
3405 return ICE_ERR_PARAM;
3407 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3409 enum ice_sw_lkup_type l_type =
3410 em_list_itr->fltr_info.lkup_type;
3412 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3413 l_type != ICE_SW_LKUP_ETHERTYPE)
3414 return ICE_ERR_PARAM;
3416 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3418 if (em_list_itr->status)
3419 return em_list_itr->status;
3426 * ice_rem_sw_rule_info
3427 * @hw: pointer to the hardware structure
3428 * @rule_head: pointer to the switch list structure that we want to delete
3431 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3433 if (!LIST_EMPTY(rule_head)) {
3434 struct ice_fltr_mgmt_list_entry *entry;
3435 struct ice_fltr_mgmt_list_entry *tmp;
3437 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3438 ice_fltr_mgmt_list_entry, list_entry) {
3439 LIST_DEL(&entry->list_entry);
3440 ice_free(hw, entry);
3446 * ice_rem_adv_rule_info
3447 * @hw: pointer to the hardware structure
3448 * @rule_head: pointer to the switch list structure that we want to delete
3451 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3453 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3454 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3456 if (LIST_EMPTY(rule_head))
3459 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3460 ice_adv_fltr_mgmt_list_entry, list_entry) {
3461 LIST_DEL(&lst_itr->list_entry);
3462 ice_free(hw, lst_itr->lkups);
3463 ice_free(hw, lst_itr);
3468 * ice_rem_all_sw_rules_info
3469 * @hw: pointer to the hardware structure
3471 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3473 struct ice_switch_info *sw = hw->switch_info;
3476 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3477 struct LIST_HEAD_TYPE *rule_head;
3479 rule_head = &sw->recp_list[i].filt_rules;
3480 if (!sw->recp_list[i].adv_rule)
3481 ice_rem_sw_rule_info(hw, rule_head);
3483 ice_rem_adv_rule_info(hw, rule_head);
3488 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3489 * @pi: pointer to the port_info structure
3490 * @vsi_handle: VSI handle to set as default
3491 * @set: true to add the above mentioned switch rule, false to remove it
3492 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3494 * add filter rule to set/unset given VSI as default VSI for the switch
3495 * (represented by swid)
3498 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3501 struct ice_aqc_sw_rules_elem *s_rule;
3502 struct ice_fltr_info f_info;
3503 struct ice_hw *hw = pi->hw;
3504 enum ice_adminq_opc opcode;
3505 enum ice_status status;
3509 if (!ice_is_vsi_valid(hw, vsi_handle))
3510 return ICE_ERR_PARAM;
3511 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3513 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3514 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3515 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3517 return ICE_ERR_NO_MEMORY;
3519 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3521 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3522 f_info.flag = direction;
3523 f_info.fltr_act = ICE_FWD_TO_VSI;
3524 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3526 if (f_info.flag & ICE_FLTR_RX) {
3527 f_info.src = pi->lport;
3528 f_info.src_id = ICE_SRC_ID_LPORT;
3530 f_info.fltr_rule_id =
3531 pi->dflt_rx_vsi_rule_id;
3532 } else if (f_info.flag & ICE_FLTR_TX) {
3533 f_info.src_id = ICE_SRC_ID_VSI;
3534 f_info.src = hw_vsi_id;
3536 f_info.fltr_rule_id =
3537 pi->dflt_tx_vsi_rule_id;
3541 opcode = ice_aqc_opc_add_sw_rules;
3543 opcode = ice_aqc_opc_remove_sw_rules;
3545 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3547 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3548 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3551 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3553 if (f_info.flag & ICE_FLTR_TX) {
3554 pi->dflt_tx_vsi_num = hw_vsi_id;
3555 pi->dflt_tx_vsi_rule_id = index;
3556 } else if (f_info.flag & ICE_FLTR_RX) {
3557 pi->dflt_rx_vsi_num = hw_vsi_id;
3558 pi->dflt_rx_vsi_rule_id = index;
3561 if (f_info.flag & ICE_FLTR_TX) {
3562 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3563 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3564 } else if (f_info.flag & ICE_FLTR_RX) {
3565 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3566 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3571 ice_free(hw, s_rule);
3576 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3577 * @hw: pointer to the hardware structure
3578 * @recp_id: lookup type for which the specified rule needs to be searched
3579 * @f_info: rule information
3581 * Helper function to search for a unicast rule entry - this is to be used
3582 * to remove unicast MAC filter that is not shared with other VSIs on the
3585 * Returns pointer to entry storing the rule if found
3587 static struct ice_fltr_mgmt_list_entry *
3588 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3589 struct ice_fltr_info *f_info)
3591 struct ice_switch_info *sw = hw->switch_info;
3592 struct ice_fltr_mgmt_list_entry *list_itr;
3593 struct LIST_HEAD_TYPE *list_head;
3595 list_head = &sw->recp_list[recp_id].filt_rules;
3596 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3598 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3599 sizeof(f_info->l_data)) &&
3600 f_info->fwd_id.hw_vsi_id ==
3601 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3602 f_info->flag == list_itr->fltr_info.flag)
3609 * ice_remove_mac - remove a MAC address based filter rule
3610 * @hw: pointer to the hardware structure
3611 * @m_list: list of MAC addresses and forwarding information
3613 * This function removes either a MAC filter rule or a specific VSI from a
3614 * VSI list for a multicast MAC address.
3616 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3617 * ice_add_mac. Caller should be aware that this call will only work if all
3618 * the entries passed into m_list were added previously. It will not attempt to
3619 * do a partial remove of entries that were found.
3622 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3624 struct ice_fltr_list_entry *list_itr, *tmp;
3625 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3628 return ICE_ERR_PARAM;
3630 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3631 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3633 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3634 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3637 if (l_type != ICE_SW_LKUP_MAC)
3638 return ICE_ERR_PARAM;
3640 vsi_handle = list_itr->fltr_info.vsi_handle;
3641 if (!ice_is_vsi_valid(hw, vsi_handle))
3642 return ICE_ERR_PARAM;
3644 list_itr->fltr_info.fwd_id.hw_vsi_id =
3645 ice_get_hw_vsi_num(hw, vsi_handle);
3646 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3647 /* Don't remove the unicast address that belongs to
3648 * another VSI on the switch, since it is not being
3651 ice_acquire_lock(rule_lock);
3652 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3653 &list_itr->fltr_info)) {
3654 ice_release_lock(rule_lock);
3655 return ICE_ERR_DOES_NOT_EXIST;
3657 ice_release_lock(rule_lock);
3659 list_itr->status = ice_remove_rule_internal(hw,
3662 if (list_itr->status)
3663 return list_itr->status;
3669 * ice_remove_vlan - Remove VLAN based filter rule
3670 * @hw: pointer to the hardware structure
3671 * @v_list: list of VLAN entries and forwarding information
3674 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3676 struct ice_fltr_list_entry *v_list_itr, *tmp;
3679 return ICE_ERR_PARAM;
3681 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3683 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3685 if (l_type != ICE_SW_LKUP_VLAN)
3686 return ICE_ERR_PARAM;
3687 v_list_itr->status = ice_remove_rule_internal(hw,
3690 if (v_list_itr->status)
3691 return v_list_itr->status;
3697 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3698 * @hw: pointer to the hardware structure
3699 * @v_list: list of MAC VLAN entries and forwarding information
3702 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3704 struct ice_fltr_list_entry *v_list_itr, *tmp;
3707 return ICE_ERR_PARAM;
3709 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3711 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3713 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3714 return ICE_ERR_PARAM;
3715 v_list_itr->status =
3716 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3718 if (v_list_itr->status)
3719 return v_list_itr->status;
3725 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3726 * @fm_entry: filter entry to inspect
3727 * @vsi_handle: VSI handle to compare with filter info
3730 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3732 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3733 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3734 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3735 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3740 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3741 * @hw: pointer to the hardware structure
3742 * @vsi_handle: VSI handle to remove filters from
3743 * @vsi_list_head: pointer to the list to add entry to
3744 * @fi: pointer to fltr_info of filter entry to copy & add
3746 * Helper function, used when creating a list of filters to remove from
3747 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3748 * original filter entry, with the exception of fltr_info.fltr_act and
3749 * fltr_info.fwd_id fields. These are set such that later logic can
3750 * extract which VSI to remove the fltr from, and pass on that information.
3752 static enum ice_status
3753 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3754 struct LIST_HEAD_TYPE *vsi_list_head,
3755 struct ice_fltr_info *fi)
3757 struct ice_fltr_list_entry *tmp;
3759 /* this memory is freed up in the caller function
3760 * once filters for this VSI are removed
3762 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3764 return ICE_ERR_NO_MEMORY;
3766 tmp->fltr_info = *fi;
3768 /* Overwrite these fields to indicate which VSI to remove filter from,
3769 * so find and remove logic can extract the information from the
3770 * list entries. Note that original entries will still have proper
3773 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3774 tmp->fltr_info.vsi_handle = vsi_handle;
3775 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3777 LIST_ADD(&tmp->list_entry, vsi_list_head);
3783 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3784 * @hw: pointer to the hardware structure
3785 * @vsi_handle: VSI handle to remove filters from
3786 * @lkup_list_head: pointer to the list that has certain lookup type filters
3787 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3789 * Locates all filters in lkup_list_head that are used by the given VSI,
3790 * and adds COPIES of those entries to vsi_list_head (intended to be used
3791 * to remove the listed filters).
3792 * Note that this means all entries in vsi_list_head must be explicitly
3793 * deallocated by the caller when done with list.
3795 static enum ice_status
3796 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3797 struct LIST_HEAD_TYPE *lkup_list_head,
3798 struct LIST_HEAD_TYPE *vsi_list_head)
3800 struct ice_fltr_mgmt_list_entry *fm_entry;
3801 enum ice_status status = ICE_SUCCESS;
3803 /* check to make sure VSI ID is valid and within boundary */
3804 if (!ice_is_vsi_valid(hw, vsi_handle))
3805 return ICE_ERR_PARAM;
3807 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3808 ice_fltr_mgmt_list_entry, list_entry) {
3809 struct ice_fltr_info *fi;
3811 fi = &fm_entry->fltr_info;
3812 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3815 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3825 * ice_determine_promisc_mask
3826 * @fi: filter info to parse
3828 * Helper function to determine which ICE_PROMISC_ mask corresponds
3829 * to given filter into.
3831 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3833 u16 vid = fi->l_data.mac_vlan.vlan_id;
3834 u8 *macaddr = fi->l_data.mac.mac_addr;
3835 bool is_tx_fltr = false;
3836 u8 promisc_mask = 0;
3838 if (fi->flag == ICE_FLTR_TX)
3841 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3842 promisc_mask |= is_tx_fltr ?
3843 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3844 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3845 promisc_mask |= is_tx_fltr ?
3846 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3847 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3848 promisc_mask |= is_tx_fltr ?
3849 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3851 promisc_mask |= is_tx_fltr ?
3852 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3854 return promisc_mask;
3858 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3859 * @hw: pointer to the hardware structure
3860 * @vsi_handle: VSI handle to retrieve info from
3861 * @promisc_mask: pointer to mask to be filled in
3862 * @vid: VLAN ID of promisc VLAN VSI
3865 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3868 struct ice_switch_info *sw = hw->switch_info;
3869 struct ice_fltr_mgmt_list_entry *itr;
3870 struct LIST_HEAD_TYPE *rule_head;
3871 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3873 if (!ice_is_vsi_valid(hw, vsi_handle))
3874 return ICE_ERR_PARAM;
3878 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3879 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3881 ice_acquire_lock(rule_lock);
3882 LIST_FOR_EACH_ENTRY(itr, rule_head,
3883 ice_fltr_mgmt_list_entry, list_entry) {
3884 /* Continue if this filter doesn't apply to this VSI or the
3885 * VSI ID is not in the VSI map for this filter
3887 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3890 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3892 ice_release_lock(rule_lock);
3898 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3899 * @hw: pointer to the hardware structure
3900 * @vsi_handle: VSI handle to retrieve info from
3901 * @promisc_mask: pointer to mask to be filled in
3902 * @vid: VLAN ID of promisc VLAN VSI
3905 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3908 struct ice_switch_info *sw = hw->switch_info;
3909 struct ice_fltr_mgmt_list_entry *itr;
3910 struct LIST_HEAD_TYPE *rule_head;
3911 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3913 if (!ice_is_vsi_valid(hw, vsi_handle))
3914 return ICE_ERR_PARAM;
3918 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3919 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3921 ice_acquire_lock(rule_lock);
3922 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3924 /* Continue if this filter doesn't apply to this VSI or the
3925 * VSI ID is not in the VSI map for this filter
3927 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3930 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3932 ice_release_lock(rule_lock);
3938 * ice_remove_promisc - Remove promisc based filter rules
3939 * @hw: pointer to the hardware structure
3940 * @recp_id: recipe ID for which the rule needs to removed
3941 * @v_list: list of promisc entries
3943 static enum ice_status
3944 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3945 struct LIST_HEAD_TYPE *v_list)
3947 struct ice_fltr_list_entry *v_list_itr, *tmp;
3949 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3951 v_list_itr->status =
3952 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3953 if (v_list_itr->status)
3954 return v_list_itr->status;
3960 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3961 * @hw: pointer to the hardware structure
3962 * @vsi_handle: VSI handle to clear mode
3963 * @promisc_mask: mask of promiscuous config bits to clear
3964 * @vid: VLAN ID to clear VLAN promiscuous
3967 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3970 struct ice_switch_info *sw = hw->switch_info;
3971 struct ice_fltr_list_entry *fm_entry, *tmp;
3972 struct LIST_HEAD_TYPE remove_list_head;
3973 struct ice_fltr_mgmt_list_entry *itr;
3974 struct LIST_HEAD_TYPE *rule_head;
3975 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3976 enum ice_status status = ICE_SUCCESS;
3979 if (!ice_is_vsi_valid(hw, vsi_handle))
3980 return ICE_ERR_PARAM;
3983 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3985 recipe_id = ICE_SW_LKUP_PROMISC;
3987 rule_head = &sw->recp_list[recipe_id].filt_rules;
3988 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3990 INIT_LIST_HEAD(&remove_list_head);
3992 ice_acquire_lock(rule_lock);
3993 LIST_FOR_EACH_ENTRY(itr, rule_head,
3994 ice_fltr_mgmt_list_entry, list_entry) {
3995 u8 fltr_promisc_mask = 0;
3997 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4000 fltr_promisc_mask |=
4001 ice_determine_promisc_mask(&itr->fltr_info);
4003 /* Skip if filter is not completely specified by given mask */
4004 if (fltr_promisc_mask & ~promisc_mask)
4007 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4011 ice_release_lock(rule_lock);
4012 goto free_fltr_list;
4015 ice_release_lock(rule_lock);
4017 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4020 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4021 ice_fltr_list_entry, list_entry) {
4022 LIST_DEL(&fm_entry->list_entry);
4023 ice_free(hw, fm_entry);
4030 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4031 * @hw: pointer to the hardware structure
4032 * @vsi_handle: VSI handle to configure
4033 * @promisc_mask: mask of promiscuous config bits
4034 * @vid: VLAN ID to set VLAN promiscuous
4037 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4039 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4040 struct ice_fltr_list_entry f_list_entry;
4041 struct ice_fltr_info new_fltr;
4042 enum ice_status status = ICE_SUCCESS;
4048 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4050 if (!ice_is_vsi_valid(hw, vsi_handle))
4051 return ICE_ERR_PARAM;
4052 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4054 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4056 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4057 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4058 new_fltr.l_data.mac_vlan.vlan_id = vid;
4059 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4061 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4062 recipe_id = ICE_SW_LKUP_PROMISC;
4065 /* Separate filters must be set for each direction/packet type
4066 * combination, so we will loop over the mask value, store the
4067 * individual type, and clear it out in the input mask as it
4070 while (promisc_mask) {
4076 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4077 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4078 pkt_type = UCAST_FLTR;
4079 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4080 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4081 pkt_type = UCAST_FLTR;
4083 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4084 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4085 pkt_type = MCAST_FLTR;
4086 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4087 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4088 pkt_type = MCAST_FLTR;
4090 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4091 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4092 pkt_type = BCAST_FLTR;
4093 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4094 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4095 pkt_type = BCAST_FLTR;
4099 /* Check for VLAN promiscuous flag */
4100 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4101 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4102 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4103 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4107 /* Set filter DA based on packet type */
4108 mac_addr = new_fltr.l_data.mac.mac_addr;
4109 if (pkt_type == BCAST_FLTR) {
4110 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4111 } else if (pkt_type == MCAST_FLTR ||
4112 pkt_type == UCAST_FLTR) {
4113 /* Use the dummy ether header DA */
4114 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4115 ICE_NONDMA_TO_NONDMA);
4116 if (pkt_type == MCAST_FLTR)
4117 mac_addr[0] |= 0x1; /* Set multicast bit */
4120 /* Need to reset this to zero for all iterations */
4123 new_fltr.flag |= ICE_FLTR_TX;
4124 new_fltr.src = hw_vsi_id;
4126 new_fltr.flag |= ICE_FLTR_RX;
4127 new_fltr.src = hw->port_info->lport;
4130 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4131 new_fltr.vsi_handle = vsi_handle;
4132 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4133 f_list_entry.fltr_info = new_fltr;
4135 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4136 if (status != ICE_SUCCESS)
4137 goto set_promisc_exit;
4145 * ice_set_vlan_vsi_promisc
4146 * @hw: pointer to the hardware structure
4147 * @vsi_handle: VSI handle to configure
4148 * @promisc_mask: mask of promiscuous config bits
4149 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4151 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4154 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4155 bool rm_vlan_promisc)
4157 struct ice_switch_info *sw = hw->switch_info;
4158 struct ice_fltr_list_entry *list_itr, *tmp;
4159 struct LIST_HEAD_TYPE vsi_list_head;
4160 struct LIST_HEAD_TYPE *vlan_head;
4161 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4162 enum ice_status status;
4165 INIT_LIST_HEAD(&vsi_list_head);
4166 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4167 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4168 ice_acquire_lock(vlan_lock);
4169 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4171 ice_release_lock(vlan_lock);
4173 goto free_fltr_list;
4175 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4177 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4178 if (rm_vlan_promisc)
4179 status = ice_clear_vsi_promisc(hw, vsi_handle,
4180 promisc_mask, vlan_id);
4182 status = ice_set_vsi_promisc(hw, vsi_handle,
4183 promisc_mask, vlan_id);
4189 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4190 ice_fltr_list_entry, list_entry) {
4191 LIST_DEL(&list_itr->list_entry);
4192 ice_free(hw, list_itr);
4198 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4199 * @hw: pointer to the hardware structure
4200 * @vsi_handle: VSI handle to remove filters from
4201 * @lkup: switch rule filter lookup type
4204 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4205 enum ice_sw_lkup_type lkup)
4207 struct ice_switch_info *sw = hw->switch_info;
4208 struct ice_fltr_list_entry *fm_entry;
4209 struct LIST_HEAD_TYPE remove_list_head;
4210 struct LIST_HEAD_TYPE *rule_head;
4211 struct ice_fltr_list_entry *tmp;
4212 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4213 enum ice_status status;
4215 INIT_LIST_HEAD(&remove_list_head);
4216 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4217 rule_head = &sw->recp_list[lkup].filt_rules;
4218 ice_acquire_lock(rule_lock);
4219 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4221 ice_release_lock(rule_lock);
4226 case ICE_SW_LKUP_MAC:
4227 ice_remove_mac(hw, &remove_list_head);
4229 case ICE_SW_LKUP_VLAN:
4230 ice_remove_vlan(hw, &remove_list_head);
4232 case ICE_SW_LKUP_PROMISC:
4233 case ICE_SW_LKUP_PROMISC_VLAN:
4234 ice_remove_promisc(hw, lkup, &remove_list_head);
4236 case ICE_SW_LKUP_MAC_VLAN:
4237 ice_remove_mac_vlan(hw, &remove_list_head);
4239 case ICE_SW_LKUP_ETHERTYPE:
4240 case ICE_SW_LKUP_ETHERTYPE_MAC:
4241 ice_remove_eth_mac(hw, &remove_list_head);
4243 case ICE_SW_LKUP_DFLT:
4244 ice_debug(hw, ICE_DBG_SW,
4245 "Remove filters for this lookup type hasn't been implemented yet\n");
4247 case ICE_SW_LKUP_LAST:
4248 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4252 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4253 ice_fltr_list_entry, list_entry) {
4254 LIST_DEL(&fm_entry->list_entry);
4255 ice_free(hw, fm_entry);
4260 * ice_remove_vsi_fltr - Remove all filters for a VSI
4261 * @hw: pointer to the hardware structure
4262 * @vsi_handle: VSI handle to remove filters from
4264 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4266 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4268 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4269 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4270 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4271 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4272 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4273 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4274 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4275 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4279 * ice_alloc_res_cntr - allocating resource counter
4280 * @hw: pointer to the hardware structure
4281 * @type: type of resource
4282 * @alloc_shared: if set it is shared else dedicated
4283 * @num_items: number of entries requested for FD resource type
4284 * @counter_id: counter index returned by AQ call
4287 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4290 struct ice_aqc_alloc_free_res_elem *buf;
4291 enum ice_status status;
4294 /* Allocate resource */
4295 buf_len = sizeof(*buf);
4296 buf = (struct ice_aqc_alloc_free_res_elem *)
4297 ice_malloc(hw, buf_len);
4299 return ICE_ERR_NO_MEMORY;
4301 buf->num_elems = CPU_TO_LE16(num_items);
4302 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4303 ICE_AQC_RES_TYPE_M) | alloc_shared);
4305 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4306 ice_aqc_opc_alloc_res, NULL);
4310 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4318 * ice_free_res_cntr - free resource counter
4319 * @hw: pointer to the hardware structure
4320 * @type: type of resource
4321 * @alloc_shared: if set it is shared else dedicated
4322 * @num_items: number of entries to be freed for FD resource type
4323 * @counter_id: counter ID resource which needs to be freed
4326 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4329 struct ice_aqc_alloc_free_res_elem *buf;
4330 enum ice_status status;
4334 buf_len = sizeof(*buf);
4335 buf = (struct ice_aqc_alloc_free_res_elem *)
4336 ice_malloc(hw, buf_len);
4338 return ICE_ERR_NO_MEMORY;
4340 buf->num_elems = CPU_TO_LE16(num_items);
4341 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4342 ICE_AQC_RES_TYPE_M) | alloc_shared);
4343 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4345 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4346 ice_aqc_opc_free_res, NULL);
4348 ice_debug(hw, ICE_DBG_SW,
4349 "counter resource could not be freed\n");
4356 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4357 * @hw: pointer to the hardware structure
4358 * @counter_id: returns counter index
4360 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4362 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4363 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4368 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4369 * @hw: pointer to the hardware structure
4370 * @counter_id: counter index to be freed
4372 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4374 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4375 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4380 * ice_alloc_res_lg_act - add large action resource
4381 * @hw: pointer to the hardware structure
4382 * @l_id: large action ID to fill it in
4383 * @num_acts: number of actions to hold with a large action entry
4385 static enum ice_status
4386 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4388 struct ice_aqc_alloc_free_res_elem *sw_buf;
4389 enum ice_status status;
4392 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4393 return ICE_ERR_PARAM;
4395 /* Allocate resource for large action */
4396 buf_len = sizeof(*sw_buf);
4397 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4398 ice_malloc(hw, buf_len);
4400 return ICE_ERR_NO_MEMORY;
4402 sw_buf->num_elems = CPU_TO_LE16(1);
4404 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4405 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4406 * If num_acts is greater than 2, then use
4407 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4408 * The num_acts cannot exceed 4. This was ensured at the
4409 * beginning of the function.
4412 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4413 else if (num_acts == 2)
4414 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4416 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4418 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4419 ice_aqc_opc_alloc_res, NULL);
4421 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4423 ice_free(hw, sw_buf);
4428 * ice_add_mac_with_sw_marker - add filter with sw marker
4429 * @hw: pointer to the hardware structure
4430 * @f_info: filter info structure containing the MAC filter information
4431 * @sw_marker: sw marker to tag the Rx descriptor with
4434 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4437 struct ice_switch_info *sw = hw->switch_info;
4438 struct ice_fltr_mgmt_list_entry *m_entry;
4439 struct ice_fltr_list_entry fl_info;
4440 struct LIST_HEAD_TYPE l_head;
4441 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4442 enum ice_status ret;
4446 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4447 return ICE_ERR_PARAM;
4449 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4450 return ICE_ERR_PARAM;
4452 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4453 return ICE_ERR_PARAM;
4455 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4456 return ICE_ERR_PARAM;
4457 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4459 /* Add filter if it doesn't exist so then the adding of large
4460 * action always results in update
4463 INIT_LIST_HEAD(&l_head);
4464 fl_info.fltr_info = *f_info;
4465 LIST_ADD(&fl_info.list_entry, &l_head);
4467 entry_exists = false;
4468 ret = ice_add_mac(hw, &l_head);
4469 if (ret == ICE_ERR_ALREADY_EXISTS)
4470 entry_exists = true;
4474 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4475 ice_acquire_lock(rule_lock);
4476 /* Get the book keeping entry for the filter */
4477 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4481 /* If counter action was enabled for this rule then don't enable
4482 * sw marker large action
4484 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4485 ret = ICE_ERR_PARAM;
4489 /* if same marker was added before */
4490 if (m_entry->sw_marker_id == sw_marker) {
4491 ret = ICE_ERR_ALREADY_EXISTS;
4495 /* Allocate a hardware table entry to hold large act. Three actions
4496 * for marker based large action
4498 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4502 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4505 /* Update the switch rule to add the marker action */
4506 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4508 ice_release_lock(rule_lock);
4513 ice_release_lock(rule_lock);
4514 /* only remove entry if it did not exist previously */
4516 ret = ice_remove_mac(hw, &l_head);
4522 * ice_add_mac_with_counter - add filter with counter enabled
4523 * @hw: pointer to the hardware structure
4524 * @f_info: pointer to filter info structure containing the MAC filter
/* Adds the MAC filter (if not already present) and then attaches a
 * counter-based large action to its bookkeeping entry.  Mirrors the
 * sw-marker variant above, but allocates a VLAN counter plus a 2-slot
 * large action instead of the 3-slot marker action.
 */
4528 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4530 struct ice_switch_info *sw = hw->switch_info;
4531 struct ice_fltr_mgmt_list_entry *m_entry;
4532 struct ice_fltr_list_entry fl_info;
4533 struct LIST_HEAD_TYPE l_head;
4534 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4535 enum ice_status ret;
/* Only forward-to-VSI MAC filters may carry a counter action. */
4540 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4541 return ICE_ERR_PARAM;
4543 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4544 return ICE_ERR_PARAM;
4546 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4547 return ICE_ERR_PARAM;
4548 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4550 entry_exist = false;
4552 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4554 /* Add filter if it doesn't exist so then the adding of large
4555 * action always results in update
4557 INIT_LIST_HEAD(&l_head);
/* Single-entry temporary list: ice_add_mac() operates on lists. */
4559 fl_info.fltr_info = *f_info;
4560 LIST_ADD(&fl_info.list_entry, &l_head);
4562 ret = ice_add_mac(hw, &l_head);
4563 if (ret == ICE_ERR_ALREADY_EXISTS)
4568 ice_acquire_lock(rule_lock);
/* Locate the SW bookkeeping entry for this filter under the rule lock;
 * a NULL result is treated as ICE_ERR_BAD_PTR below.
 */
4569 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4571 ret = ICE_ERR_BAD_PTR;
4575 /* Don't enable counter for a filter for which sw marker was enabled */
4576 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4577 ret = ICE_ERR_PARAM;
4581 /* If a counter was already enabled then don't need to add again */
4582 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4583 ret = ICE_ERR_ALREADY_EXISTS;
4587 /* Allocate a hardware table entry to VLAN counter */
4588 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4592 /* Allocate a hardware table entry to hold large act. Two actions for
4593 * counter based large action
4595 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4599 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4602 /* Update the switch rule to add the counter action */
4603 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4605 ice_release_lock(rule_lock);
4610 ice_release_lock(rule_lock);
4611 /* only remove entry if it did not exist previously */
4613 ret = ice_remove_mac(hw, &l_head);
4618 /* This is mapping table entry that maps every word within a given protocol
4619 * structure to the real byte offset as per the specification of that
4621 * for example dst address is 3 words in ethertype header and corresponding
4622 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4623 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4624 * matching entry describing its field. This needs to be updated if new
4625 * structure is added to that union.
4627 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4628 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4629 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4630 { ICE_ETYPE_OL, { 0 } },
4631 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4632 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4633 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4634 26, 28, 30, 32, 34, 36, 38 } },
4635 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4636 26, 28, 30, 32, 34, 36, 38 } },
4637 { ICE_TCP_IL, { 0, 2 } },
4638 { ICE_UDP_OF, { 0, 2 } },
4639 { ICE_UDP_ILOS, { 0, 2 } },
4640 { ICE_SCTP_IL, { 0, 2 } },
/* UDP tunnel headers (VXLAN/GENEVE/VXLAN-GPE) start matching at byte
 * offset 8 of their header structure; GTP extends through offset 20.
 */
4641 { ICE_VXLAN, { 8, 10, 12, 14 } },
4642 { ICE_GENEVE, { 8, 10, 12, 14 } },
4643 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4644 { ICE_NVGRE, { 0, 2, 4, 6 } },
4645 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4646 { ICE_PPPOE, { 0, 2, 4, 6 } },
/* Sentinel: keep last; lookup loops terminate on ICE_PROTOCOL_LAST. */
4647 { ICE_PROTOCOL_LAST, { 0 } }
4650 /* The following table describes preferred grouping of recipes.
4651 * If a recipe that needs to be programmed is a superset or matches one of the
4652 * following combinations, then the recipe needs to be chained as per the
4655 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
/* Outer MAC destination address: 3 words at offsets 0/2/4. */
4656 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4657 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Inner MAC DA (3 words) plus one metadata word at offset 44 —
 * presumably the tunnel metadata word; confirm against ICE_TUN_FLAG_MDID.
 */
4658 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4659 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4660 { 0xffff, 0xffff, 0xffff, 0xffff } },
/* First two words of the inner IPv4 header. */
4661 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4662 { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Inner IPv4 words at offsets 12/14 (source address field). */
4663 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4664 { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Maps each software ice_protocol_type to the hardware protocol ID used
 * in field vectors.  Note that all UDP-based tunnels (VXLAN, GENEVE,
 * VXLAN-GPE, GTP) share ICE_UDP_OF_HW, and NVGRE maps to the GRE ID.
 */
4667 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4668 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4669 { ICE_MAC_IL, ICE_MAC_IL_HW },
4670 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4671 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4672 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4673 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4674 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4675 { ICE_TCP_IL, ICE_TCP_IL_HW },
4676 { ICE_UDP_OF, ICE_UDP_OF_HW },
4677 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4678 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4679 { ICE_VXLAN, ICE_UDP_OF_HW },
4680 { ICE_GENEVE, ICE_UDP_OF_HW },
4681 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4682 { ICE_NVGRE, ICE_GRE_OF_HW },
4683 { ICE_GTP, ICE_UDP_OF_HW },
4684 { ICE_PPPOE, ICE_PPPOE_HW },
/* Sentinel: ice_prot_type_to_id() scans until ICE_PROTOCOL_LAST. */
4685 { ICE_PROTOCOL_LAST, 0 }
4689 * ice_find_recp - find a recipe
4690 * @hw: pointer to the hardware structure
4691 * @lkup_exts: extension sequence to match
4693 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4695 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4697 bool refresh_required = true;
4698 struct ice_sw_recipe *recp;
4701 /* Walk through existing recipes to find a match */
4702 recp = hw->switch_info->recp_list;
4703 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4704 /* If recipe was not created for this ID, in SW bookkeeping,
4705 * check if FW has an entry for this recipe. If the FW has an
4706 * entry update it in our SW bookkeeping and continue with the
4709 if (!recp[i].recp_created)
4710 if (ice_get_recp_frm_fw(hw,
4711 hw->switch_info->recp_list, i,
4715 /* if number of words we are looking for match */
4716 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4717 struct ice_fv_word *a = lkup_exts->fv_words;
4718 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive match: every requested (prot_id, off)
 * pair must appear somewhere among the candidate's words.
 */
4722 for (p = 0; p < lkup_exts->n_val_words; p++) {
4723 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4725 if (a[p].off == b[q].off &&
4726 a[p].prot_id == b[q].prot_id)
4727 /* Found the "p"th word in the
4732 /* After walking through all the words in the
4733 * "i"th recipe if "p"th word was not found then
4734 * this recipe is not what we are looking for.
4735 * So break out from this loop and try the next
4738 if (q >= recp[i].lkup_exts.n_val_words) {
4743 /* If for "i"th recipe the found was never set to false
4744 * then it means we found our match
4747 return i; /* Return the recipe ID */
4750 return ICE_MAX_NUM_RECIPES;
4754 * ice_prot_type_to_id - get protocol ID from protocol type
4755 * @type: protocol type
4756 * @id: pointer to variable that will receive the ID
4758 * Returns true if found, false otherwise
4760 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of the sentinel-terminated ice_prot_id_tbl[] above. */
4764 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4765 if (ice_prot_id_tbl[i].type == type) {
4766 *id = ice_prot_id_tbl[i].protocol_id;
4773 * ice_fill_valid_words - fill valid words from a lookup rule
4774 * @rule: advanced rule with lookup information
4775 * @lkup_exts: byte offset extractions of the words that are valid
4777 * calculate valid words in a lookup rule using mask value
/* Appends one fv_word per non-zero 16-bit mask word in the rule to
 * lkup_exts and returns the number of words added (0 on failure or if
 * the protocol type is unknown).
 */
4780 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4781 struct ice_prot_lkup_ext *lkup_exts)
4787 if (!ice_prot_type_to_id(rule->type, &prot_id))
4790 word = lkup_exts->n_val_words;
/* Scan the rule's mask as an array of u16 words; a non-zero word means
 * that word of the header participates in the match.
 */
4792 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4793 if (((u16 *)&rule->m_u)[j] &&
4794 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4795 /* No more space to accommodate */
4796 if (word >= ICE_MAX_CHAIN_WORDS)
4798 lkup_exts->fv_words[word].off =
4799 ice_prot_ext[rule->type].offs[j];
/* NOTE(review): prot_id from ice_prot_type_to_id() is unused; the line
 * below indexes ice_prot_id_tbl by rule->type directly, which relies
 * on the table order matching enum ice_protocol_type — verify.
 */
4800 lkup_exts->fv_words[word].prot_id =
4801 ice_prot_id_tbl[rule->type].protocol_id;
4802 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4806 ret_val = word - lkup_exts->n_val_words;
4807 lkup_exts->n_val_words = word;
4813 * ice_find_prot_off_ind - check for specific ID and offset in rule
4814 * @lkup_exts: an array of protocol header extractions
4815 * @prot_type: protocol type to check
4816 * @off: expected offset of the extraction
4818 * Check if the prot_ext has given protocol ID and offset
/* Returns the index of the matching word, or ICE_MAX_CHAIN_WORDS if no
 * word with this (prot_type, off) pair exists.
 */
4821 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4826 for (j = 0; j < lkup_exts->n_val_words; j++)
4827 if (lkup_exts->fv_words[j].off == off &&
4828 lkup_exts->fv_words[j].prot_id == prot_type)
4831 return ICE_MAX_CHAIN_WORDS;
4835 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4836 * @lkup_exts: an array of protocol header extractions
4837 * @r_policy: preferred recipe grouping policy
4839 * Helper function to check if given recipe group is subset we need to check if
4840 * all the words described by the given recipe group exist in the advanced rule
4841 * look up information
4844 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4845 const struct ice_pref_recipe_group *r_policy)
4847 u8 ind[ICE_NUM_WORDS_RECIPE];
4851 /* check if everything in the r_policy is part of the entire rule */
4852 for (i = 0; i < r_policy->n_val_pairs; i++) {
4855 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4856 r_policy->pairs[i].off);
/* Any pair missing from the rule disqualifies the whole policy. */
4857 if (j >= ICE_MAX_CHAIN_WORDS)
4860 /* store the indexes temporarily found by the find function
4861 * this will be used to mark the words as 'done'
4866 /* If the entire policy recipe was a true match, then mark the fields
4867 * that are covered by the recipe as 'done' meaning that these words
4868 * will be clumped together in one recipe.
4869 * "Done" here means in our searching if certain recipe group
4870 * matches or is subset of the given rule, then we mark all
4871 * the corresponding offsets as found. So the remaining recipes should
4872 * be created with whatever words that were left.
4874 for (i = 0; i < count; i++) {
4877 ice_set_bit(in, lkup_exts->done);
4883 * ice_create_first_fit_recp_def - Create a recipe grouping
4884 * @hw: pointer to the hardware structure
4885 * @lkup_exts: an array of protocol header extractions
4886 * @rg_list: pointer to a list that stores new recipe groups
4887 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4889 * Using first fit algorithm, take all the words that are still not done
4890 * and start grouping them in 4-word groups. Each group makes up one
4893 static enum ice_status
4894 ice_create_first_fit_recp_def(struct ice_hw *hw,
4895 struct ice_prot_lkup_ext *lkup_exts,
4896 struct LIST_HEAD_TYPE *rg_list,
4899 struct ice_pref_recipe_group *grp = NULL;
4904 /* Walk through every word in the rule to check if it is not done. If so
4905 * then this word needs to be part of a new recipe.
4907 for (j = 0; j < lkup_exts->n_val_words; j++)
4908 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group entry when there is no current group or the
 * current one is full (ICE_NUM_WORDS_RECIPE words per group).
 */
4910 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4911 struct ice_recp_grp_entry *entry;
4913 entry = (struct ice_recp_grp_entry *)
4914 ice_malloc(hw, sizeof(*entry));
4916 return ICE_ERR_NO_MEMORY;
4917 LIST_ADD(&entry->l_entry, rg_list);
4918 grp = &entry->r_group;
/* Copy the word's protocol/offset/mask into the current group. */
4922 grp->pairs[grp->n_val_pairs].prot_id =
4923 lkup_exts->fv_words[j].prot_id;
4924 grp->pairs[grp->n_val_pairs].off =
4925 lkup_exts->fv_words[j].off;
4926 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4934 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4935 * @hw: pointer to the hardware structure
4936 * @fv_list: field vector with the extraction sequence information
4937 * @rg_list: recipe groupings with protocol-offset pairs
4939 * Helper function to fill in the field vector indices for protocol-offset
4940 * pairs. These indexes are then ultimately programmed into a recipe.
4942 static enum ice_status
4943 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4944 struct LIST_HEAD_TYPE *rg_list)
4946 struct ice_sw_fv_list_entry *fv;
4947 struct ice_recp_grp_entry *rg;
4948 struct ice_fv_word *fv_ext;
4950 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; all profiles
 * in fv_list are expected to share the needed extractions.
 */
4953 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4954 fv_ext = fv->fv_ptr->ew;
4956 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4959 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4960 struct ice_fv_word *pr;
4965 pr = &rg->r_group.pairs[i];
4966 mask = rg->r_group.mask[i];
4968 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4969 if (fv_ext[j].prot_id == pr->prot_id &&
4970 fv_ext[j].off == pr->off) {
4973 /* Store index of field vector */
4975 /* Mask is given by caller as big
4976 * endian, but sent to FW as little
/* 16-bit byte swap of the mask before handing it to firmware. */
4979 rg->fv_mask[i] = mask << 8 | mask >> 8;
4983 /* Protocol/offset could not be found, caller gave an
4987 return ICE_ERR_PARAM;
4995 * ice_find_free_recp_res_idx - find free result indexes for recipe
4996 * @hw: pointer to hardware structure
4997 * @profiles: bitmap of profiles that will be associated with the new recipe
4998 * @free_idx: pointer to variable to receive the free index bitmap
5000 * The algorithm used here is:
5001 * 1. When creating a new recipe, create a set P which contains all
5002 * Profiles that will be associated with our new recipe
5004 * 2. For each Profile p in set P:
5005 * a. Add all recipes associated with Profile p into set R
5006 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5007 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5008 * i. Or just assume they all have the same possible indexes:
5010 * i.e., PossibleIndexes = 0x0000F00000000000
5012 * 3. For each Recipe r in set R:
5013 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5014 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5016 * FreeIndexes will contain the bits indicating the indexes free for use,
5017 * then the code needs to update the recipe[r].used_result_idx_bits to
5018 * indicate which indexes were selected for use by this recipe.
5021 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5022 ice_bitmap_t *free_idx)
5024 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5025 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5026 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5030 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5031 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5032 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5033 ice_init_possible_res_bm(possible_idx);
/* Restrict candidates to the bits allowed by ICE_POSSIBLE_RES_IDX. */
5035 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
5036 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
5037 ice_set_bit(bit, possible_idx);
5039 /* For each profile we are going to associate the recipe with, add the
5040 * recipes that are associated with that profile. This will give us
5041 * the set of recipes that our recipe may collide with.
5044 while (ICE_MAX_NUM_PROFILES >
5045 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5046 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5047 ICE_MAX_NUM_RECIPES);
5052 /* For each recipe that our new recipe may collide with, determine
5053 * which indexes have been used.
5055 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5056 if (ice_is_bit_set(recipes, bit))
5057 ice_or_bitmap(used_idx, used_idx,
5058 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is always a subset of possible). */
5061 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5063 /* return number of free indexes */
5065 while (ICE_MAX_FV_WORDS >
5066 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5075 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5076 * @hw: pointer to hardware structure
5077 * @rm: recipe management list entry
5078 * @match_tun: if field vector index for tunnel needs to be programmed
5079 * @profiles: bitmap of profiles that will be associated.
/* Builds one AQ buffer entry per recipe group in rm->rg_list, chains
 * them through result indexes when more than one group is needed,
 * programs the recipes via ice_aq_add_recipe(), and finally mirrors
 * everything into the SW bookkeeping in hw->switch_info->recp_list.
 */
5081 static enum ice_status
5082 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5083 bool match_tun, ice_bitmap_t *profiles)
5085 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5086 struct ice_aqc_recipe_data_elem *tmp;
5087 struct ice_aqc_recipe_data_elem *buf;
5088 struct ice_recp_grp_entry *entry;
5089 enum ice_status status;
5095 /* When more than one recipe are required, another recipe is needed to
5096 * chain them together. Matching a tunnel metadata ID takes up one of
5097 * the match fields in the chaining recipe reducing the number of
5098 * chained recipes by one.
5100 /* check number of free result indices */
5101 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5102 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5104 if (rm->n_grp_count > 1) {
5105 if (rm->n_grp_count > free_res_idx)
5106 return ICE_ERR_MAX_LIMIT;
5111 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5112 ICE_MAX_NUM_RECIPES,
5115 return ICE_ERR_NO_MEMORY;
5117 buf = (struct ice_aqc_recipe_data_elem *)
5118 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5120 status = ICE_ERR_NO_MEMORY;
5124 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5125 recipe_count = ICE_MAX_NUM_RECIPES;
/* Fetch the existing recipe templates from FW; tmp[0] is used below
 * as the base for each newly-programmed recipe.
 */
5126 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5128 if (status || recipe_count == 0)
5131 /* Allocate the recipe resources, and configure them according to the
5132 * match fields from protocol headers and extracted field vectors.
5134 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5135 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5138 status = ice_alloc_recipe(hw, &entry->rid);
5142 /* Clear the result index of the located recipe, as this will be
5143 * updated, if needed, later in the recipe creation process.
5145 tmp[0].content.result_indx = 0;
5147 buf[recps] = tmp[0];
5148 buf[recps].recipe_indx = (u8)entry->rid;
5149 /* if the recipe is a non-root recipe RID should be programmed
5150 * as 0 for the rules to be applied correctly.
5152 buf[recps].content.rid = 0;
5153 ice_memset(&buf[recps].content.lkup_indx, 0,
5154 sizeof(buf[recps].content.lkup_indx),
5157 /* All recipes use look-up index 0 to match switch ID. */
5158 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5159 buf[recps].content.mask[0] =
5160 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5161 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5164 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5165 buf[recps].content.lkup_indx[i] = 0x80;
5166 buf[recps].content.mask[i] = 0;
/* Fill in the real lookup indexes/masks for this group's words. */
5169 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5170 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5171 buf[recps].content.mask[i + 1] =
5172 CPU_TO_LE16(entry->fv_mask[i]);
5175 if (rm->n_grp_count > 1) {
5176 /* Checks to see if there really is a valid result index
5179 if (chain_idx >= ICE_MAX_FV_WORDS) {
5180 ice_debug(hw, ICE_DBG_SW,
5181 "No chain index available\n");
5182 status = ICE_ERR_MAX_LIMIT;
/* Each chained recipe writes its outcome into its own result index;
 * the root (chaining) recipe later matches on those indexes.
 */
5186 entry->chain_idx = chain_idx;
5187 buf[recps].content.result_indx =
5188 ICE_AQ_RECIPE_RESULT_EN |
5189 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5190 ICE_AQ_RECIPE_RESULT_DATA_M);
5191 ice_clear_bit(chain_idx, result_idx_bm);
5192 chain_idx = ice_find_first_bit(result_idx_bm,
5196 /* fill recipe dependencies */
5197 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5198 ICE_MAX_NUM_RECIPES);
5199 ice_set_bit(buf[recps].recipe_indx,
5200 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5201 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is also the root recipe. */
5205 if (rm->n_grp_count == 1) {
5206 rm->root_rid = buf[0].recipe_indx;
5207 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5208 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5209 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5210 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5211 sizeof(buf[0].recipe_bitmap),
5212 ICE_NONDMA_TO_NONDMA);
5214 status = ICE_ERR_BAD_PTR;
5217 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5218 * the recipe which is getting created if specified
5219 * by user. Usually any advanced switch filter, which results
5220 * into new extraction sequence, ended up creating a new recipe
5221 * of type ROOT and usually recipes are associated with profiles
5222 * Switch rule referring newly created recipe, needs to have
5223 * either/or 'fwd' or 'join' priority, otherwise switch rule
5224 * evaluation will not happen correctly. In other words, if
5225 * switch rule to be evaluated on priority basis, then recipe
5226 * needs to have priority, otherwise it will be evaluated last.
5228 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: build one extra root recipe that matches on the
 * result indexes produced by all the chained recipes.
 */
5230 struct ice_recp_grp_entry *last_chain_entry;
5233 /* Allocate the last recipe that will chain the outcomes of the
5234 * other recipes together
5236 status = ice_alloc_recipe(hw, &rid);
5240 buf[recps].recipe_indx = (u8)rid;
5241 buf[recps].content.rid = (u8)rid;
5242 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5243 /* the new entry created should also be part of rg_list to
5244 * make sure we have complete recipe
5246 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5247 sizeof(*last_chain_entry));
5248 if (!last_chain_entry) {
5249 status = ICE_ERR_NO_MEMORY;
5252 last_chain_entry->rid = rid;
5253 ice_memset(&buf[recps].content.lkup_indx, 0,
5254 sizeof(buf[recps].content.lkup_indx),
5256 /* All recipes use look-up index 0 to match switch ID. */
5257 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5258 buf[recps].content.mask[0] =
5259 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5260 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5261 buf[recps].content.lkup_indx[i] =
5262 ICE_AQ_RECIPE_LKUP_IGNORE;
5263 buf[recps].content.mask[i] = 0;
5267 /* update r_bitmap with the recp that is used for chaining */
5268 ice_set_bit(rid, rm->r_bitmap);
5269 /* this is the recipe that chains all the other recipes so it
5270 * should not have a chaining ID to indicate the same
5272 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5273 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5275 last_chain_entry->fv_idx[i] = entry->chain_idx;
5276 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5277 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5278 ice_set_bit(entry->rid, rm->r_bitmap);
5280 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5281 if (sizeof(buf[recps].recipe_bitmap) >=
5282 sizeof(rm->r_bitmap)) {
5283 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5284 sizeof(buf[recps].recipe_bitmap),
5285 ICE_NONDMA_TO_NONDMA);
5287 status = ICE_ERR_BAD_PTR;
5290 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5292 /* To differentiate among different UDP tunnels, a meta data ID
5296 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5297 buf[recps].content.mask[i] =
5298 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5302 rm->root_rid = (u8)rid;
/* Programming recipes requires the global change lock. */
5304 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5308 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5309 ice_release_change_lock(hw);
5313 /* Every recipe that just got created add it to the recipe
5316 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5317 struct ice_switch_info *sw = hw->switch_info;
5318 bool is_root, idx_found = false;
5319 struct ice_sw_recipe *recp;
5320 u16 idx, buf_idx = 0;
5322 /* find buffer index for copying some data */
5323 for (idx = 0; idx < rm->n_grp_count; idx++)
5324 if (buf[idx].recipe_indx == entry->rid) {
5330 status = ICE_ERR_OUT_OF_RANGE;
5334 recp = &sw->recp_list[entry->rid];
5335 is_root = (rm->root_rid == entry->rid);
5336 recp->is_root = is_root;
5338 recp->root_rid = entry->rid;
5339 recp->big_recp = (is_root && rm->n_grp_count > 1);
5341 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5342 entry->r_group.n_val_pairs *
5343 sizeof(struct ice_fv_word),
5344 ICE_NONDMA_TO_NONDMA);
5346 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5347 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5349 /* Copy non-result fv index values and masks to recipe. This
5350 * call will also update the result recipe bitmask.
5352 ice_collect_result_idx(&buf[buf_idx], recp);
5354 /* for non-root recipes, also copy to the root, this allows
5355 * easier matching of a complete chained recipe
5358 ice_collect_result_idx(&buf[buf_idx],
5359 &sw->recp_list[rm->root_rid]);
5361 recp->n_ext_words = entry->r_group.n_val_pairs;
5362 recp->chain_idx = entry->chain_idx;
5363 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5364 recp->tun_type = rm->tun_type;
5365 recp->recp_created = true;
5380 * ice_create_recipe_group - creates recipe group
5381 * @hw: pointer to hardware structure
5382 * @rm: recipe management list entry
5383 * @lkup_exts: lookup elements
/* Partitions the lookup words into recipe groups: preferred groupings
 * from ice_recipe_pack[] are claimed first, then the remaining words
 * are packed first-fit by ice_create_first_fit_recp_def().
 */
5385 static enum ice_status
5386 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5387 struct ice_prot_lkup_ext *lkup_exts)
5389 struct ice_recp_grp_entry *entry;
5390 struct ice_recp_grp_entry *tmp;
5391 enum ice_status status;
5395 rm->n_grp_count = 0;
5398 if (lkup_exts->n_val_words > ICE_NUM_WORDS_RECIPE) {
5399 /* Each switch recipe can match up to 5 words or metadata. One
5400 * word in each recipe is used to match the switch ID. Four
5401 * words are left for matching other values. If the new advanced
5402 * recipe requires more than 4 words, it needs to be split into
5403 * multiple recipes which are chained together using the
5404 * intermediate result that each produces as input to the other
5405 * recipes in the sequence.
5407 groups = ARRAY_SIZE(ice_recipe_pack);
5409 /* Check if any of the preferred recipes from the grouping
5412 for (i = 0; i < groups; i++)
5413 /* Check if the recipe from the preferred grouping
5414 * matches or is a subset of the fields that needs to be
5417 if (ice_is_recipe_subset(lkup_exts,
5418 &ice_recipe_pack[i])) {
5419 /* This recipe can be used by itself or grouped
5420 * with other recipes.
5422 entry = (struct ice_recp_grp_entry *)
5423 ice_malloc(hw, sizeof(*entry));
5425 status = ICE_ERR_NO_MEMORY;
5428 entry->r_group = ice_recipe_pack[i];
5429 LIST_ADD(&entry->l_entry, &rm->rg_list);
5434 /* Create recipes for words that are marked not done by packing them
5437 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5438 &rm->rg_list, &recp_count);
5440 rm->n_grp_count += recp_count;
/* Cache the full word list on rm for later recipe bookkeeping. */
5441 rm->n_ext_words = lkup_exts->n_val_words;
5442 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5443 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5444 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5445 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path: free every group entry queued on rg_list so far. */
5450 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5452 LIST_DEL(&entry->l_entry);
5453 ice_free(hw, entry);
5461 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5462 * @hw: pointer to hardware structure
5463 * @lkups: lookup elements or match criteria for the advanced recipe, one
5464 * structure per protocol header
5465 * @lkups_cnt: number of protocols
5466 * @bm: bitmap of field vectors to consider
5467 * @fv_list: pointer to a list that holds the returned field vectors
5469 static enum ice_status
5470 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5471 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5473 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element. */
5477 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids))
5479 return ICE_ERR_NO_MEMORY;
5481 for (i = 0; i < lkups_cnt; i++)
5482 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5483 status = ICE_ERR_CFG;
5487 /* Find field vectors that include all specified protocol types */
5488 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is only needed for the query above; free on all paths. */
5491 ice_free(hw, prot_ids);
5496 * ice_add_special_words - Add words that are not protocols, such as metadata
5497 * @rinfo: other information regarding the rule e.g. priority and action info
5498 * @lkup_exts: lookup word structure
5500 static enum ice_status
5501 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5502 struct ice_prot_lkup_ext *lkup_exts)
5504 /* If this is a tunneled packet, then add recipe index to match the
5505 * tunnel bit in the packet metadata flags.
5507 if (rinfo->tun_type != ICE_NON_TUN) {
5508 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5509 u8 word = lkup_exts->n_val_words++;
/* Append one metadata word matching the tunnel flag. */
5511 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5512 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5514 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No room for the extra word: the rule already uses all slots. */
5516 return ICE_ERR_MAX_LIMIT;
5523 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5524 * @hw: pointer to hardware structure
5525 * @rinfo: other information regarding the rule e.g. priority and action info
5526 * @bm: pointer to memory for returning the bitmap of field vectors
5529 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5532 enum ice_prof_type type;
/* Map the rule's tunnel type to a profile class, then fetch the
 * bitmap of field vectors belonging to that class.
 */
5534 switch (rinfo->tun_type) {
5536 type = ICE_PROF_NON_TUN;
5538 case ICE_ALL_TUNNELS:
5539 type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnels share the UDP profile class. */
5541 case ICE_SW_TUN_VXLAN_GPE:
5542 case ICE_SW_TUN_GENEVE:
5543 case ICE_SW_TUN_VXLAN:
5544 case ICE_SW_TUN_UDP:
5545 case ICE_SW_TUN_GTP:
5546 type = ICE_PROF_TUN_UDP;
5548 case ICE_SW_TUN_NVGRE:
5549 type = ICE_PROF_TUN_GRE;
5551 case ICE_SW_TUN_PPPOE:
5552 type = ICE_PROF_TUN_PPPOE;
/* Unknown/mixed tunnel types fall back to all profiles. */
5554 case ICE_SW_TUN_AND_NON_TUN:
5556 type = ICE_PROF_ALL;
5560 ice_get_sw_fv_bitmap(hw, type, bm);
5564 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5565 * @hw: pointer to hardware structure
5566 * @lkups: lookup elements or match criteria for the advanced recipe, one
5567 * structure per protocol header
5568 * @lkups_cnt: number of protocols
5569 * @rinfo: other information regarding the rule e.g. priority and action info
5570 * @rid: return the recipe ID of the recipe created
5572 static enum ice_status
5573 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5574 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5576 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5577 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5578 struct ice_prot_lkup_ext *lkup_exts;
5579 struct ice_recp_grp_entry *r_entry;
5580 struct ice_sw_fv_list_entry *fvit;
5581 struct ice_recp_grp_entry *r_tmp;
5582 struct ice_sw_fv_list_entry *tmp;
5583 enum ice_status status = ICE_SUCCESS;
5584 struct ice_sw_recipe *rm;
5585 bool match_tun = false;
5589 return ICE_ERR_PARAM;
5591 lkup_exts = (struct ice_prot_lkup_ext *)
5592 ice_malloc(hw, sizeof(*lkup_exts));
5594 return ICE_ERR_NO_MEMORY;
5596 /* Determine the number of words to be matched and if it exceeds a
5597 * recipe's restrictions
5599 for (i = 0; i < lkups_cnt; i++) {
5602 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5603 status = ICE_ERR_CFG;
5604 goto err_free_lkup_exts;
5607 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5609 status = ICE_ERR_CFG;
5610 goto err_free_lkup_exts;
5614 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5616 status = ICE_ERR_NO_MEMORY;
5617 goto err_free_lkup_exts;
5620 /* Get field vectors that contain fields extracted from all the protocol
5621 * headers being programmed.
5623 INIT_LIST_HEAD(&rm->fv_list);
5624 INIT_LIST_HEAD(&rm->rg_list);
5626 /* Get bitmap of field vectors (profiles) that are compatible with the
5627 * rule request; only these will be searched in the subsequent call to
5630 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5632 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5636 /* Group match words into recipes using preferred recipe grouping
5639 status = ice_create_recipe_group(hw, rm, lkup_exts);
5643 /* There is only profile for UDP tunnels. So, it is necessary to use a
5644 * metadata ID flag to differentiate different tunnel types. A separate
5645 * recipe needs to be used for the metadata.
5647 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5648 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5649 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5652 /* set the recipe priority if specified */
5653 rm->priority = rinfo->priority ? rinfo->priority : 0;
5655 /* Find offsets from the field vector. Pick the first one for all the
5658 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5662 /* get bitmap of all profiles the recipe will be associated with */
5663 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5664 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5666 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5667 ice_set_bit((u16)fvit->profile_id, profiles);
5670 /* Create any special protocol/offset pairs, such as looking at tunnel
5671 * bits by extracting metadata
5673 status = ice_add_special_words(rinfo, lkup_exts);
5675 goto err_free_lkup_exts;
5677 /* Look for a recipe which matches our requested fv / mask list */
5678 *rid = ice_find_recp(hw, lkup_exts);
5679 if (*rid < ICE_MAX_NUM_RECIPES)
5680 /* Success if found a recipe that match the existing criteria */
5683 /* Recipe we need does not exist, add a recipe */
5684 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5688 /* Associate all the recipes created with all the profiles in the
5689 * common field vector.
5691 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5693 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5695 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5696 (u8 *)r_bitmap, NULL);
5700 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5701 ICE_MAX_NUM_RECIPES);
5702 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5706 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5709 ice_release_change_lock(hw);
5715 *rid = rm->root_rid;
5716 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5717 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
5719 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5720 ice_recp_grp_entry, l_entry) {
5721 LIST_DEL(&r_entry->l_entry);
5722 ice_free(hw, r_entry);
5725 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5727 LIST_DEL(&fvit->list_entry);
5732 ice_free(hw, rm->root_buf);
5737 ice_free(hw, lkup_exts);
/* NOTE(review): this extract elides some original source lines (returns,
 * braces, flag assignments); comments below describe only what the visible
 * lines establish.
 */
5743 * ice_find_dummy_packet - find dummy packet by tunnel type
5745 * @lkups: lookup elements or match criteria for the advanced recipe, one
5746 * structure per protocol header
5747 * @lkups_cnt: number of protocols
5748 * @tun_type: tunnel type from the match criteria
5749 * @pkt: dummy packet to fill according to filter match criteria
5750 * @pkt_len: packet length of dummy packet
5751 * @offsets: pointer to receive the pointer to the offsets for the packet
5754 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5755 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5757 const struct ice_dummy_pkt_offsets **offsets)
5759 bool tcp = false, udp = false, ipv6 = false;
/* GTP and PPPoE are selected purely on tun_type, before scanning lkups. */
5762 if (tun_type == ICE_SW_TUN_GTP) {
5763 *pkt = dummy_udp_gtp_packet;
5764 *pkt_len = sizeof(dummy_udp_gtp_packet);
5765 *offsets = dummy_udp_gtp_packet_offsets;
5768 if (tun_type == ICE_SW_TUN_PPPOE) {
5769 *pkt = dummy_pppoe_packet;
5770 *pkt_len = sizeof(dummy_pppoe_packet);
5771 *offsets = dummy_pppoe_packet_offsets;
/* Scan lookups to learn which L3/L4 protocols the rule matches on; the
 * tcp/udp/ipv6 flag assignments are presumably on the elided lines that
 * followed each condition — TODO confirm against the full source.
 */
5774 for (i = 0; i < lkups_cnt; i++) {
5775 if (lkups[i].type == ICE_UDP_ILOS)
5777 else if (lkups[i].type == ICE_TCP_IL)
5779 else if (lkups[i].type == ICE_IPV6_OFOS)
5783 if (tun_type == ICE_ALL_TUNNELS) {
5784 *pkt = dummy_gre_udp_packet;
5785 *pkt_len = sizeof(dummy_gre_udp_packet);
5786 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: a TCP-inner vs UDP-inner dummy packet is chosen; the branch
 * between the two appears to be on an elided line.
 */
5790 if (tun_type == ICE_SW_TUN_NVGRE) {
5792 *pkt = dummy_gre_tcp_packet;
5793 *pkt_len = sizeof(dummy_gre_tcp_packet);
5794 *offsets = dummy_gre_tcp_packet_offsets;
5798 *pkt = dummy_gre_udp_packet;
5799 *pkt_len = sizeof(dummy_gre_udp_packet);
5800 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels: VXLAN / GENEVE / VXLAN-GPE / generic UDP tunnel. */
5804 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5805 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5807 *pkt = dummy_udp_tun_tcp_packet;
5808 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5809 *offsets = dummy_udp_tun_tcp_packet_offsets;
5813 *pkt = dummy_udp_tun_udp_packet;
5814 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5815 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled cases: choose by the tcp/udp/ipv6 flags gathered above. */
5820 *pkt = dummy_udp_packet;
5821 *pkt_len = sizeof(dummy_udp_packet);
5822 *offsets = dummy_udp_packet_offsets;
5824 } else if (udp && ipv6) {
5825 *pkt = dummy_udp_ipv6_packet;
5826 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5827 *offsets = dummy_udp_ipv6_packet_offsets;
5829 } else if ((tcp && ipv6) || ipv6) {
5830 *pkt = dummy_tcp_ipv6_packet;
5831 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5832 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Fallback: IPv4 TCP dummy packet. */
5836 *pkt = dummy_tcp_packet;
5837 *pkt_len = sizeof(dummy_tcp_packet);
5838 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): this extract elides some original source lines (closing
 * braces, case labels, a few statements); comments describe only what is
 * visible here.
 */
5842 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5844 * @lkups: lookup elements or match criteria for the advanced recipe, one
5845 * structure per protocol header
5846 * @lkups_cnt: number of protocols
5847 * @s_rule: stores rule information from the match criteria
5848 * @dummy_pkt: dummy packet to fill according to filter match criteria
5849 * @pkt_len: packet length of dummy packet
5850 * @offsets: offset info for the dummy packet
5852 static enum ice_status
5853 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5854 struct ice_aqc_sw_rules_elem *s_rule,
5855 const u8 *dummy_pkt, u16 pkt_len,
5856 const struct ice_dummy_pkt_offsets *offsets)
5861 /* Start with a packet with a pre-defined/dummy content. Then, fill
5862 * in the header values to be looked up or matched.
/* The packet is built in place inside the switch-rule buffer. */
5864 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5866 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5868 for (i = 0; i < lkups_cnt; i++) {
5869 enum ice_protocol_type type;
5870 u16 offset = 0, len = 0, j;
5873 /* find the start of this layer; it should be found since this
5874 * was already checked when search for the dummy packet
5876 type = lkups[i].type;
5877 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5878 if (type == offsets[j].type) {
5879 offset = offsets[j].offset;
5884 /* this should never happen in a correct calling sequence */
5886 return ICE_ERR_PARAM;
/* Per-protocol header length; the case labels appear to be on elided
 * lines — only the assignments are visible here.
 */
5888 switch (lkups[i].type) {
5891 len = sizeof(struct ice_ether_hdr);
5894 len = sizeof(struct ice_ethtype_hdr);
5898 len = sizeof(struct ice_ipv4_hdr);
5902 len = sizeof(struct ice_ipv6_hdr);
5907 len = sizeof(struct ice_l4_hdr);
5910 len = sizeof(struct ice_sctp_hdr);
5913 len = sizeof(struct ice_nvgre);
5918 len = sizeof(struct ice_udp_tnl_hdr);
5922 len = sizeof(struct ice_udp_gtp_hdr);
/* Unknown protocol type: reject. */
5925 return ICE_ERR_PARAM;
5928 /* the length should be a word multiple */
5929 if (len % ICE_BYTES_PER_WORD)
5932 /* We have the offset to the header start, the length, the
5933 * caller's header values and mask. Use this information to
5934 * copy the data into the dummy packet appropriately based on
5935 * the mask. Note that we need to only write the bits as
5936 * indicated by the mask to make sure we don't improperly write
5937 * over any significant packet data.
/* Merge 16 bits at a time: keep unmasked bits of the dummy packet,
 * overlay masked bits of the caller's header value.
 */
5939 for (j = 0; j < len / sizeof(u16); j++)
5940 if (((u16 *)&lkups[i].m_u)[j])
5941 ((u16 *)(pkt + offset))[j] =
5942 (((u16 *)(pkt + offset))[j] &
5943 ~((u16 *)&lkups[i].m_u)[j]) |
5944 (((u16 *)&lkups[i].h_u)[j] &
5945 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the rule (little-endian). */
5948 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): this extract elides some original lines (loop body
 * continuations and the return statements are not visible).
 */
5954 * ice_find_adv_rule_entry - Search a rule entry
5955 * @hw: pointer to the hardware structure
5956 * @lkups: lookup elements or match criteria for the advanced recipe, one
5957 * structure per protocol header
5958 * @lkups_cnt: number of protocols
5959 * @recp_id: recipe ID for which we are finding the rule
5960 * @rinfo: other information regarding the rule e.g. priority and action info
5962 * Helper function to search for a given advance rule entry
5963 * Returns pointer to entry storing the rule if found
5965 static struct ice_adv_fltr_mgmt_list_entry *
5966 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5967 u16 lkups_cnt, u8 recp_id,
5968 struct ice_adv_rule_info *rinfo)
5970 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5971 struct ice_switch_info *sw = hw->switch_info;
/* Walk every bookkept rule of this recipe looking for one whose lookup
 * array and action flags match the caller's request.
 */
5974 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5975 ice_adv_fltr_mgmt_list_entry, list_entry) {
5976 bool lkups_matched = true;
/* Cheap reject first: the lookup counts must agree exactly. */
5978 if (lkups_cnt != list_itr->lkups_cnt)
/* Then compare each lookup element bytewise via memcmp. */
5980 for (i = 0; i < list_itr->lkups_cnt; i++)
5981 if (memcmp(&list_itr->lkups[i], &lkups[i],
5983 lkups_matched = false;
/* Lookups alone are not enough: flag and tunnel type (and, per the
 * elided continuation, presumably more of rule_info) must also match.
 */
5986 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5987 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): this extract elides some original lines (status checks,
 * braces, argument continuations); comments describe only visible code.
 */
5995 * ice_adv_add_update_vsi_list
5996 * @hw: pointer to the hardware structure
5997 * @m_entry: pointer to current adv filter management list entry
5998 * @cur_fltr: filter information from the book keeping entry
5999 * @new_fltr: filter information with the new VSI to be added
6001 * Call AQ command to add or update previously created VSI list with new VSI.
6003 * Helper function to do book keeping associated with adding filter information
6004 * The algorithm to do the booking keeping is described below :
6005 * When a VSI needs to subscribe to a given advanced filter
6006 * if only one VSI has been added till now
6007 * Allocate a new VSI list and add two VSIs
6008 * to this list using switch rule command
6009 * Update the previously created switch rule with the
6010 * newly created VSI list ID
6011 * if a VSI list was previously created
6012 * Add the new VSI to the previously created VSI list set
6013 * using the update switch rule command
6015 static enum ice_status
6016 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6017 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6018 struct ice_adv_rule_info *cur_fltr,
6019 struct ice_adv_rule_info *new_fltr)
6021 enum ice_status status;
6022 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be merged into a VSI list. */
6024 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6025 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
6026 return ICE_ERR_NOT_IMPL;
/* Two identical DROP rules would be a duplicate, not an update. */
6028 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
6029 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6030 return ICE_ERR_ALREADY_EXISTS;
/* Mixing queue-direction actions with VSI forwarding is unsupported. */
6032 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6033 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6034 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6035 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6036 return ICE_ERR_NOT_IMPL;
6038 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6039 /* Only one entry existed in the mapping and it was not already
6040 * a part of a VSI list. So, create a VSI list with the old and
6043 struct ice_fltr_info tmp_fltr;
6044 u16 vsi_handle_arr[2];
6046 /* A rule already exists with the new VSI being added */
6047 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6048 new_fltr->sw_act.fwd_id.hw_vsi_id)
6049 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry list from the existing and the new VSI. */
6051 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6052 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6053 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6059 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6060 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6061 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6062 /* Update the previous switch rule of "forward to VSI" to
6065 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the entry now forwards to the new VSI list. */
6069 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6070 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6071 m_entry->vsi_list_info =
6072 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
/* A VSI list already exists: just add the new VSI to it. */
6075 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6077 if (!m_entry->vsi_list_info)
6080 /* A rule already exists with the new VSI being added */
6081 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6084 /* Update the previously created VSI list set with
6085 * the new VSI ID passed in
6087 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6089 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6091 ice_aqc_opc_update_sw_rules,
6093 /* update VSI list mapping info with new VSI ID */
6095 ice_set_bit(vsi_handle,
6096 m_entry->vsi_list_info->vsi_map);
/* NOTE(review): increment presumably happens only on success; the
 * guarding status check appears to be on an elided line.
 */
6099 m_entry->vsi_count++;
/* NOTE(review): this extract elides many original lines (parameter checks,
 * some case labels, status checks, braces); comments describe only what is
 * visible here.
 */
6104 * ice_add_adv_rule - helper function to create an advanced switch rule
6105 * @hw: pointer to the hardware structure
6106 * @lkups: information on the words that needs to be looked up. All words
6107 * together makes one recipe
6108 * @lkups_cnt: num of entries in the lkups array
6109 * @rinfo: other information related to the rule that needs to be programmed
6110 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6111 * ignored is case of error.
6113 * This function can program only 1 rule at a time. The lkups is used to
6114 * describe the all the words that forms the "lookup" portion of the recipe.
6115 * These words can span multiple protocols. Callers to this function need to
6116 * pass in a list of protocol headers with lookup information along and mask
6117 * that determines which words are valid from the given protocol header.
6118 * rinfo describes other information related to this rule such as forwarding
6119 * IDs, priority of this rule, etc.
6122 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6123 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6124 struct ice_rule_query_data *added_entry)
6126 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6127 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6128 const struct ice_dummy_pkt_offsets *pkt_offsets;
6129 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6130 struct LIST_HEAD_TYPE *rule_head;
6131 struct ice_switch_info *sw;
6132 enum ice_status status;
6133 const u8 *pkt = NULL;
/* Early parameter rejection (the condition is on an elided line). */
6139 return ICE_ERR_PARAM;
6141 /* get # of words we need to match */
6143 for (i = 0; i < lkups_cnt; i++) {
6146 ptr = (u16 *)&lkups[i].m_u;
6147 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Reject empty match sets and rules too wide for a chained recipe. */
6151 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6152 return ICE_ERR_PARAM;
6154 /* make sure that we can locate a dummy packet */
6155 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6158 status = ICE_ERR_PARAM;
6159 goto err_ice_add_adv_rule;
/* Only these four actions are supported for advanced rules. */
6162 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6163 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6164 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6165 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6168 vsi_handle = rinfo->sw_act.vsi_handle;
6169 if (!ice_is_vsi_valid(hw, vsi_handle))
6170 return ICE_ERR_PARAM;
/* Resolve driver VSI handle to the hardware VSI number. */
6172 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6173 rinfo->sw_act.fwd_id.hw_vsi_id =
6174 ice_get_hw_vsi_num(hw, vsi_handle);
6175 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6176 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or find) the recipe backing this rule; rid receives its ID. */
6178 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6181 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6183 /* we have to add VSI to VSI_LIST and increment vsi_count.
6184 * Also Update VSI list so that we can change forwarding rule
6185 * if the rule already exists, we will check if it exists with
6186 * same vsi_id, if not then add it to the VSI list if it already
6187 * exists if not then create a VSI list and add the existing VSI
6188 * ID and the new VSI ID to the list
6189 * We will add that VSI to the list
6191 status = ice_adv_add_update_vsi_list(hw, m_entry,
6192 &m_entry->rule_info,
/* Rule already existed: report its identifiers back to the caller. */
6195 added_entry->rid = rid;
6196 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6197 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: allocate the AQ buffer sized for header-less rule + packet. */
6201 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6202 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6204 return ICE_ERR_NO_MEMORY;
6205 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's action into the single-action word. */
6206 switch (rinfo->sw_act.fltr_act) {
6207 case ICE_FWD_TO_VSI:
6208 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6209 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6210 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6213 act |= ICE_SINGLE_ACT_TO_Q;
6214 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6215 ICE_SINGLE_ACT_Q_INDEX_M;
6217 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size. */
6218 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6219 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6220 act |= ICE_SINGLE_ACT_TO_Q;
6221 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6222 ICE_SINGLE_ACT_Q_INDEX_M;
6223 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6224 ICE_SINGLE_ACT_Q_REGION_M;
6226 case ICE_DROP_PACKET:
6227 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6228 ICE_SINGLE_ACT_VALID_BIT;
/* Unsupported action (default case, label presumably elided). */
6231 status = ICE_ERR_CFG;
6232 goto err_ice_add_adv_rule;
6235 /* set the rule LOOKUP type based on caller specified 'RX'
6236 * instead of hardcoding it to be either LOOKUP_TX/RX
6238 * for 'RX' set the source to be the port number
6239 * for 'TX' set the source to be the source HW VSI number (determined
6243 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6244 s_rule->pdata.lkup_tx_rx.src =
6245 CPU_TO_LE16(hw->port_info->lport);
6247 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6248 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6251 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6252 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Embed the match packet built from lkups into the rule buffer. */
6254 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
/* Program the rule into hardware via the admin queue. */
6257 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6258 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6261 goto err_ice_add_adv_rule;
/* Create the bookkeeping entry mirroring what hardware now holds. */
6262 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6263 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6265 status = ICE_ERR_NO_MEMORY;
6266 goto err_ice_add_adv_rule;
6269 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6270 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6271 ICE_NONDMA_TO_NONDMA);
6272 if (!adv_fltr->lkups) {
6273 status = ICE_ERR_NO_MEMORY;
6274 goto err_ice_add_adv_rule;
6277 adv_fltr->lkups_cnt = lkups_cnt;
6278 adv_fltr->rule_info = *rinfo;
/* The rule ID is assigned by firmware and returned in the buffer. */
6279 adv_fltr->rule_info.fltr_rule_id =
6280 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6281 sw = hw->switch_info;
6282 sw->recp_list[rid].adv_rule = true;
6283 rule_head = &sw->recp_list[rid].filt_rules;
6285 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6286 struct ice_fltr_info tmp_fltr;
6288 tmp_fltr.fltr_rule_id =
6289 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6290 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6291 tmp_fltr.fwd_id.hw_vsi_id =
6292 ice_get_hw_vsi_num(hw, vsi_handle);
6293 tmp_fltr.vsi_handle = vsi_handle;
6294 /* Update the previous switch rule of "forward to VSI" to
6297 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6299 goto err_ice_add_adv_rule;
6300 adv_fltr->vsi_count = 1;
6303 /* Add rule entry to book keeping list */
6304 LIST_ADD(&adv_fltr->list_entry, rule_head);
6306 added_entry->rid = rid;
6307 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6308 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error path: release partially built bookkeeping and the AQ buffer. */
6310 err_ice_add_adv_rule:
6311 if (status && adv_fltr) {
6312 ice_free(hw, adv_fltr->lkups);
6313 ice_free(hw, adv_fltr);
6316 ice_free(hw, s_rule);
/* NOTE(review): this extract elides some original lines (status checks,
 * argument continuations, braces); comments describe only visible code.
 */
6322 * ice_adv_rem_update_vsi_list
6323 * @hw: pointer to the hardware structure
6324 * @vsi_handle: VSI handle of the VSI to remove
6325 * @fm_list: filter management entry for which the VSI list management needs to
6328 static enum ice_status
6329 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6330 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6332 struct ice_vsi_list_map_info *vsi_list_info;
6333 enum ice_sw_lkup_type lkup_type;
6334 enum ice_status status;
/* Only meaningful for rules that currently forward to a VSI list. */
6337 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6338 fm_list->vsi_count == 0)
6339 return ICE_ERR_PARAM;
6341 /* A rule with the VSI being removed does not exist */
6342 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6343 return ICE_ERR_DOES_NOT_EXIST;
6345 lkup_type = ICE_SW_LKUP_LAST;
6346 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove (remove flag = true) this VSI from the hardware VSI list. */
6347 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6348 ice_aqc_opc_update_sw_rules,
/* Mirror the removal in the driver's bookkeeping. */
6353 fm_list->vsi_count--;
6354 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6355 vsi_list_info = fm_list->vsi_list_info;
6356 if (fm_list->vsi_count == 1) {
6357 struct ice_fltr_info tmp_fltr;
/* One VSI left: find it so the rule can revert to plain forwarding. */
6360 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6362 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6363 return ICE_ERR_OUT_OF_RANGE;
6365 /* Make sure VSI list is empty before removing it below */
6366 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6368 ice_aqc_opc_update_sw_rules,
6372 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6373 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6374 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6375 tmp_fltr.fwd_id.hw_vsi_id =
6376 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6377 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6378 ice_get_hw_vsi_num(hw, rem_vsi_handle);
/* Convert the switch rule from "fwd to VSI list" back to plain
 * "forward to VSI" for the single remaining VSI.
 */
6383 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6385 ice_debug(hw, ICE_DBG_SW,
6386 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6387 tmp_fltr.fwd_id.hw_vsi_id, status);
6392 if (fm_list->vsi_count == 1) {
6393 /* Remove the VSI list since it is no longer used */
6394 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6396 ice_debug(hw, ICE_DBG_SW,
6397 "Failed to remove VSI list %d, error %d\n",
6398 vsi_list_id, status);
/* Drop the now-unused VSI list map from the bookkeeping list. */
6402 LIST_DEL(&vsi_list_info->list_entry);
6403 ice_free(hw, vsi_list_info);
6404 fm_list->vsi_list_info = NULL;
/* NOTE(review): this extract elides some original lines (status checks,
 * else-branches, braces); comments describe only what is visible here.
 */
6411 * ice_rem_adv_rule - removes existing advanced switch rule
6412 * @hw: pointer to the hardware structure
6413 * @lkups: information on the words that needs to be looked up. All words
6414 * together makes one recipe
6415 * @lkups_cnt: num of entries in the lkups array
6416 * @rinfo: Its the pointer to the rule information for the rule
6418 * This function can be used to remove 1 rule at a time. The lkups is
6419 * used to describe all the words that forms the "lookup" portion of the
6420 * rule. These words can span multiple protocols. Callers to this function
6421 * need to pass in a list of protocol headers with lookup information along
6422 * and mask that determines which words are valid from the given protocol
6423 * header. rinfo describes other information related to this rule such as
6424 * forwarding IDs, priority of this rule, etc.
6427 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6428 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6430 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6431 const struct ice_dummy_pkt_offsets *offsets;
6432 struct ice_prot_lkup_ext lkup_exts;
6433 u16 rule_buf_sz, pkt_len, i, rid;
6434 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6435 enum ice_status status = ICE_SUCCESS;
6436 bool remove_rule = false;
6437 const u8 *pkt = NULL;
/* Rebuild the extraction words from lkups to locate the recipe. */
6440 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6441 for (i = 0; i < lkups_cnt; i++) {
6444 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6447 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6452 /* Create any special protocol/offset pairs, such as looking at tunnel
6453 * bits by extracting metadata
6455 status = ice_add_special_words(rinfo, &lkup_exts);
6459 rid = ice_find_recp(hw, &lkup_exts);
6460 /* If did not find a recipe that match the existing criteria */
6461 if (rid == ICE_MAX_NUM_RECIPES)
6462 return ICE_ERR_PARAM;
6464 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6465 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6466 /* the rule is already removed */
/* Decide under the lock whether the hardware rule itself must go or
 * only a VSI must be dropped from its VSI list.
 */
6469 ice_acquire_lock(rule_lock);
6470 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6472 } else if (list_elem->vsi_count > 1) {
/* Other VSIs still reference the list: shrink it, keep the rule. */
6473 list_elem->vsi_list_info->ref_cnt--;
6474 remove_rule = false;
6475 vsi_handle = rinfo->sw_act.vsi_handle;
6476 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6478 vsi_handle = rinfo->sw_act.vsi_handle;
6479 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6481 ice_release_lock(rule_lock);
6484 if (list_elem->vsi_count == 0)
6487 ice_release_lock(rule_lock);
6489 struct ice_aqc_sw_rules_elem *s_rule;
/* pkt_len is needed to size the removal buffer identically to add. */
6491 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
6492 &pkt_len, &offsets);
6493 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6495 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6498 return ICE_ERR_NO_MEMORY;
/* Removal identifies the rule by index; no action or header needed. */
6499 s_rule->pdata.lkup_tx_rx.act = 0;
6500 s_rule->pdata.lkup_tx_rx.index =
6501 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6502 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6503 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6505 ice_aqc_opc_remove_sw_rules, NULL);
6506 if (status == ICE_SUCCESS) {
/* Hardware removal succeeded: drop the bookkeeping entry too. */
6507 ice_acquire_lock(rule_lock);
6508 LIST_DEL(&list_elem->list_entry);
6509 ice_free(hw, list_elem->lkups);
6510 ice_free(hw, list_elem);
6511 ice_release_lock(rule_lock);
6513 ice_free(hw, s_rule);
6519 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6520 * @hw: pointer to the hardware structure
6521 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6523 * This function is used to remove 1 rule at a time. The removal is based on
6524 * the remove_entry parameter. This function will remove rule for a given
6525 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6528 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6529 struct ice_rule_query_data *remove_entry)
6531 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6532 struct LIST_HEAD_TYPE *list_head;
6533 struct ice_adv_rule_info rinfo;
6534 struct ice_switch_info *sw;
6536 sw = hw->switch_info;
/* An uncreated recipe cannot hold the rule being removed. */
6537 if (!sw->recp_list[remove_entry->rid].recp_created)
6538 return ICE_ERR_PARAM;
6539 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6540 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6542 if (list_itr->rule_info.fltr_rule_id ==
6543 remove_entry->rule_id) {
/* Found it: delegate to ice_rem_adv_rule with a copy of the
 * stored rule info, retargeted at the caller's VSI handle.
 */
6544 rinfo = list_itr->rule_info;
6545 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6546 return ice_rem_adv_rule(hw, list_itr->lkups,
6547 list_itr->lkups_cnt, &rinfo);
/* No rule with that ID on this recipe's list. */
6550 return ICE_ERR_PARAM;
/* NOTE(review): this extract elides some original lines (continue
 * statements, a status check, closing braces).
 */
6554 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6556 * @hw: pointer to the hardware structure
6557 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6559 * This function is used to remove all the rules for a given VSI and as soon
6560 * as removing a rule fails, it will return immediately with the error code,
6561 * else it will return ICE_SUCCESS
6564 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6566 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6567 struct ice_vsi_list_map_info *map_info;
6568 struct LIST_HEAD_TYPE *list_head;
6569 struct ice_adv_rule_info rinfo;
6570 struct ice_switch_info *sw;
6571 enum ice_status status;
6572 u16 vsi_list_id = 0;
6575 sw = hw->switch_info;
/* Walk every recipe; only created, advanced-rule recipes are relevant. */
6576 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6577 if (!sw->recp_list[rid].recp_created)
6579 if (!sw->recp_list[rid].adv_rule)
6581 list_head = &sw->recp_list[rid].filt_rules;
6583 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6584 ice_adv_fltr_mgmt_list_entry, list_entry) {
6585 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
/* Remove this rule for the target VSI via ice_rem_adv_rule. */
6589 rinfo = list_itr->rule_info;
6590 rinfo.sw_act.vsi_handle = vsi_handle;
6591 status = ice_rem_adv_rule(hw, list_itr->lkups,
6592 list_itr->lkups_cnt, &rinfo);
/* NOTE(review): this extract elides some original lines (returns, while
 * loop header, braces); comments describe only what is visible here.
 */
6602 * ice_replay_fltr - Replay all the filters stored by a specific list head
6603 * @hw: pointer to the hardware structure
6604 * @list_head: list for which filters needs to be replayed
6605 * @recp_id: Recipe ID for which rules need to be replayed
6607 static enum ice_status
6608 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6610 struct ice_fltr_mgmt_list_entry *itr;
6611 struct LIST_HEAD_TYPE l_head;
6612 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty list. */
6614 if (LIST_EMPTY(list_head))
6617 /* Move entries from the given list_head to a temporary l_head so that
6618 * they can be replayed. Otherwise when trying to re-add the same
6619 * filter, the function will return already exists
6621 LIST_REPLACE_INIT(list_head, &l_head)
6623 /* Mark the given list_head empty by reinitializing it so filters
6624 * could be added again by *handler
6626 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6628 struct ice_fltr_list_entry f_entry;
6630 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly. */
6631 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6632 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6633 if (status != ICE_SUCCESS)
6638 /* Add a filter per VSI separately */
/* For VSI-list entries: re-add one rule per VSI in the bitmap. */
6643 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6645 if (!ice_is_vsi_valid(hw, vsi_handle))
6648 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6649 f_entry.fltr_info.vsi_handle = vsi_handle;
6650 f_entry.fltr_info.fwd_id.hw_vsi_id =
6651 ice_get_hw_vsi_num(hw, vsi_handle);
6652 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6653 if (recp_id == ICE_SW_LKUP_VLAN)
6654 status = ice_add_vlan_internal(hw, &f_entry);
6656 status = ice_add_rule_internal(hw, recp_id,
6658 if (status != ICE_SUCCESS)
6663 /* Clear the filter management list */
6664 ice_rem_sw_rule_info(hw, &l_head);
6669 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6670 * @hw: pointer to the hardware structure
6672 * NOTE: This function does not clean up partially added filters on error.
6673 * It is up to caller of the function to issue a reset or fail early.
6675 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6677 struct ice_switch_info *sw = hw->switch_info;
6678 enum ice_status status = ICE_SUCCESS;
/* Replay the bookkept rules of every recipe; stop on first failure
 * (the early exit is presumably on an elided line after the check).
 */
6681 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6682 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6684 status = ice_replay_fltr(hw, i, head);
6685 if (status != ICE_SUCCESS)
/* NOTE(review): this extract elides some original lines (returns,
 * continue statements, braces); comments describe only visible code.
 */
6692 * ice_replay_vsi_fltr - Replay filters for requested VSI
6693 * @hw: pointer to the hardware structure
6694 * @vsi_handle: driver VSI handle
6695 * @recp_id: Recipe ID for which rules need to be replayed
6696 * @list_head: list for which filters need to be replayed
6698 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6699 * It is required to pass valid VSI handle.
6701 static enum ice_status
6702 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6703 struct LIST_HEAD_TYPE *list_head)
6705 struct ice_fltr_mgmt_list_entry *itr;
6706 enum ice_status status = ICE_SUCCESS;
6709 if (LIST_EMPTY(list_head))
6711 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6713 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6715 struct ice_fltr_list_entry f_entry;
6717 f_entry.fltr_info = itr->fltr_info;
/* Case 1: single-VSI non-VLAN filter owned by this VSI. */
6718 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6719 itr->fltr_info.vsi_handle == vsi_handle) {
6720 /* update the src in case it is VSI num */
6721 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6722 f_entry.fltr_info.src = hw_vsi_id;
6723 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6724 if (status != ICE_SUCCESS)
/* Case 2: VSI-list filter — skip unless this VSI is in the map. */
6728 if (!itr->vsi_list_info ||
6729 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6731 /* Clearing it so that the logic can add it back */
6732 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6733 f_entry.fltr_info.vsi_handle = vsi_handle;
6734 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6735 /* update the src in case it is VSI num */
6736 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6737 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters have a dedicated re-add helper. */
6738 if (recp_id == ICE_SW_LKUP_VLAN)
6739 status = ice_add_vlan_internal(hw, &f_entry);
6741 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6742 if (status != ICE_SUCCESS)
/* NOTE(review): this extract elides some original lines (returns, loop
 * continuations, braces).
 */
6750 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6751 * @hw: pointer to the hardware structure
6752 * @vsi_handle: driver VSI handle
6753 * @list_head: list for which filters need to be replayed
6755 * Replay the advanced rule for the given VSI.
6757 static enum ice_status
6758 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6759 struct LIST_HEAD_TYPE *list_head)
6761 struct ice_rule_query_data added_entry = { 0 };
6762 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6763 enum ice_status status = ICE_SUCCESS;
6765 if (LIST_EMPTY(list_head))
6767 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6769 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6770 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules that belong to the requested VSI. */
6772 if (vsi_handle != rinfo->sw_act.vsi_handle)
/* Re-program the stored rule; added_entry receives the new IDs. */
6774 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
/* NOTE(review): this extract elides some original lines (else keyword,
 * break/return, braces).
 */
6783 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6784 * @hw: pointer to the hardware structure
6785 * @vsi_handle: driver VSI handle
6787 * Replays filters for requested VSI via vsi_handle.
6789 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6791 struct ice_switch_info *sw = hw->switch_info;
6792 enum ice_status status;
6795 /* Update the recipes that were created */
6796 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6797 struct LIST_HEAD_TYPE *head;
6799 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes replay via ice_replay_vsi_fltr; advanced ones via
 * ice_replay_vsi_adv_rule.
 */
6800 if (!sw->recp_list[i].adv_rule)
6801 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6803 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6804 if (status != ICE_SUCCESS)
6812 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6813 * @hw: pointer to the HW struct
6815 * Deletes the filter replay rules.
6817 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6819 struct ice_switch_info *sw = hw->switch_info;
6825 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6826 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6827 struct LIST_HEAD_TYPE *l_head;
6829 l_head = &sw->recp_list[i].filt_replay_rules;
6830 if (!sw->recp_list[i].adv_rule)
6831 ice_rem_sw_rule_info(hw, l_head);
6833 ice_rem_adv_rule_info(hw, l_head);